Anthropic
Add persistent memory to your Anthropic Claude conversations.
Setup
```bash
pip install memorer anthropic
```

Usage Pattern

```python
from memorer import Memorer
from anthropic import Anthropic
# Initialize both clients
memorer = Memorer(api_key="mem_sk_...")
anthropic_client = Anthropic()
user = memorer.for_user("user-123")

def chat_with_memory(user_message: str) -> str:
    # 1. Recall relevant memories
    memories = user.recall(user_message)

    # 2. Build system prompt with memory context
    system_prompt = "You are a helpful assistant."
    if memories.context:
        system_prompt += f"\n\nWhat you know about this user:\n{memories.context}"

    # 3. Call Anthropic
    response = anthropic_client.messages.create(
        model="claude-sonnet-4-5-20250929",
        max_tokens=1024,
        system=system_prompt,
        messages=[
            {"role": "user", "content": user_message},
        ],
    )
    assistant_message = response.content[0].text

    # 4. Remember the exchange
    user.remember(f"User: {user_message}\nAssistant: {assistant_message}")
    return assistant_message
```
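A quick usage sketch (the messages below are made up for illustration and assume the client setup above):

```python
# Hypothetical exchange: the first call stores a preference,
# and a later call can draw on it through long-term memory.
print(chat_with_memory("I'm vegetarian, so please only suggest meat-free recipes."))
print(chat_with_memory("What should I cook for dinner tonight?"))
```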
With Conversations

For multi-turn sessions, use conversation tracking:

```python
conv = user.conversation()

def chat_with_conversation(user_message: str) -> str:
    # Add user message
    conv.add("user", user_message)

    # Recall with conversation context + long-term memory
    result = conv.recall(user_message)

    # Call Anthropic with combined context
    response = anthropic_client.messages.create(
        model="claude-sonnet-4-5-20250929",
        max_tokens=1024,
        system=f"You are a helpful assistant.\n\n{result.context}",
        messages=[
            {"role": "user", "content": user_message},
        ],
    )
    assistant_message = response.content[0].text

    # Add assistant response to conversation
    conv.add("assistant", assistant_message)
    return assistant_message
```
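As a sketch, a multi-turn session might look like this (the messages are illustrative):

```python
# Each turn is added to `conv`, so conv.recall() can combine the running
# session with the user's long-term memories.
print(chat_with_conversation("I'm planning a trip to Kyoto in March."))
print(chat_with_conversation("What should I pack for it?"))
```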
How it works

- `user.recall()` searches long-term memory for relevant context
- The memory context is injected into the system prompt
- Claude generates a response with the enriched context
- `user.remember()` stores the exchange for future recall
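Because memories persist with Memorer rather than in the running process, recall also works across separate runs. A minimal sketch, assuming the same user ID and an illustrative query string:

```python
# Hypothetical: a fresh process (or another server instance) for the
# same user can still recall what was remembered earlier.
fresh_client = Memorer(api_key="mem_sk_...")
same_user = fresh_client.for_user("user-123")
print(same_user.recall("food preferences").context)
```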