OpenAI
Add persistent memory to your OpenAI chat completions.
Setup
pip install memorer openai

Usage Pattern
from memorer import Memorer
from openai import OpenAI

# Initialize both clients
memorer = Memorer(api_key="mem_sk_...")  # replace with your Memorer API key
openai_client = OpenAI()  # presumably reads OPENAI_API_KEY from the environment — confirm
user = memorer.for_user("user-123")  # scopes all recall/remember calls to this user id
def chat_with_memory(user_message: str) -> str:
    """Answer *user_message* with OpenAI, enriched by this user's long-term memory.

    Recalls relevant memories, injects them into the system prompt, calls the
    model, then stores the new exchange so it can be recalled later.
    """
    # 1. Recall relevant memories
    memories = user.recall(user_message)

    # 2. Build system prompt with memory context
    system_prompt = "You are a helpful assistant."
    if memories.context:
        system_prompt += f"\n\nWhat you know about this user:\n{memories.context}"

    # 3. Call OpenAI
    response = openai_client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message},
        ],
    )
    assistant_message = response.choices[0].message.content

    # 4. Remember the exchange
    user.remember(f"User: {user_message}\nAssistant: {assistant_message}")
    return assistant_message

With Conversations
For multi-turn sessions, use conversation tracking to maintain short-term context alongside long-term memory:
# Create a conversation session
conv = user.conversation()


def chat_with_conversation(user_message: str) -> str:
    """Answer *user_message* inside a tracked conversation session.

    The conversation object keeps short-term turn history, and its recall
    combines that history with the user's long-term memory.
    """
    # Add user message to the conversation history
    conv.add("user", user_message)

    # Recall with conversation context + long-term memory
    result = conv.recall(user_message)

    # Call OpenAI with combined context
    response = openai_client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": f"You are a helpful assistant.\n\n{result.context}"},
            {"role": "user", "content": user_message},
        ],
    )
    assistant_message = response.choices[0].message.content

    # Add assistant response to conversation
    conv.add("assistant", assistant_message)
    return assistant_message

How it works
- `user.recall()` searches long-term memory for relevant context
- The memory context is injected into the system prompt
- OpenAI generates a response with the enriched context
- `user.remember()` stores the exchange for future recall
Last updated on