Multi-Agent Memory Sharing
In this tutorial you will build a multi-agent system where specialized agents share a common memory layer while maintaining private per-agent memories. Agents write discoveries to shared namespaces and read context from both their private store and the shared pool.
By the end you will have:
- A shared memory namespace accessible by all agents in the pipeline
- Private per-agent memory that is invisible to other agents
- Cross-agent context retrieval using semantic search
- Working examples in both CrewAI and LangChain
Prerequisites
- Python 3.10+
- HatiData proxy running locally or in the cloud
- HatiData SDK installed
pip install hatidata langchain-hatidata crewai-hatidata
export HATIDATA_API_KEY="hd_live_your_api_key"
export HATIDATA_HOST="localhost"
export OPENAI_API_KEY="sk-..."
Step 1: Design the Memory Namespace
Use agent ID conventions to separate shared and private memories:
| Namespace | Agent ID Pattern | Access |
|---|---|---|
| Shared knowledge | shared-project-x | All agents read and write |
| Private to researcher | researcher-agent | Only researcher reads/writes |
| Private to writer | writer-agent | Only writer reads/writes |
| Private to reviewer | reviewer-agent | Only reviewer reads/writes |
from hatidata import HatiDataClient
from hatidata.memory import MemoryClient

# Connect to the HatiData proxy; all agents in this tutorial share one client.
client = HatiDataClient(
    host="localhost",
    port=5439,
    api_key="hd_live_your_api_key",
)
# Memory operations (store/search) are issued through this wrapper.
memory = MemoryClient(client)

# Namespace (agent_id) used as the shared pool every agent can read and write.
SHARED_NS = "shared-project-x"
Step 2: Shared Memory Write and Read
Any agent in the pipeline can write to the shared namespace:
def publish_to_shared(content: str, source_agent: str, metadata: dict | None = None) -> None:
    """Publish a fact to the shared memory namespace.

    Args:
        content: Text of the fact to make visible to every agent.
        source_agent: ID of the publishing agent; stored in metadata so
            readers can attribute each shared memory to its author.
        metadata: Optional extra metadata merged into the stored record.
            Caller-supplied keys override the defaults set here.
    """
    memory.store(
        agent_id=SHARED_NS,
        content=content,
        metadata={
            "source_agent": source_agent,
            "type": "shared_fact",
            # Merge caller metadata last so it can override the defaults.
            **(metadata or {}),
        },
    )
def search_shared(query: str, top_k: int = 5, min_score: float = 0.65) -> list:
    """Search the shared memory namespace with semantic search.

    Args:
        query: Natural-language query to match against shared memories.
        top_k: Maximum number of results to return.
        min_score: Minimum similarity score for a result to be included
            (previously hard-coded; kept at the same default of 0.65).

    Returns:
        A list of matching memory records from the shared namespace.
    """
    return memory.search(
        agent_id=SHARED_NS,
        query=query,
        top_k=top_k,
        min_score=min_score,
    )
And each agent has its own private store:
def store_private(agent_id: str, content: str, metadata: dict | None = None) -> None:
    """Store a memory in the agent's private namespace.

    Args:
        agent_id: The agent whose private store receives the memory;
            other agents never query this namespace.
        content: Text of the private note.
        metadata: Optional extra metadata merged into the stored record.
            Caller-supplied keys override the default ``type``.
    """
    memory.store(
        agent_id=agent_id,
        content=content,
        metadata={"type": "private_note", **(metadata or {})},
    )
def search_private(agent_id: str, query: str, top_k: int = 5) -> list:
    """Run a semantic search over one agent's private memory store."""
    results = memory.search(
        agent_id=agent_id,
        query=query,
        top_k=top_k,
    )
    return results
Step 3: Multi-Agent Pipeline
Build a three-agent pipeline: researcher, writer, and reviewer.
def researcher_step(topic: str) -> str:
    """Research phase: gather facts and publish them to shared memory.

    Each finding goes to the shared namespace (visible to all agents)
    and is also mirrored into the researcher's private notes.
    """
    agent_id = "researcher-agent"

    # Simulated research output for the tutorial.
    facts = [
        f"Market size for {topic} is projected at $4.2B by 2027.",
        f"Key competitors in {topic}: Acme Corp, Beta Inc, Gamma Systems.",
        f"Customer pain point: existing {topic} solutions lack real-time capabilities.",
    ]

    for fact in facts:
        # Make the fact available to every downstream agent.
        publish_to_shared(
            content=fact,
            source_agent=agent_id,
            metadata={"topic": topic, "phase": "research"},
        )
        # Keep a private copy only the researcher can see.
        store_private(agent_id, f"Research note: {fact}")

    return f"Researcher published {len(facts)} findings to shared memory."
def writer_step(topic: str) -> str:
    """Writing phase: pull shared research context and draft a report.

    Reads what the researcher published, drafts content from it, keeps
    the draft privately, and announces completion in shared memory so
    the reviewer can find it.
    """
    agent_id = "writer-agent"

    # Retrieve the researcher's published facts from the shared pool.
    hits = search_shared(f"facts about {topic}")
    bullets = "\n".join(f"- {r.content}" for r in hits)

    # Compose the draft on top of the retrieved context.
    draft = (
        f"Report on {topic}:\n\nBased on research findings:\n{bullets}\n\n"
        + "Analysis: The market opportunity is significant given the projected growth..."
    )

    # The full draft stays in the writer's private namespace.
    store_private(agent_id, f"Draft v1: {draft[:200]}", {"phase": "writing"})

    # Only a summary goes to shared memory for the reviewer to pick up.
    publish_to_shared(
        content=f"Draft completed for {topic}. Key themes: market sizing, competition, pain points.",
        source_agent=agent_id,
        metadata={"phase": "writing", "artifact": "draft-v1"},
    )
    return draft
def reviewer_step(topic: str) -> str:
    """Review phase: read all shared context and publish feedback.

    Pulls everything in the shared namespace for the topic, attributes
    each memory to its source agent, and posts a review summary back to
    shared memory.
    """
    agent_id = "reviewer-agent"

    # Fetch a wider window so both research facts and the draft show up.
    all_context = search_shared(topic, top_k=10)

    # Partition retrieved memories by the agent that published them.
    research = [r for r in all_context if r.metadata.get("source_agent") == "researcher-agent"]
    writing = [r for r in all_context if r.metadata.get("source_agent") == "writer-agent"]

    feedback = "".join(
        [
            f"Review for {topic}:\n",
            f"- Research facts available: {len(research)}\n",
            f"- Writer artifacts available: {len(writing)}\n",
            "- Recommendation: Include competitor analysis section.\n",
        ]
    )

    publish_to_shared(
        content=feedback,
        source_agent=agent_id,
        metadata={"phase": "review"},
    )
    return feedback
# Run the pipeline
# Order matters: the writer reads what the researcher published, and the
# reviewer reads both of them, all through the shared namespace.
topic = "agent-native data infrastructure"
print(researcher_step(topic))
print(writer_step(topic))
print(reviewer_step(topic))
Step 4: CrewAI Integration
Use HatiData as the shared memory layer for a CrewAI crew:
from crewai import Agent, Task, Crew
from crewai_hatidata import HatiDataMemory

# One memory instance pointed at the shared namespace; every agent that
# receives it reads and writes the same pool.
shared_memory = HatiDataMemory(
    host="localhost",
    port=5439,
    agent_id=SHARED_NS,
    # NOTE(review): this adapter takes the API key via `password=` while
    # HatiDataClient above uses `api_key=` — confirm against the adapter docs.
    password="hd_live_your_api_key",
)
# Both agents are handed the same shared_memory instance, so the writer
# can recall what the researcher stored during its task.
researcher = Agent(
    role="Market Researcher",
    goal="Find market data and competitive intelligence",
    backstory="You are an expert market analyst.",
    memory=shared_memory,
    verbose=True,
)

writer = Agent(
    role="Content Writer",
    goal="Draft reports using research findings",
    backstory="You write clear, data-driven reports.",
    memory=shared_memory,
    verbose=True,
)

research_task = Task(
    description="Research the agent-native data infrastructure market.",
    expected_output="A list of key market facts and competitor analysis.",
    agent=researcher,
)

writing_task = Task(
    description="Write a market report using the research findings.",
    expected_output="A structured market report with analysis.",
    agent=writer,
)

# Tasks run in list order, so the research output exists in shared
# memory before the writing task starts.
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, writing_task],
    verbose=True,
)

result = crew.kickoff()
Step 5: LangChain Integration
Use HatiData shared memory with LangChain agents:
from langchain_hatidata import HatiDataMemory
from langchain_openai import ChatOpenAI
from langchain.chains import ConversationChain

# Both agents share the same memory namespace
# Two memory objects, but the same agent_id and session_id — so they
# resolve to one underlying store.
researcher_memory = HatiDataMemory(
    host="localhost",
    agent_id=SHARED_NS,
    # NOTE(review): API key passed via `password=` here vs `api_key=` on
    # HatiDataClient — confirm against the integration docs.
    password="hd_live_your_api_key",
    session_id="project-x-session",
)

writer_memory = HatiDataMemory(
    host="localhost",
    agent_id=SHARED_NS,
    password="hd_live_your_api_key",
    session_id="project-x-session",
)

llm = ChatOpenAI(model="gpt-4o", temperature=0)

researcher_chain = ConversationChain(llm=llm, memory=researcher_memory)
writer_chain = ConversationChain(llm=llm, memory=writer_memory)

# Researcher adds context
researcher_chain.predict(input="The TAM for agent infrastructure is $4.2B by 2027.")

# Writer can see the researcher's context
response = writer_chain.predict(input="Write a summary of the market opportunity.")
print(response)
Verifying Cross-Agent Memory with SQL
Query the underlying memory table directly to inspect cross-agent sharing:
-- See all shared memories
-- `metadata->>'source_agent'` extracts the publishing agent as text from
-- the JSON metadata column.
SELECT agent_id, content, metadata->>'source_agent' AS source, created_at
FROM _hatidata_agent_memory
WHERE agent_id = 'shared-project-x'
ORDER BY created_at DESC;

-- Count memories by source agent
-- NOTE(review): GROUP BY uses the output-column alias `source_agent`,
-- which PostgreSQL-compatible engines accept — confirm for your engine.
SELECT metadata->>'source_agent' AS source_agent, COUNT(*) AS memory_count
FROM _hatidata_agent_memory
WHERE agent_id = 'shared-project-x'
GROUP BY source_agent;
Related Concepts
- Persistent Memory -- How HatiData memory works
- Memory Patterns -- Advanced memory patterns
- LangChain Integration -- Full LangChain reference
- CrewAI Integration -- Full CrewAI reference
- Agent Identity Model -- Agent keys and RBAC