Persistent, semantically-recalled memory for AutoGen agents. Your agents remember everything — across sessions, across restarts. Dakera handles embedding, storage, and retrieval server-side.
autogen-dakera
# Start the Dakera server (API exposed on port 3300).
docker run -d \
  --name dakera \
  -p 3300:3300 \
  -e DAKERA_ROOT_API_KEY=dk-mykey \
  ghcr.io/dakera-ai/dakera:latest

# Verify the server is up.
curl http://localhost:3300/health
# Core + integration
pip install autogen-dakera

# With AutoGen (if not already installed)
pip install "autogen-dakera[autogen]"
Requirements: Python ≥ 3.10, a running Dakera server.
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient

from autogen_dakera import DakeraMemory

# Point the memory at a running Dakera server.
memory = DakeraMemory(
    api_url="http://localhost:3300",
    api_key="dk-mykey",
    agent_id="my-agent",
)

model_client = OpenAIChatCompletionClient(model="gpt-4o")

# Attach the memory to the agent via AutoGen's `memory` list.
agent = AssistantAgent(
    name="assistant",
    model_client=model_client,
    memory=[memory],
)

# Agent now persists what it learns across sessions
### Configuration

| Parameter | Default | Notes |
|---|---|---|
| `api_url` | `http://localhost:3300` | Base URL of the Dakera server |
| `api_key` | `""` | Falls back to the `DAKERA_ROOT_API_KEY` environment variable when empty (presumably — confirm against the package docs) |
| `agent_id` | — | Identifier for the memory store (no default shown) |
| `min_importance` | `0.0` | Minimum importance score for recalled memories |
| `top_k` | `5` | Number of memories retrieved per query |
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.conditions import MaxMessageTermination
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_ext.models.openai import OpenAIChatCompletionClient

from autogen_dakera import DakeraMemory


async def main():
    # A single memory instance shared by every agent on the team:
    # both agents read from and write to the same "research-team" store.
    shared_memory = DakeraMemory(
        api_url="http://localhost:3300",
        api_key="dk-mykey",
        agent_id="research-team",
        top_k=8,
    )

    model_client = OpenAIChatCompletionClient(model="gpt-4o")

    researcher = AssistantAgent(
        name="researcher",
        model_client=model_client,
        memory=[shared_memory],
        system_message="You are a research expert. Remember key findings.",
    )
    analyst = AssistantAgent(
        name="analyst",
        model_client=model_client,
        memory=[shared_memory],
        system_message="You are a data analyst. Build on what the researcher found.",
    )

    team = RoundRobinGroupChat(
        [researcher, analyst],
        termination_condition=MaxMessageTermination(max_messages=6),
    )

    # First session — agents learn and store
    result = await team.run(task="Research AI memory architectures")
    print(result.messages[-1].content)

    # Later session — agents recall prior research
    result = await team.run(task="What do we know about transformer memory?")
    print(result.messages[-1].content)


asyncio.run(main())
DakeraMemory.add()
DakeraMemory.query()
Memory and vector store for chains
Long-term memory for crews
Memory store and vector index
TypeScript integration