crewai-dakera 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crewai_dakera-0.1.0/PKG-INFO +250 -0
- crewai_dakera-0.1.0/README.md +218 -0
- crewai_dakera-0.1.0/pyproject.toml +59 -0
- crewai_dakera-0.1.0/setup.cfg +4 -0
- crewai_dakera-0.1.0/src/crewai_dakera/__init__.py +4 -0
- crewai_dakera-0.1.0/src/crewai_dakera/py.typed +0 -0
- crewai_dakera-0.1.0/src/crewai_dakera/storage.py +32 -0
- crewai_dakera-0.1.0/src/crewai_dakera.egg-info/PKG-INFO +250 -0
- crewai_dakera-0.1.0/src/crewai_dakera.egg-info/SOURCES.txt +11 -0
- crewai_dakera-0.1.0/src/crewai_dakera.egg-info/dependency_links.txt +1 -0
- crewai_dakera-0.1.0/src/crewai_dakera.egg-info/requires.txt +11 -0
- crewai_dakera-0.1.0/src/crewai_dakera.egg-info/top_level.txt +1 -0
- crewai_dakera-0.1.0/tests/test_storage.py +55 -0
|
@@ -0,0 +1,250 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: crewai-dakera
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: CrewAI integration for the Dakera AI memory platform
|
|
5
|
+
Author: Dakera Team
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/dakera-ai/dakera-crewai
|
|
8
|
+
Project-URL: Documentation, https://docs.dakera.ai/integrations/crewai
|
|
9
|
+
Project-URL: Repository, https://github.com/dakera-ai/dakera-crewai
|
|
10
|
+
Keywords: crewai,dakera,memory,ai,agents
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Operating System :: OS Independent
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Classifier: Typing :: Typed
|
|
21
|
+
Requires-Python: >=3.10
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
Requires-Dist: dakera>=0.9.0
|
|
24
|
+
Provides-Extra: crewai
|
|
25
|
+
Requires-Dist: crewai>=0.30.0; extra == "crewai"
|
|
26
|
+
Provides-Extra: dev
|
|
27
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
28
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
29
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
30
|
+
Requires-Dist: mypy>=1.0.0; extra == "dev"
|
|
31
|
+
Requires-Dist: pip-audit>=2.0.0; extra == "dev"
|
|
32
|
+
|
|
33
|
+
# crewai-dakera
|
|
34
|
+
|
|
35
|
+
[](https://github.com/Dakera-AI/dakera-crewai/actions/workflows/ci.yml)
|
|
36
|
+
[](https://pypi.org/project/crewai-dakera/)
|
|
37
|
+
[](https://pypi.org/project/crewai-dakera/)
|
|
38
|
+
[](LICENSE)
|
|
39
|
+
|
|
40
|
+
**Persistent, semantically-recalled memory for [CrewAI](https://crewai.com) agents, powered by [Dakera](https://github.com/Dakera-AI/dakera-deploy).**
|
|
41
|
+
|
|
42
|
+
Your CrewAI crews remember everything — across sessions, across restarts. Dakera handles embedding, storage, and retrieval server-side with no local model required.
|
|
43
|
+
|
|
44
|
+
---
|
|
45
|
+
|
|
46
|
+
## Quick Start
|
|
47
|
+
|
|
48
|
+
### Step 1 — Run Dakera
|
|
49
|
+
|
|
50
|
+
Dakera is a self-hosted memory server. Spin it up with Docker:
|
|
51
|
+
|
|
52
|
+
```bash
|
|
53
|
+
docker run -d \
|
|
54
|
+
--name dakera \
|
|
55
|
+
-p 3300:3300 \
|
|
56
|
+
-e DAKERA_ROOT_API_KEY=dk-mykey \
|
|
57
|
+
ghcr.io/dakera-ai/dakera:latest
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
For a production setup with persistent storage, use Docker Compose:
|
|
61
|
+
|
|
62
|
+
```bash
|
|
63
|
+
# Download and start
|
|
64
|
+
curl -sSfL https://raw.githubusercontent.com/Dakera-AI/dakera-deploy/main/docker-compose.yml \
|
|
65
|
+
-o docker-compose.yml
|
|
66
|
+
DAKERA_API_KEY=dk-mykey docker compose up -d
|
|
67
|
+
|
|
68
|
+
# Verify it's running
|
|
69
|
+
curl http://localhost:3300/health
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
> Full deployment guide: [github.com/Dakera-AI/dakera-deploy](https://github.com/Dakera-AI/dakera-deploy)
|
|
73
|
+
|
|
74
|
+
### Step 2 — Install the integration
|
|
75
|
+
|
|
76
|
+
```bash
|
|
77
|
+
pip install crewai-dakera
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
### Step 3 — Add memory to your crew
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from crewai import Crew, Agent, Task
|
|
84
|
+
from crewai.memory import LongTermMemory
|
|
85
|
+
from crewai_dakera import DakeraStorage
|
|
86
|
+
|
|
87
|
+
storage = DakeraStorage(
|
|
88
|
+
api_url="http://localhost:3300",
|
|
89
|
+
api_key="dk-mykey",
|
|
90
|
+
agent_id="my-crew",
|
|
91
|
+
)
|
|
92
|
+
|
|
93
|
+
crew = Crew(
|
|
94
|
+
agents=[...],
|
|
95
|
+
tasks=[...],
|
|
96
|
+
memory=True,
|
|
97
|
+
long_term_memory=LongTermMemory(storage=storage),
|
|
98
|
+
)
|
|
99
|
+
|
|
100
|
+
result = crew.kickoff(inputs={"topic": "AI trends"})
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
Your crew now persists everything it learns across runs.
|
|
104
|
+
|
|
105
|
+
---
|
|
106
|
+
|
|
107
|
+
## Installation
|
|
108
|
+
|
|
109
|
+
```bash
|
|
110
|
+
# Core + integration
|
|
111
|
+
pip install crewai-dakera
|
|
112
|
+
|
|
113
|
+
# With CrewAI (if not already installed)
|
|
114
|
+
pip install "crewai-dakera[crewai]"
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
**Requirements:** Python ≥ 3.10, a running Dakera server (see Step 1 above)
|
|
118
|
+
|
|
119
|
+
---
|
|
120
|
+
|
|
121
|
+
## Configuration
|
|
122
|
+
|
|
123
|
+
| Parameter | Type | Default | Description |
|
|
124
|
+
|-----------|------|---------|-------------|
|
|
125
|
+
| `api_url` | `str` | — | Dakera server URL (e.g. `http://localhost:3300`) |
|
|
126
|
+
| `api_key` | `str` | `""` | API key set via `DAKERA_ROOT_API_KEY` |
|
|
127
|
+
| `agent_id` | `str` | — | Logical identifier for this crew's memory |
|
|
128
|
+
| `min_importance` | `float` | `0.0` | Minimum importance score for recalled memories |
|
|
129
|
+
| `top_k` | `int` | `5` | Number of memories to surface per turn |
|
|
130
|
+
|
|
131
|
+
Use environment variables to avoid hardcoding credentials:
|
|
132
|
+
|
|
133
|
+
```python
|
|
134
|
+
import os
|
|
135
|
+
from crewai_dakera import DakeraStorage
|
|
136
|
+
|
|
137
|
+
storage = DakeraStorage(
|
|
138
|
+
api_url=os.environ["DAKERA_URL"],
|
|
139
|
+
api_key=os.environ["DAKERA_API_KEY"],
|
|
140
|
+
agent_id="research-crew",
|
|
141
|
+
)
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
---
|
|
145
|
+
|
|
146
|
+
## Examples
|
|
147
|
+
|
|
148
|
+
### Research crew with persistent memory
|
|
149
|
+
|
|
150
|
+
```python
|
|
151
|
+
from crewai import Agent, Task, Crew, Process
|
|
152
|
+
from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
|
|
153
|
+
from crewai_dakera import DakeraStorage
|
|
154
|
+
|
|
155
|
+
dakera = DakeraStorage(
|
|
156
|
+
api_url="http://localhost:3300",
|
|
157
|
+
api_key="dk-mykey",
|
|
158
|
+
agent_id="research-crew",
|
|
159
|
+
)
|
|
160
|
+
|
|
161
|
+
researcher = Agent(
|
|
162
|
+
role="Senior Researcher",
|
|
163
|
+
goal="Uncover groundbreaking insights in {topic}",
|
|
164
|
+
backstory="An expert researcher with decades of experience.",
|
|
165
|
+
verbose=True,
|
|
166
|
+
)
|
|
167
|
+
|
|
168
|
+
writer = Agent(
|
|
169
|
+
role="Content Writer",
|
|
170
|
+
goal="Craft compelling reports based on research findings",
|
|
171
|
+
backstory="A skilled writer who turns complex ideas into clear prose.",
|
|
172
|
+
verbose=True,
|
|
173
|
+
)
|
|
174
|
+
|
|
175
|
+
research_task = Task(
|
|
176
|
+
description="Research the latest developments in {topic}",
|
|
177
|
+
expected_output="A detailed research report",
|
|
178
|
+
agent=researcher,
|
|
179
|
+
)
|
|
180
|
+
|
|
181
|
+
write_task = Task(
|
|
182
|
+
description="Write a blog post based on the research",
|
|
183
|
+
expected_output="A polished 500-word article",
|
|
184
|
+
agent=writer,
|
|
185
|
+
)
|
|
186
|
+
|
|
187
|
+
crew = Crew(
|
|
188
|
+
agents=[researcher, writer],
|
|
189
|
+
tasks=[research_task, write_task],
|
|
190
|
+
process=Process.sequential,
|
|
191
|
+
memory=True,
|
|
192
|
+
long_term_memory=LongTermMemory(storage=dakera),
|
|
193
|
+
verbose=True,
|
|
194
|
+
)
|
|
195
|
+
|
|
196
|
+
# First run — learns and stores findings
|
|
197
|
+
result = crew.kickoff(inputs={"topic": "quantum computing"})
|
|
198
|
+
print(result.raw)
|
|
199
|
+
|
|
200
|
+
# Second run — recalls prior research automatically
|
|
201
|
+
result = crew.kickoff(inputs={"topic": "quantum computing advances"})
|
|
202
|
+
print(result.raw)
|
|
203
|
+
```
|
|
204
|
+
|
|
205
|
+
### Custom importance scoring
|
|
206
|
+
|
|
207
|
+
```python
|
|
208
|
+
storage = DakeraStorage(
|
|
209
|
+
api_url="http://localhost:3300",
|
|
210
|
+
api_key="dk-mykey",
|
|
211
|
+
agent_id="my-crew",
|
|
212
|
+
min_importance=0.6, # only surface high-quality memories
|
|
213
|
+
top_k=10,
|
|
214
|
+
)
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
---
|
|
218
|
+
|
|
219
|
+
## How it works
|
|
220
|
+
|
|
221
|
+
1. After each task, CrewAI calls `DakeraStorage.save()` with the result
|
|
222
|
+
2. Dakera embeds the content server-side (no local model needed) and stores it with a semantic vector
|
|
223
|
+
3. Before the next task, CrewAI calls `DakeraStorage.search()` — Dakera performs hybrid search (vector + BM25) and returns the most relevant past memories
|
|
224
|
+
4. Memories decay gracefully over time based on access patterns — frequently-accessed memories stay prominent
|
|
225
|
+
|
|
226
|
+
---
|
|
227
|
+
|
|
228
|
+
## Related packages
|
|
229
|
+
|
|
230
|
+
| Package | Framework | Language |
|
|
231
|
+
|---------|-----------|----------|
|
|
232
|
+
| `langchain-dakera` | LangChain | Python |
|
|
233
|
+
| `llamaindex-dakera` | LlamaIndex | Python |
|
|
234
|
+
| `autogen-dakera` | AutoGen | Python |
|
|
235
|
+
| `@dakera-ai/langchain` | LangChain.js | TypeScript |
|
|
236
|
+
|
|
237
|
+
---
|
|
238
|
+
|
|
239
|
+
## Links
|
|
240
|
+
|
|
241
|
+
- [Dakera Server](https://github.com/Dakera-AI/dakera-deploy) — self-hosted memory server
|
|
242
|
+
- [Dakera Python SDK](https://github.com/Dakera-AI/dakera-py) — low-level API client
|
|
243
|
+
- [Documentation](https://docs.dakera.ai/integrations/crewai)
|
|
244
|
+
- [All integrations](https://github.com/Dakera-AI/dakera-integrations)
|
|
245
|
+
|
|
246
|
+
---
|
|
247
|
+
|
|
248
|
+
## License
|
|
249
|
+
|
|
250
|
+
MIT © [Dakera AI](https://dakera.ai)
|
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
# crewai-dakera
|
|
2
|
+
|
|
3
|
+
[](https://github.com/Dakera-AI/dakera-crewai/actions/workflows/ci.yml)
|
|
4
|
+
[](https://pypi.org/project/crewai-dakera/)
|
|
5
|
+
[](https://pypi.org/project/crewai-dakera/)
|
|
6
|
+
[](LICENSE)
|
|
7
|
+
|
|
8
|
+
**Persistent, semantically-recalled memory for [CrewAI](https://crewai.com) agents, powered by [Dakera](https://github.com/Dakera-AI/dakera-deploy).**
|
|
9
|
+
|
|
10
|
+
Your CrewAI crews remember everything — across sessions, across restarts. Dakera handles embedding, storage, and retrieval server-side with no local model required.
|
|
11
|
+
|
|
12
|
+
---
|
|
13
|
+
|
|
14
|
+
## Quick Start
|
|
15
|
+
|
|
16
|
+
### Step 1 — Run Dakera
|
|
17
|
+
|
|
18
|
+
Dakera is a self-hosted memory server. Spin it up with Docker:
|
|
19
|
+
|
|
20
|
+
```bash
|
|
21
|
+
docker run -d \
|
|
22
|
+
--name dakera \
|
|
23
|
+
-p 3300:3300 \
|
|
24
|
+
-e DAKERA_ROOT_API_KEY=dk-mykey \
|
|
25
|
+
ghcr.io/dakera-ai/dakera:latest
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
For a production setup with persistent storage, use Docker Compose:
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
# Download and start
|
|
32
|
+
curl -sSfL https://raw.githubusercontent.com/Dakera-AI/dakera-deploy/main/docker-compose.yml \
|
|
33
|
+
-o docker-compose.yml
|
|
34
|
+
DAKERA_API_KEY=dk-mykey docker compose up -d
|
|
35
|
+
|
|
36
|
+
# Verify it's running
|
|
37
|
+
curl http://localhost:3300/health
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
> Full deployment guide: [github.com/Dakera-AI/dakera-deploy](https://github.com/Dakera-AI/dakera-deploy)
|
|
41
|
+
|
|
42
|
+
### Step 2 — Install the integration
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
pip install crewai-dakera
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
### Step 3 — Add memory to your crew
|
|
49
|
+
|
|
50
|
+
```python
|
|
51
|
+
from crewai import Crew, Agent, Task
|
|
52
|
+
from crewai.memory import LongTermMemory
|
|
53
|
+
from crewai_dakera import DakeraStorage
|
|
54
|
+
|
|
55
|
+
storage = DakeraStorage(
|
|
56
|
+
api_url="http://localhost:3300",
|
|
57
|
+
api_key="dk-mykey",
|
|
58
|
+
agent_id="my-crew",
|
|
59
|
+
)
|
|
60
|
+
|
|
61
|
+
crew = Crew(
|
|
62
|
+
agents=[...],
|
|
63
|
+
tasks=[...],
|
|
64
|
+
memory=True,
|
|
65
|
+
long_term_memory=LongTermMemory(storage=storage),
|
|
66
|
+
)
|
|
67
|
+
|
|
68
|
+
result = crew.kickoff(inputs={"topic": "AI trends"})
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
Your crew now persists everything it learns across runs.
|
|
72
|
+
|
|
73
|
+
---
|
|
74
|
+
|
|
75
|
+
## Installation
|
|
76
|
+
|
|
77
|
+
```bash
|
|
78
|
+
# Core + integration
|
|
79
|
+
pip install crewai-dakera
|
|
80
|
+
|
|
81
|
+
# With CrewAI (if not already installed)
|
|
82
|
+
pip install "crewai-dakera[crewai]"
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
**Requirements:** Python ≥ 3.10, a running Dakera server (see Step 1 above)
|
|
86
|
+
|
|
87
|
+
---
|
|
88
|
+
|
|
89
|
+
## Configuration
|
|
90
|
+
|
|
91
|
+
| Parameter | Type | Default | Description |
|
|
92
|
+
|-----------|------|---------|-------------|
|
|
93
|
+
| `api_url` | `str` | — | Dakera server URL (e.g. `http://localhost:3300`) |
|
|
94
|
+
| `api_key` | `str` | `""` | API key set via `DAKERA_ROOT_API_KEY` |
|
|
95
|
+
| `agent_id` | `str` | — | Logical identifier for this crew's memory |
|
|
96
|
+
| `min_importance` | `float` | `0.0` | Minimum importance score for recalled memories |
|
|
97
|
+
| `top_k` | `int` | `5` | Number of memories to surface per turn |
|
|
98
|
+
|
|
99
|
+
Use environment variables to avoid hardcoding credentials:
|
|
100
|
+
|
|
101
|
+
```python
|
|
102
|
+
import os
|
|
103
|
+
from crewai_dakera import DakeraStorage
|
|
104
|
+
|
|
105
|
+
storage = DakeraStorage(
|
|
106
|
+
api_url=os.environ["DAKERA_URL"],
|
|
107
|
+
api_key=os.environ["DAKERA_API_KEY"],
|
|
108
|
+
agent_id="research-crew",
|
|
109
|
+
)
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
---
|
|
113
|
+
|
|
114
|
+
## Examples
|
|
115
|
+
|
|
116
|
+
### Research crew with persistent memory
|
|
117
|
+
|
|
118
|
+
```python
|
|
119
|
+
from crewai import Agent, Task, Crew, Process
|
|
120
|
+
from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
|
|
121
|
+
from crewai_dakera import DakeraStorage
|
|
122
|
+
|
|
123
|
+
dakera = DakeraStorage(
|
|
124
|
+
api_url="http://localhost:3300",
|
|
125
|
+
api_key="dk-mykey",
|
|
126
|
+
agent_id="research-crew",
|
|
127
|
+
)
|
|
128
|
+
|
|
129
|
+
researcher = Agent(
|
|
130
|
+
role="Senior Researcher",
|
|
131
|
+
goal="Uncover groundbreaking insights in {topic}",
|
|
132
|
+
backstory="An expert researcher with decades of experience.",
|
|
133
|
+
verbose=True,
|
|
134
|
+
)
|
|
135
|
+
|
|
136
|
+
writer = Agent(
|
|
137
|
+
role="Content Writer",
|
|
138
|
+
goal="Craft compelling reports based on research findings",
|
|
139
|
+
backstory="A skilled writer who turns complex ideas into clear prose.",
|
|
140
|
+
verbose=True,
|
|
141
|
+
)
|
|
142
|
+
|
|
143
|
+
research_task = Task(
|
|
144
|
+
description="Research the latest developments in {topic}",
|
|
145
|
+
expected_output="A detailed research report",
|
|
146
|
+
agent=researcher,
|
|
147
|
+
)
|
|
148
|
+
|
|
149
|
+
write_task = Task(
|
|
150
|
+
description="Write a blog post based on the research",
|
|
151
|
+
expected_output="A polished 500-word article",
|
|
152
|
+
agent=writer,
|
|
153
|
+
)
|
|
154
|
+
|
|
155
|
+
crew = Crew(
|
|
156
|
+
agents=[researcher, writer],
|
|
157
|
+
tasks=[research_task, write_task],
|
|
158
|
+
process=Process.sequential,
|
|
159
|
+
memory=True,
|
|
160
|
+
long_term_memory=LongTermMemory(storage=dakera),
|
|
161
|
+
verbose=True,
|
|
162
|
+
)
|
|
163
|
+
|
|
164
|
+
# First run — learns and stores findings
|
|
165
|
+
result = crew.kickoff(inputs={"topic": "quantum computing"})
|
|
166
|
+
print(result.raw)
|
|
167
|
+
|
|
168
|
+
# Second run — recalls prior research automatically
|
|
169
|
+
result = crew.kickoff(inputs={"topic": "quantum computing advances"})
|
|
170
|
+
print(result.raw)
|
|
171
|
+
```
|
|
172
|
+
|
|
173
|
+
### Custom importance scoring
|
|
174
|
+
|
|
175
|
+
```python
|
|
176
|
+
storage = DakeraStorage(
|
|
177
|
+
api_url="http://localhost:3300",
|
|
178
|
+
api_key="dk-mykey",
|
|
179
|
+
agent_id="my-crew",
|
|
180
|
+
min_importance=0.6, # only surface high-quality memories
|
|
181
|
+
top_k=10,
|
|
182
|
+
)
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
---
|
|
186
|
+
|
|
187
|
+
## How it works
|
|
188
|
+
|
|
189
|
+
1. After each task, CrewAI calls `DakeraStorage.save()` with the result
|
|
190
|
+
2. Dakera embeds the content server-side (no local model needed) and stores it with a semantic vector
|
|
191
|
+
3. Before the next task, CrewAI calls `DakeraStorage.search()` — Dakera performs hybrid search (vector + BM25) and returns the most relevant past memories
|
|
192
|
+
4. Memories decay gracefully over time based on access patterns — frequently-accessed memories stay prominent
|
|
193
|
+
|
|
194
|
+
---
|
|
195
|
+
|
|
196
|
+
## Related packages
|
|
197
|
+
|
|
198
|
+
| Package | Framework | Language |
|
|
199
|
+
|---------|-----------|----------|
|
|
200
|
+
| `langchain-dakera` | LangChain | Python |
|
|
201
|
+
| `llamaindex-dakera` | LlamaIndex | Python |
|
|
202
|
+
| `autogen-dakera` | AutoGen | Python |
|
|
203
|
+
| `@dakera-ai/langchain` | LangChain.js | TypeScript |
|
|
204
|
+
|
|
205
|
+
---
|
|
206
|
+
|
|
207
|
+
## Links
|
|
208
|
+
|
|
209
|
+
- [Dakera Server](https://github.com/Dakera-AI/dakera-deploy) — self-hosted memory server
|
|
210
|
+
- [Dakera Python SDK](https://github.com/Dakera-AI/dakera-py) — low-level API client
|
|
211
|
+
- [Documentation](https://docs.dakera.ai/integrations/crewai)
|
|
212
|
+
- [All integrations](https://github.com/Dakera-AI/dakera-integrations)
|
|
213
|
+
|
|
214
|
+
---
|
|
215
|
+
|
|
216
|
+
## License
|
|
217
|
+
|
|
218
|
+
MIT © [Dakera AI](https://dakera.ai)
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
[build-system]
# PEP 517/518 build configuration; setuptools >= 61 is required for PEP 621 [project] metadata.
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "crewai-dakera"
version = "0.1.0"
description = "CrewAI integration for the Dakera AI memory platform"
readme = "README.md"
license = {text = "MIT"}
authors = [{name = "Dakera Team"}]
# Matches the `python_version` pinned for mypy below and the trove classifiers.
requires-python = ">=3.10"
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Typing :: Typed",
]
keywords = ["crewai", "dakera", "memory", "ai", "agents"]
# Only the Dakera SDK is a hard dependency; CrewAI itself is an optional extra.
dependencies = ["dakera>=0.9.0"]

[project.optional-dependencies]
# `pip install "crewai-dakera[crewai]"` pulls in the framework when not already present.
crewai = ["crewai>=0.30.0"]
dev = ["pytest>=7.0.0", "pytest-asyncio>=0.21.0", "ruff>=0.1.0", "mypy>=1.0.0", "pip-audit>=2.0.0"]

[project.urls]
Homepage = "https://github.com/dakera-ai/dakera-crewai"
Documentation = "https://docs.dakera.ai/integrations/crewai"
Repository = "https://github.com/dakera-ai/dakera-crewai"

[tool.setuptools.packages.find]
# src-layout: packages live under src/.
where = ["src"]

[tool.setuptools.package-data]
# Ship the PEP 561 marker so type checkers pick up inline annotations.
crewai_dakera = ["py.typed"]

[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]

[tool.ruff]
line-length = 100
target-version = "py310"

[tool.ruff.lint]
# pycodestyle errors, pyflakes, import sorting, pyupgrade.
select = ["E", "F", "I", "UP"]

[tool.mypy]
python_version = "3.10"
ignore_missing_imports = true
check_untyped_defs = true
warn_return_any = true
warn_unused_ignores = true
|
|
File without changes
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
"""DakeraStorage — CrewAI storage backed by the Dakera AI memory platform."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from dakera import DakeraClient
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class DakeraStorage:
    """Persistent semantic storage for CrewAI agents backed by Dakera AI.

    Implements the interface CrewAI's ``LongTermMemory`` storage expects:
    ``save()`` stores a task result as an episodic memory, ``search()``
    recalls relevant past memories, and ``reset()`` is a no-op because
    Dakera memories are persistent by design.
    """

    def __init__(
        self,
        api_url: str,
        agent_id: str,
        api_key: str = "",
        search_k: int = 5,
        min_importance: float = 0.0,
        importance: float = 0.7,
        top_k: int | None = None,
    ) -> None:
        """Bind this storage to one Dakera server and agent namespace.

        Args:
            api_url: Base URL of the Dakera server (e.g. ``http://localhost:3300``).
            agent_id: Logical identifier under which memories are stored.
            api_key: Dakera API key; empty string means no key is sent.
            search_k: Default number of memories returned by :meth:`search`.
            min_importance: Memories below this importance are filtered out on
                recall; ``0.0`` disables the filter.
            importance: Importance score assigned to every saved memory.
            top_k: Alias for ``search_k`` — this is the parameter name used in
                the package README/metadata; when given it takes precedence.
        """
        self._client = DakeraClient(api_url, api_key=api_key)
        self._agent_id = agent_id
        # The package docs call this knob ``top_k``; accept both names so the
        # documented examples (``top_k=10``) work as well as ``search_k``.
        self._search_k = search_k if top_k is None else top_k
        self._min_importance = min_importance
        self._importance = importance

    def save(self, value: str, metadata: dict[str, Any] | None = None) -> None:
        """Store *value* as an episodic memory for this agent.

        Args:
            value: Text content to persist (typically a task result).
            metadata: Optional extra fields attached to the memory.
        """
        self._client.store_memory(
            self._agent_id,
            content=value,
            memory_type="episodic",
            importance=self._importance,
            metadata=metadata or {},
        )

    def search(self, query: str, limit: int | None = None) -> list[dict[str, Any]]:
        """Return memories relevant to *query*.

        Args:
            query: Free-text query sent to Dakera's recall endpoint.
            limit: Maximum number of results; defaults to the configured
                ``search_k``/``top_k``.

        Returns:
            A list of dicts, each with ``content``, ``id`` and ``score`` keys.
        """
        k = self._search_k if limit is None else limit
        # A threshold of 0.0 means "no filter" client-side, so pass None instead.
        min_imp = self._min_importance if self._min_importance > 0.0 else None
        recalled = self._client.recall(
            self._agent_id, query=query, top_k=k, min_importance=min_imp
        )
        return [
            {"content": m.content, "id": m.id, "score": m.score}
            for m in recalled.memories
        ]

    def reset(self) -> None:
        """No-op: Dakera memories are persistent by design."""
|
|
@@ -0,0 +1,250 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: crewai-dakera
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: CrewAI integration for the Dakera AI memory platform
|
|
5
|
+
Author: Dakera Team
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/dakera-ai/dakera-crewai
|
|
8
|
+
Project-URL: Documentation, https://docs.dakera.ai/integrations/crewai
|
|
9
|
+
Project-URL: Repository, https://github.com/dakera-ai/dakera-crewai
|
|
10
|
+
Keywords: crewai,dakera,memory,ai,agents
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Operating System :: OS Independent
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Classifier: Typing :: Typed
|
|
21
|
+
Requires-Python: >=3.10
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
Requires-Dist: dakera>=0.9.0
|
|
24
|
+
Provides-Extra: crewai
|
|
25
|
+
Requires-Dist: crewai>=0.30.0; extra == "crewai"
|
|
26
|
+
Provides-Extra: dev
|
|
27
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
28
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
29
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
30
|
+
Requires-Dist: mypy>=1.0.0; extra == "dev"
|
|
31
|
+
Requires-Dist: pip-audit>=2.0.0; extra == "dev"
|
|
32
|
+
|
|
33
|
+
# crewai-dakera
|
|
34
|
+
|
|
35
|
+
[](https://github.com/Dakera-AI/dakera-crewai/actions/workflows/ci.yml)
|
|
36
|
+
[](https://pypi.org/project/crewai-dakera/)
|
|
37
|
+
[](https://pypi.org/project/crewai-dakera/)
|
|
38
|
+
[](LICENSE)
|
|
39
|
+
|
|
40
|
+
**Persistent, semantically-recalled memory for [CrewAI](https://crewai.com) agents, powered by [Dakera](https://github.com/Dakera-AI/dakera-deploy).**
|
|
41
|
+
|
|
42
|
+
Your CrewAI crews remember everything — across sessions, across restarts. Dakera handles embedding, storage, and retrieval server-side with no local model required.
|
|
43
|
+
|
|
44
|
+
---
|
|
45
|
+
|
|
46
|
+
## Quick Start
|
|
47
|
+
|
|
48
|
+
### Step 1 — Run Dakera
|
|
49
|
+
|
|
50
|
+
Dakera is a self-hosted memory server. Spin it up with Docker:
|
|
51
|
+
|
|
52
|
+
```bash
|
|
53
|
+
docker run -d \
|
|
54
|
+
--name dakera \
|
|
55
|
+
-p 3300:3300 \
|
|
56
|
+
-e DAKERA_ROOT_API_KEY=dk-mykey \
|
|
57
|
+
ghcr.io/dakera-ai/dakera:latest
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
For a production setup with persistent storage, use Docker Compose:
|
|
61
|
+
|
|
62
|
+
```bash
|
|
63
|
+
# Download and start
|
|
64
|
+
curl -sSfL https://raw.githubusercontent.com/Dakera-AI/dakera-deploy/main/docker-compose.yml \
|
|
65
|
+
-o docker-compose.yml
|
|
66
|
+
DAKERA_API_KEY=dk-mykey docker compose up -d
|
|
67
|
+
|
|
68
|
+
# Verify it's running
|
|
69
|
+
curl http://localhost:3300/health
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
> Full deployment guide: [github.com/Dakera-AI/dakera-deploy](https://github.com/Dakera-AI/dakera-deploy)
|
|
73
|
+
|
|
74
|
+
### Step 2 — Install the integration
|
|
75
|
+
|
|
76
|
+
```bash
|
|
77
|
+
pip install crewai-dakera
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
### Step 3 — Add memory to your crew
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from crewai import Crew, Agent, Task
|
|
84
|
+
from crewai.memory import LongTermMemory
|
|
85
|
+
from crewai_dakera import DakeraStorage
|
|
86
|
+
|
|
87
|
+
storage = DakeraStorage(
|
|
88
|
+
api_url="http://localhost:3300",
|
|
89
|
+
api_key="dk-mykey",
|
|
90
|
+
agent_id="my-crew",
|
|
91
|
+
)
|
|
92
|
+
|
|
93
|
+
crew = Crew(
|
|
94
|
+
agents=[...],
|
|
95
|
+
tasks=[...],
|
|
96
|
+
memory=True,
|
|
97
|
+
long_term_memory=LongTermMemory(storage=storage),
|
|
98
|
+
)
|
|
99
|
+
|
|
100
|
+
result = crew.kickoff(inputs={"topic": "AI trends"})
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
Your crew now persists everything it learns across runs.
|
|
104
|
+
|
|
105
|
+
---
|
|
106
|
+
|
|
107
|
+
## Installation
|
|
108
|
+
|
|
109
|
+
```bash
|
|
110
|
+
# Core + integration
|
|
111
|
+
pip install crewai-dakera
|
|
112
|
+
|
|
113
|
+
# With CrewAI (if not already installed)
|
|
114
|
+
pip install "crewai-dakera[crewai]"
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
**Requirements:** Python ≥ 3.10, a running Dakera server (see Step 1 above)
|
|
118
|
+
|
|
119
|
+
---
|
|
120
|
+
|
|
121
|
+
## Configuration
|
|
122
|
+
|
|
123
|
+
| Parameter | Type | Default | Description |
|
|
124
|
+
|-----------|------|---------|-------------|
|
|
125
|
+
| `api_url` | `str` | — | Dakera server URL (e.g. `http://localhost:3300`) |
|
|
126
|
+
| `api_key` | `str` | `""` | API key set via `DAKERA_ROOT_API_KEY` |
|
|
127
|
+
| `agent_id` | `str` | — | Logical identifier for this crew's memory |
|
|
128
|
+
| `min_importance` | `float` | `0.0` | Minimum importance score for recalled memories |
|
|
129
|
+
| `top_k` | `int` | `5` | Number of memories to surface per turn |
|
|
130
|
+
|
|
131
|
+
Use environment variables to avoid hardcoding credentials:
|
|
132
|
+
|
|
133
|
+
```python
|
|
134
|
+
import os
|
|
135
|
+
from crewai_dakera import DakeraStorage
|
|
136
|
+
|
|
137
|
+
storage = DakeraStorage(
|
|
138
|
+
api_url=os.environ["DAKERA_URL"],
|
|
139
|
+
api_key=os.environ["DAKERA_API_KEY"],
|
|
140
|
+
agent_id="research-crew",
|
|
141
|
+
)
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
---
|
|
145
|
+
|
|
146
|
+
## Examples
|
|
147
|
+
|
|
148
|
+
### Research crew with persistent memory
|
|
149
|
+
|
|
150
|
+
```python
|
|
151
|
+
from crewai import Agent, Task, Crew, Process
|
|
152
|
+
from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
|
|
153
|
+
from crewai_dakera import DakeraStorage
|
|
154
|
+
|
|
155
|
+
dakera = DakeraStorage(
|
|
156
|
+
api_url="http://localhost:3300",
|
|
157
|
+
api_key="dk-mykey",
|
|
158
|
+
agent_id="research-crew",
|
|
159
|
+
)
|
|
160
|
+
|
|
161
|
+
researcher = Agent(
|
|
162
|
+
role="Senior Researcher",
|
|
163
|
+
goal="Uncover groundbreaking insights in {topic}",
|
|
164
|
+
backstory="An expert researcher with decades of experience.",
|
|
165
|
+
verbose=True,
|
|
166
|
+
)
|
|
167
|
+
|
|
168
|
+
writer = Agent(
|
|
169
|
+
role="Content Writer",
|
|
170
|
+
goal="Craft compelling reports based on research findings",
|
|
171
|
+
backstory="A skilled writer who turns complex ideas into clear prose.",
|
|
172
|
+
verbose=True,
|
|
173
|
+
)
|
|
174
|
+
|
|
175
|
+
research_task = Task(
|
|
176
|
+
description="Research the latest developments in {topic}",
|
|
177
|
+
expected_output="A detailed research report",
|
|
178
|
+
agent=researcher,
|
|
179
|
+
)
|
|
180
|
+
|
|
181
|
+
write_task = Task(
|
|
182
|
+
description="Write a blog post based on the research",
|
|
183
|
+
expected_output="A polished 500-word article",
|
|
184
|
+
agent=writer,
|
|
185
|
+
)
|
|
186
|
+
|
|
187
|
+
crew = Crew(
|
|
188
|
+
agents=[researcher, writer],
|
|
189
|
+
tasks=[research_task, write_task],
|
|
190
|
+
process=Process.sequential,
|
|
191
|
+
memory=True,
|
|
192
|
+
long_term_memory=LongTermMemory(storage=dakera),
|
|
193
|
+
verbose=True,
|
|
194
|
+
)
|
|
195
|
+
|
|
196
|
+
# First run — learns and stores findings
|
|
197
|
+
result = crew.kickoff(inputs={"topic": "quantum computing"})
|
|
198
|
+
print(result.raw)
|
|
199
|
+
|
|
200
|
+
# Second run — recalls prior research automatically
|
|
201
|
+
result = crew.kickoff(inputs={"topic": "quantum computing advances"})
|
|
202
|
+
print(result.raw)
|
|
203
|
+
```
|
|
204
|
+
|
|
205
|
+
### Custom importance scoring
|
|
206
|
+
|
|
207
|
+
```python
|
|
208
|
+
storage = DakeraStorage(
|
|
209
|
+
api_url="http://localhost:3300",
|
|
210
|
+
api_key="dk-mykey",
|
|
211
|
+
agent_id="my-crew",
|
|
212
|
+
min_importance=0.6, # only surface high-quality memories
|
|
213
|
+
top_k=10,
|
|
214
|
+
)
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
---
|
|
218
|
+
|
|
219
|
+
## How it works
|
|
220
|
+
|
|
221
|
+
1. After each task, CrewAI calls `DakeraStorage.save()` with the result
|
|
222
|
+
2. Dakera embeds the content server-side (no local model needed) and stores it with a semantic vector
|
|
223
|
+
3. Before the next task, CrewAI calls `DakeraStorage.search()` — Dakera performs hybrid search (vector + BM25) and returns the most relevant past memories
|
|
224
|
+
4. Memories decay gracefully over time based on access patterns — frequently-accessed memories stay prominent
|
|
225
|
+
|
|
226
|
+
---
|
|
227
|
+
|
|
228
|
+
## Related packages
|
|
229
|
+
|
|
230
|
+
| Package | Framework | Language |
|
|
231
|
+
|---------|-----------|----------|
|
|
232
|
+
| `langchain-dakera` | LangChain | Python |
|
|
233
|
+
| `llamaindex-dakera` | LlamaIndex | Python |
|
|
234
|
+
| `autogen-dakera` | AutoGen | Python |
|
|
235
|
+
| `@dakera-ai/langchain` | LangChain.js | TypeScript |
|
|
236
|
+
|
|
237
|
+
---
|
|
238
|
+
|
|
239
|
+
## Links
|
|
240
|
+
|
|
241
|
+
- [Dakera Server](https://github.com/Dakera-AI/dakera-deploy) — self-hosted memory server
|
|
242
|
+
- [Dakera Python SDK](https://github.com/Dakera-AI/dakera-py) — low-level API client
|
|
243
|
+
- [Documentation](https://docs.dakera.ai/integrations/crewai)
|
|
244
|
+
- [All integrations](https://github.com/Dakera-AI/dakera-integrations)
|
|
245
|
+
|
|
246
|
+
---
|
|
247
|
+
|
|
248
|
+
## License
|
|
249
|
+
|
|
250
|
+
MIT © [Dakera AI](https://dakera.ai)
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
src/crewai_dakera/__init__.py
|
|
4
|
+
src/crewai_dakera/py.typed
|
|
5
|
+
src/crewai_dakera/storage.py
|
|
6
|
+
src/crewai_dakera.egg-info/PKG-INFO
|
|
7
|
+
src/crewai_dakera.egg-info/SOURCES.txt
|
|
8
|
+
src/crewai_dakera.egg-info/dependency_links.txt
|
|
9
|
+
src/crewai_dakera.egg-info/requires.txt
|
|
10
|
+
src/crewai_dakera.egg-info/top_level.txt
|
|
11
|
+
tests/test_storage.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
crewai_dakera
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
"""Tests for DakeraStorage (CrewAI integration)."""
|
|
2
|
+
|
|
3
|
+
from unittest.mock import MagicMock, patch
|
|
4
|
+
|
|
5
|
+
import pytest
|
|
6
|
+
|
|
7
|
+
from crewai_dakera import DakeraStorage
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@pytest.fixture
def storage():
    """Yield a ``(DakeraStorage, mocked client)`` pair with the SDK patched out."""
    with patch("crewai_dakera.storage.DakeraClient") as client_cls:
        fake_client = MagicMock()
        client_cls.return_value = fake_client
        store = DakeraStorage(
            api_url="http://localhost:3000",
            api_key="test-key",
            agent_id="crew-1",
            search_k=3,
        )
        # Pin the instance's client to the mock explicitly, independent of how
        # the patched constructor was invoked.
        store._client = fake_client
        yield store, fake_client
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def test_save_stores_memory(storage):
    """save() forwards the value to store_memory with the default importance."""
    store, fake_client = storage
    text = "Client prefers weekly status reports"
    store.save(text)
    fake_client.store_memory.assert_called_once_with(
        "crew-1",
        content=text,
        memory_type="episodic",
        importance=0.7,
        metadata={},
    )
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def test_search_returns_memories(storage):
    """search() maps recalled memories to content/id/score dicts."""
    store, fake_client = storage
    recalled = MagicMock()
    recalled.memories = [
        MagicMock(content="Client prefers weekly status reports", id="m-1", score=0.9)
    ]
    fake_client.recall.return_value = recalled

    hits = store.search("What does the client want?")

    assert len(hits) == 1
    assert hits[0]["content"] == "Client prefers weekly status reports"
    fake_client.recall.assert_called_once_with(
        "crew-1",
        query="What does the client want?",
        top_k=3,
        min_importance=None,
    )
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def test_search_wraps_non_dict_results(storage):
    """Each recalled memory object is exposed to CrewAI as a plain dict."""
    store, fake_client = storage
    recalled = MagicMock()
    recalled.memories = [MagicMock(content="plain string memory", id="m-2", score=0.8)]
    fake_client.recall.return_value = recalled

    hits = store.search("test")

    assert hits[0]["content"] == "plain string memory"
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def test_reset_is_noop(storage):
    """reset() must never delete memories on the server."""
    store, fake_client = storage
    store.reset()
    fake_client.forget.assert_not_called()
|