graphiti-core 0.17.4__py3-none-any.whl → 0.24.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- graphiti_core/cross_encoder/gemini_reranker_client.py +1 -1
- graphiti_core/cross_encoder/openai_reranker_client.py +1 -1
- graphiti_core/decorators.py +110 -0
- graphiti_core/driver/driver.py +62 -2
- graphiti_core/driver/falkordb_driver.py +215 -23
- graphiti_core/driver/graph_operations/graph_operations.py +191 -0
- graphiti_core/driver/kuzu_driver.py +182 -0
- graphiti_core/driver/neo4j_driver.py +61 -8
- graphiti_core/driver/neptune_driver.py +305 -0
- graphiti_core/driver/search_interface/search_interface.py +89 -0
- graphiti_core/edges.py +264 -132
- graphiti_core/embedder/azure_openai.py +10 -3
- graphiti_core/embedder/client.py +2 -1
- graphiti_core/graph_queries.py +114 -101
- graphiti_core/graphiti.py +582 -255
- graphiti_core/graphiti_types.py +2 -0
- graphiti_core/helpers.py +21 -14
- graphiti_core/llm_client/anthropic_client.py +142 -52
- graphiti_core/llm_client/azure_openai_client.py +57 -19
- graphiti_core/llm_client/client.py +83 -21
- graphiti_core/llm_client/config.py +1 -1
- graphiti_core/llm_client/gemini_client.py +75 -57
- graphiti_core/llm_client/openai_base_client.py +94 -50
- graphiti_core/llm_client/openai_client.py +28 -8
- graphiti_core/llm_client/openai_generic_client.py +91 -56
- graphiti_core/models/edges/edge_db_queries.py +259 -35
- graphiti_core/models/nodes/node_db_queries.py +311 -32
- graphiti_core/nodes.py +388 -164
- graphiti_core/prompts/dedupe_edges.py +42 -31
- graphiti_core/prompts/dedupe_nodes.py +56 -39
- graphiti_core/prompts/eval.py +4 -4
- graphiti_core/prompts/extract_edges.py +23 -14
- graphiti_core/prompts/extract_nodes.py +73 -32
- graphiti_core/prompts/prompt_helpers.py +39 -0
- graphiti_core/prompts/snippets.py +29 -0
- graphiti_core/prompts/summarize_nodes.py +23 -25
- graphiti_core/search/search.py +154 -74
- graphiti_core/search/search_config.py +39 -4
- graphiti_core/search/search_filters.py +109 -31
- graphiti_core/search/search_helpers.py +5 -6
- graphiti_core/search/search_utils.py +1360 -473
- graphiti_core/tracer.py +193 -0
- graphiti_core/utils/bulk_utils.py +216 -90
- graphiti_core/utils/datetime_utils.py +13 -0
- graphiti_core/utils/maintenance/community_operations.py +62 -38
- graphiti_core/utils/maintenance/dedup_helpers.py +262 -0
- graphiti_core/utils/maintenance/edge_operations.py +286 -126
- graphiti_core/utils/maintenance/graph_data_operations.py +44 -74
- graphiti_core/utils/maintenance/node_operations.py +320 -158
- graphiti_core/utils/maintenance/temporal_operations.py +11 -3
- graphiti_core/utils/ontology_utils/entity_types_utils.py +1 -1
- graphiti_core/utils/text_utils.py +53 -0
- {graphiti_core-0.17.4.dist-info → graphiti_core-0.24.3.dist-info}/METADATA +221 -87
- graphiti_core-0.24.3.dist-info/RECORD +86 -0
- {graphiti_core-0.17.4.dist-info → graphiti_core-0.24.3.dist-info}/WHEEL +1 -1
- graphiti_core-0.17.4.dist-info/RECORD +0 -77
- /graphiti_core/{utils/maintenance/utils.py → migrations/__init__.py} +0 -0
- {graphiti_core-0.17.4.dist-info → graphiti_core-0.24.3.dist-info}/licenses/LICENSE +0 -0
graphiti_core/utils/text_utils.py (new file)

@@ -0,0 +1,53 @@
+"""
+Copyright 2024, Zep Software, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import re
+
+# Maximum length for entity/node summaries
+MAX_SUMMARY_CHARS = 500
+
+
+def truncate_at_sentence(text: str, max_chars: int) -> str:
+    """
+    Truncate text at or about max_chars while respecting sentence boundaries.
+
+    Attempts to truncate at the last complete sentence before max_chars.
+    If no sentence boundary is found before max_chars, truncates at max_chars.
+
+    Args:
+        text: The text to truncate
+        max_chars: Maximum number of characters
+
+    Returns:
+        Truncated text
+    """
+    if not text or len(text) <= max_chars:
+        return text
+
+    # Find all sentence boundaries (., !, ?) up to max_chars
+    truncated = text[:max_chars]
+
+    # Look for sentence boundaries: period, exclamation, or question mark followed by space or end
+    sentence_pattern = r'[.!?](?:\s|$)'
+    matches = list(re.finditer(sentence_pattern, truncated))
+
+    if matches:
+        # Truncate at the last sentence boundary found
+        last_match = matches[-1]
+        return text[: last_match.end()].rstrip()
+
+    # No sentence boundary found, truncate at max_chars
+    return truncated.rstrip()
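For orientation, a minimal usage sketch of the new helper above (inputs are illustrative; the module path is taken from the file list at the top of this diff):

```python
# Usage sketch for the new text_utils helpers (graphiti-core >= 0.24.3).
from graphiti_core.utils.text_utils import MAX_SUMMARY_CHARS, truncate_at_sentence

summary = 'Kendra loves Adidas shoes. She bought a new pair last week!'

# Cuts at the last sentence boundary at or before the limit.
print(truncate_at_sentence(summary, 40))  # -> 'Kendra loves Adidas shoes.'

# Text already within the limit is returned unchanged.
assert truncate_at_sentence(summary, MAX_SUMMARY_CHARS) == summary
```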
{graphiti_core-0.17.4.dist-info → graphiti_core-0.24.3.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: graphiti-core
-Version: 0.17.4
+Version: 0.24.3
 Summary: A temporal graph building library
 Project-URL: Homepage, https://help.getzep.com/graphiti/graphiti/overview
 Project-URL: Repository, https://github.com/getzep/graphiti
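To confirm which of the two versions is installed locally, the version recorded in this METADATA can be read at runtime with the standard library:

```python
# Read the installed package version from its metadata (stdlib only).
from importlib.metadata import version

print(version('graphiti-core'))  # '0.24.3' once the new wheel is installed
```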
@@ -20,17 +20,22 @@ Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.49.0; extra == 'anthropic'
 Provides-Extra: dev
 Requires-Dist: anthropic>=0.49.0; extra == 'dev'
+Requires-Dist: boto3>=1.39.16; extra == 'dev'
 Requires-Dist: diskcache-stubs>=5.6.3.6.20240818; extra == 'dev'
 Requires-Dist: falkordb<2.0.0,>=1.1.2; extra == 'dev'
 Requires-Dist: google-genai>=1.8.0; extra == 'dev'
 Requires-Dist: groq>=0.2.0; extra == 'dev'
 Requires-Dist: ipykernel>=6.29.5; extra == 'dev'
 Requires-Dist: jupyterlab>=4.2.4; extra == 'dev'
+Requires-Dist: kuzu>=0.11.3; extra == 'dev'
 Requires-Dist: langchain-anthropic>=0.2.4; extra == 'dev'
+Requires-Dist: langchain-aws>=0.2.29; extra == 'dev'
 Requires-Dist: langchain-openai>=0.2.6; extra == 'dev'
 Requires-Dist: langgraph>=0.2.15; extra == 'dev'
 Requires-Dist: langsmith>=0.1.108; extra == 'dev'
-Requires-Dist:
+Requires-Dist: opensearch-py>=3.0.0; extra == 'dev'
+Requires-Dist: opentelemetry-sdk>=1.20.0; extra == 'dev'
+Requires-Dist: pyright>=1.1.404; extra == 'dev'
 Requires-Dist: pytest-asyncio>=0.24.0; extra == 'dev'
 Requires-Dist: pytest-xdist>=3.6.1; extra == 'dev'
 Requires-Dist: pytest>=8.3.3; extra == 'dev'
@@ -44,8 +49,20 @@ Provides-Extra: google-genai
 Requires-Dist: google-genai>=1.8.0; extra == 'google-genai'
 Provides-Extra: groq
 Requires-Dist: groq>=0.2.0; extra == 'groq'
+Provides-Extra: kuzu
+Requires-Dist: kuzu>=0.11.3; extra == 'kuzu'
+Provides-Extra: neo4j-opensearch
+Requires-Dist: boto3>=1.39.16; extra == 'neo4j-opensearch'
+Requires-Dist: opensearch-py>=3.0.0; extra == 'neo4j-opensearch'
+Provides-Extra: neptune
+Requires-Dist: boto3>=1.39.16; extra == 'neptune'
+Requires-Dist: langchain-aws>=0.2.29; extra == 'neptune'
+Requires-Dist: opensearch-py>=3.0.0; extra == 'neptune'
 Provides-Extra: sentence-transformers
 Requires-Dist: sentence-transformers>=3.2.1; extra == 'sentence-transformers'
+Provides-Extra: tracing
+Requires-Dist: opentelemetry-api>=1.20.0; extra == 'tracing'
+Requires-Dist: opentelemetry-sdk>=1.20.0; extra == 'tracing'
 Provides-Extra: voyageai
 Requires-Dist: voyageai>=0.2.3; extra == 'voyageai'
 Description-Content-Type: text/markdown
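The new `tracing` extra pairs with the new `graphiti_core/tracer.py` module listed above. As a hedged sketch, the standard OpenTelemetry bootstrap those dependencies enable looks like this; how the tracer is handed to Graphiti lives in `tracer.py`, which this diff view does not expand:

```python
# Generic OpenTelemetry setup matching the 'tracing' extra's dependencies.
# NOTE: the hookup into Graphiti itself (graphiti_core/tracer.py) is not
# shown in this diff; treat the last line's destination as an assumption.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

tracer = trace.get_tracer('graphiti')  # a tracer Graphiti-side code could use
```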
@@ -83,9 +100,15 @@ Graphiti
 <br />

 > [!TIP]
-> Check out the new [MCP server for Graphiti](mcp_server/README.md)! Give Claude, Cursor, and other MCP clients powerful
+> Check out the new [MCP server for Graphiti](mcp_server/README.md)! Give Claude, Cursor, and other MCP clients powerful
+> Knowledge Graph-based memory.

-Graphiti is a framework for building and querying temporally-aware knowledge graphs, specifically tailored for AI agents
+Graphiti is a framework for building and querying temporally-aware knowledge graphs, specifically tailored for AI agents
+operating in dynamic environments. Unlike traditional retrieval-augmented generation (RAG) methods, Graphiti
+continuously integrates user interactions, structured and unstructured enterprise data, and external information into a
+coherent, queryable graph. The framework supports incremental data updates, efficient retrieval, and precise historical
+queries without requiring complete graph recomputation, making it suitable for developing interactive, context-aware AI
+applications.

 Use Graphiti to:

@@ -96,19 +119,21 @@ Use Graphiti to:
 <br />

 <p align="center">
-    <img src="images/graphiti-graph-intro.gif" alt="Graphiti temporal walkthrough" width="700px">
+  <img src="images/graphiti-graph-intro.gif" alt="Graphiti temporal walkthrough" width="700px">
 </p>

 <br />

-A knowledge graph is a network of interconnected facts, such as _"Kendra loves Adidas shoes."_ Each fact is a "triplet"
+A knowledge graph is a network of interconnected facts, such as _"Kendra loves Adidas shoes."_ Each fact is a "triplet"
+represented by two entities, or
 nodes ("Kendra", "Adidas shoes"), and their relationship, or edge ("loves"). Knowledge Graphs have been explored
 extensively for information retrieval. What makes Graphiti unique is its ability to autonomously build a knowledge graph
 while handling changing relationships and maintaining historical context.

-## Graphiti and Zep
+## Graphiti and Zep's Context Engineering Platform

-Graphiti powers the core of [Zep
+Graphiti powers the core of [Zep](https://www.getzep.com), a turn-key context engineering platform for AI Agents. Zep
+offers agent memory, Graph RAG for dynamic data, and context retrieval and assembly.

 Using Graphiti, we've demonstrated Zep is
 the [State of the Art in Agent Memory](https://blog.getzep.com/state-of-the-art-agent-memory/).
@@ -121,24 +146,45 @@ We're excited to open-source Graphiti, believing its potential reaches far beyond
 <a href="https://arxiv.org/abs/2501.13956"><img src="images/arxiv-screenshot.png" alt="Zep: A Temporal Knowledge Graph Architecture for Agent Memory" width="700px"></a>
 </p>

+## Zep vs Graphiti
+
+| Aspect | Zep | Graphiti |
+|--------|-----|----------|
+| **What they are** | Fully managed platform for context engineering and AI memory | Open-source graph framework |
+| **User & conversation management** | Built-in users, threads, and message storage | Build your own |
+| **Retrieval & performance** | Pre-configured, production-ready retrieval with sub-200ms performance at scale | Custom implementation required; performance depends on your setup |
+| **Developer tools** | Dashboard with graph visualization, debug logs, API logs; SDKs for Python, TypeScript, and Go | Build your own tools |
+| **Enterprise features** | SLAs, support, security guarantees | Self-managed |
+| **Deployment** | Fully managed or in your cloud | Self-hosted only |
+
+### When to choose which
+
+**Choose Zep** if you want a turnkey, enterprise-grade platform with security, performance, and support baked in.
+
+**Choose Graphiti** if you want a flexible OSS core and you're comfortable building/operating the surrounding system.
+
 ## Why Graphiti?

-Traditional RAG approaches often rely on batch processing and static data summarization, making them inefficient for
+Traditional RAG approaches often rely on batch processing and static data summarization, making them inefficient for
+frequently changing data. Graphiti addresses these challenges by providing:

 - **Real-Time Incremental Updates:** Immediate integration of new data episodes without batch recomputation.
-- **Bi-Temporal Data Model:** Explicit tracking of event occurrence and ingestion times, allowing accurate point-in-time
-
-- **
+- **Bi-Temporal Data Model:** Explicit tracking of event occurrence and ingestion times, allowing accurate point-in-time
+  queries.
+- **Efficient Hybrid Retrieval:** Combines semantic embeddings, keyword (BM25), and graph traversal to achieve
+  low-latency queries without reliance on LLM summarization.
+- **Custom Entity Definitions:** Flexible ontology creation and support for developer-defined entities through
+  straightforward Pydantic models.
 - **Scalability:** Efficiently manages large datasets with parallel processing, suitable for enterprise environments.

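The "Custom Entity Definitions" bullet in the list above refers to the Pydantic-based ontology support; a minimal sketch, assuming the `entity_types` parameter of `add_episode` (the `Person` model and field are illustrative):

```python
# Sketch: a developer-defined entity type passed at ingestion time.
# Assumes Graphiti.add_episode accepts an entity_types mapping.
from datetime import datetime, timezone

from pydantic import BaseModel, Field

from graphiti_core import Graphiti
from graphiti_core.nodes import EpisodeType


class Person(BaseModel):
    """A person mentioned in an episode."""

    occupation: str | None = Field(None, description='What the person does')


async def ingest(graphiti: Graphiti) -> None:
    await graphiti.add_episode(
        name='kendra-preferences',
        episode_body='Kendra, a product designer, loves Adidas shoes.',
        source=EpisodeType.text,
        source_description='example text',
        reference_time=datetime.now(timezone.utc),
        entity_types={'Person': Person},  # extracted entities get typed attributes
    )
```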
 <p align="center">
-    <img src="/images/graphiti-intro-slides-stock-2.gif" alt="Graphiti structured + unstructured demo" width="700px">
+  <img src="/images/graphiti-intro-slides-stock-2.gif" alt="Graphiti structured + unstructured demo" width="700px">
 </p>

 ## Graphiti vs. GraphRAG

 | Aspect | GraphRAG | Graphiti |
-|
+|----------------------------|---------------------------------------|--------------------------------------------------|
 | **Primary Use** | Static document summarization | Dynamic data management |
 | **Data Handling** | Batch-oriented processing | Continuous, incremental updates |
 | **Knowledge Structure** | Entity clusters & community summaries | Episodic data, semantic entities, communities |
@@ -150,14 +196,16 @@ Traditional RAG approaches often rely on batch processing and static data summarization
 | **Custom Entity Types** | No | Yes, customizable |
 | **Scalability** | Moderate | High, optimized for large datasets |

-Graphiti is specifically designed to address the challenges of dynamic and frequently updated datasets, making it
+Graphiti is specifically designed to address the challenges of dynamic and frequently updated datasets, making it
+particularly suitable for applications requiring real-time interaction and precise historical queries.

 ## Installation

 Requirements:

 - Python 3.10 or higher
-- Neo4j 5.26 / FalkorDB 1.1.2
+- Neo4j 5.26 / FalkorDB 1.1.2 / Kuzu 0.11.2 / Amazon Neptune Database Cluster or Neptune Analytics Graph + Amazon
+  OpenSearch Serverless collection (serves as the full text search backend)
 - OpenAI API key (Graphiti defaults to OpenAI for LLM inference and embedding)

 > [!IMPORTANT]
@@ -200,6 +248,28 @@ pip install graphiti-core[falkordb]
 uv add graphiti-core[falkordb]
 ```

+### Installing with Kuzu Support
+
+If you plan to use Kuzu as your graph database backend, install with the Kuzu extra:
+
+```bash
+pip install graphiti-core[kuzu]
+
+# or with uv
+uv add graphiti-core[kuzu]
+```
+
+### Installing with Amazon Neptune Support
+
+If you plan to use Amazon Neptune as your graph database backend, install with the Amazon Neptune extra:
+
+```bash
+pip install graphiti-core[neptune]
+
+# or with uv
+uv add graphiti-core[neptune]
+```
+
 ### You can also install optional LLM providers as extras:

 ```bash
@@ -217,29 +287,64 @@ pip install graphiti-core[anthropic,groq,google-genai]

 # Install with FalkorDB and LLM providers
 pip install graphiti-core[falkordb,anthropic,google-genai]
+
+# Install with Amazon Neptune
+pip install graphiti-core[neptune]
 ```

+## Default to Low Concurrency; LLM Provider 429 Rate Limit Errors
+
+Graphiti's ingestion pipelines are designed for high concurrency. By default, concurrency is set low to avoid LLM
+provider 429 rate limit errors. If you find Graphiti slow, please increase concurrency as described below.
+
+Concurrency is controlled by the `SEMAPHORE_LIMIT` environment variable. By default, `SEMAPHORE_LIMIT` is set to `10`
+concurrent operations to help prevent `429` rate limit errors from your LLM provider. If you encounter such errors, try
+lowering this value.
+
+If your LLM provider allows higher throughput, you can increase `SEMAPHORE_LIMIT` to boost episode ingestion
+performance.
+
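A small sketch of tuning this, assuming `SEMAPHORE_LIMIT` is read when `graphiti_core` is first imported (so set it in the shell, or in Python before the import):

```python
# Raise ingestion concurrency for providers with generous rate limits.
import os

os.environ['SEMAPHORE_LIMIT'] = '20'  # default is 10; lower it if you see 429s

from graphiti_core import Graphiti  # import after setting the variable

graphiti = Graphiti('bolt://localhost:7687', 'neo4j', 'password')
```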
 ## Quick Start

 > [!IMPORTANT]
-> Graphiti defaults to using OpenAI for LLM inference and embedding. Ensure that an `OPENAI_API_KEY` is set in your
+> Graphiti defaults to using OpenAI for LLM inference and embedding. Ensure that an `OPENAI_API_KEY` is set in your
+> environment.
 > Support for Anthropic and Groq LLM inferences is available, too. Other LLM providers may be supported via OpenAI
 > compatible APIs.

-For a complete working example, see the [Quickstart Example](./examples/quickstart/README.md) in the examples directory.
+For a complete working example, see the [Quickstart Example](./examples/quickstart/README.md) in the examples directory.
+The quickstart demonstrates:

-1. Connecting to a Neo4j
+1. Connecting to a Neo4j, Amazon Neptune, FalkorDB, or Kuzu database
 2. Initializing Graphiti indices and constraints
 3. Adding episodes to the graph (both text and structured JSON)
 4. Searching for relationships (edges) using hybrid search
 5. Reranking search results using graph distance
 6. Searching for nodes using predefined search recipes

-The example is fully documented with clear explanations of each functionality and includes a comprehensive README with
+The example is fully documented with clear explanations of each functionality and includes a comprehensive README with
+setup instructions and next steps.
+
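A condensed sketch of quickstart steps 1-4 against a local Neo4j (connection details and episode content are illustrative):

```python
# Condensed quickstart: connect, build indices, add an episode, search.
import asyncio
from datetime import datetime, timezone

from graphiti_core import Graphiti
from graphiti_core.nodes import EpisodeType


async def main() -> None:
    graphiti = Graphiti('bolt://localhost:7687', 'neo4j', 'password')
    try:
        await graphiti.build_indices_and_constraints()  # step 2
        await graphiti.add_episode(                     # step 3
            name='kendra-shoes',
            episode_body='Kendra loves Adidas shoes.',
            source=EpisodeType.text,
            source_description='example text',
            reference_time=datetime.now(timezone.utc),
        )
        edges = await graphiti.search('Who loves Adidas shoes?')  # step 4: hybrid search
        for edge in edges:
            print(edge.fact)
    finally:
        await graphiti.close()


asyncio.run(main())
```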
+### Running with Docker Compose
+
+You can use Docker Compose to quickly start the required services:
+
+- **Neo4j Docker:**
+  ```sh
+  docker compose up
+  ```
+  This will start the Neo4j Docker service and related components.
+
+- **FalkorDB Docker:**
+  ```sh
+  docker compose --profile falkordb up
+  ```
+  This will start the FalkorDB Docker service and related components.

 ## MCP Server

-The `mcp_server` directory contains a Model Context Protocol (MCP) server implementation for Graphiti. This server
+The `mcp_server` directory contains a Model Context Protocol (MCP) server implementation for Graphiti. This server
+allows AI assistants to interact with Graphiti's knowledge graph capabilities through the MCP protocol.

 Key features of the MCP server include:

@@ -249,7 +354,8 @@ Key features of the MCP server include:
 - Group management for organizing related data
 - Graph maintenance operations

-The MCP server can be deployed using Docker with Neo4j, making it easy to integrate Graphiti into your AI assistant
+The MCP server can be deployed using Docker with Neo4j, making it easy to integrate Graphiti into your AI assistant
+workflows.

 For detailed setup instructions and usage examples, see the [MCP server README](./mcp_server/README.md).

@@ -272,7 +378,8 @@ Database names are configured directly in the driver constructors:
 - **Neo4j**: Database name defaults to `neo4j` (hardcoded in Neo4jDriver)
 - **FalkorDB**: Database name defaults to `default_db` (hardcoded in FalkorDriver)

-As of v0.17.0, if you need to customize your database configuration, you can instantiate a database driver and pass it
+As of v0.17.0, if you need to customize your database configuration, you can instantiate a database driver and pass it
+to the Graphiti constructor using the `graph_driver` parameter.

 #### Neo4j with Custom Database Name

@@ -311,48 +418,71 @@ driver = FalkorDriver(
 graphiti = Graphiti(graph_driver=driver)
 ```

-Note that this feature is not supported for Neo4j Community edition or for smaller AuraDB instances,
-as such this feature is off by default.
+#### Kuzu
+
+```python
+from graphiti_core import Graphiti
+from graphiti_core.driver.kuzu_driver import KuzuDriver
+
+# Create a Kuzu driver
+driver = KuzuDriver(db="/tmp/graphiti.kuzu")
+
+# Pass the driver to Graphiti
+graphiti = Graphiti(graph_driver=driver)
+```
+
+#### Amazon Neptune

 ```python
-from openai import AsyncAzureOpenAI
 from graphiti_core import Graphiti
-from graphiti_core.
-from graphiti_core.embedder.openai import OpenAIEmbedder, OpenAIEmbedderConfig
-from graphiti_core.cross_encoder.openai_reranker_client import OpenAIRerankerClient
+from graphiti_core.driver.neptune_driver import NeptuneDriver

-#
-azure_endpoint=llm_endpoint
+# Create an Amazon Neptune driver
+driver = NeptuneDriver(
+    host=<NEPTUNE ENDPOINT>,
+    aoss_host=<Amazon OpenSearch Serverless Host>,
+    port=<PORT>,       # Optional, defaults to 8182
+    aoss_port=<PORT>,  # Optional, defaults to 443
 )

+# Pass the driver to Graphiti
+graphiti = Graphiti(graph_driver=driver)
+```
+
+## Using Graphiti with Azure OpenAI
+
+Graphiti supports Azure OpenAI for both LLM inference and embeddings using Azure's OpenAI v1 API compatibility layer.
+
+### Quick Start
+
+```python
+from openai import AsyncOpenAI
+from graphiti_core import Graphiti
+from graphiti_core.llm_client.azure_openai_client import AzureOpenAILLMClient
+from graphiti_core.llm_client.config import LLMConfig
+from graphiti_core.embedder.azure_openai import AzureOpenAIEmbedderClient
+
+# Initialize Azure OpenAI client using the standard OpenAI client
+# with Azure's v1 API endpoint
+azure_client = AsyncOpenAI(
+    base_url="https://your-resource-name.openai.azure.com/openai/v1/",
+    api_key="your-api-key",
 )

-# Create LLM
-model="gpt-
+# Create LLM and Embedder clients
+llm_client = AzureOpenAILLMClient(
+    azure_client=azure_client,
+    config=LLMConfig(model="gpt-5-mini", small_model="gpt-5-mini")  # Your Azure deployment name
+)
+embedder_client = AzureOpenAIEmbedderClient(
+    azure_client=azure_client,
+    model="text-embedding-3-small"  # Your Azure embedding deployment name
 )

 # Initialize Graphiti with Azure OpenAI clients
@@ -360,32 +490,24 @@ graphiti = Graphiti(
     "bolt://localhost:7687",
     "neo4j",
     "password",
-    llm_client=
-        client=llm_client_azure
-    ),
-    embedder=OpenAIEmbedder(
-        config=OpenAIEmbedderConfig(
-            embedding_model="text-embedding-3-small-deployment"  # Your Azure embedding deployment name
-        ),
-        client=embedding_client_azure
-    ),
-    cross_encoder=OpenAIRerankerClient(
-        llm_config=LLMConfig(
-            model=azure_llm_config.small_model  # Use small model for reranking
-        ),
-        client=llm_client_azure
-    )
+    llm_client=llm_client,
+    embedder=embedder_client,
 )

 # Now you can use Graphiti with Azure OpenAI
 ```
-
+**Key Points:**
+- Use the standard `AsyncOpenAI` client with Azure's v1 API endpoint format: `https://your-resource-name.openai.azure.com/openai/v1/`
+- The deployment names (e.g., `gpt-5-mini`, `text-embedding-3-small`) should match your Azure OpenAI deployment names
+- See `examples/azure-openai/` for a complete working example
+
+Make sure to replace the placeholder values with your actual Azure OpenAI credentials and deployment names.

 ## Using Graphiti with Google Gemini

-Graphiti supports Google's Gemini models for LLM inference, embeddings, and cross-encoding/reranking. To use Gemini,
+Graphiti supports Google's Gemini models for LLM inference, embeddings, and cross-encoding/reranking. To use Gemini,
+you'll need to configure the LLM client, embedder, and the cross-encoder with your Google API key.

 Install Graphiti:

|
|
|
426
548
|
cross_encoder=GeminiRerankerClient(
|
|
427
549
|
config=LLMConfig(
|
|
428
550
|
api_key=api_key,
|
|
429
|
-
model="gemini-2.5-flash-lite
|
|
551
|
+
model="gemini-2.5-flash-lite"
|
|
430
552
|
)
|
|
431
553
|
)
|
|
432
554
|
)
|
|
@@ -434,32 +556,40 @@ graphiti = Graphiti(
 # Now you can use Graphiti with Google Gemini for all components
 ```

-The Gemini reranker uses the `gemini-2.5-flash-lite
+The Gemini reranker uses the `gemini-2.5-flash-lite` model by default, which is optimized for
+cost-effective and low-latency classification tasks. It uses the same boolean classification approach as the OpenAI
+reranker, leveraging Gemini's log probabilities feature to rank passage relevance.

 ## Using Graphiti with Ollama (Local LLM)

-Graphiti supports Ollama for running local LLMs and embedding models via Ollama's OpenAI-compatible API. This is ideal
+Graphiti supports Ollama for running local LLMs and embedding models via Ollama's OpenAI-compatible API. This is ideal
+for privacy-focused applications or when you want to avoid API costs.
+
+**Note:** Use `OpenAIGenericClient` (not `OpenAIClient`) for Ollama and other OpenAI-compatible providers like LM Studio. The `OpenAIGenericClient` is optimized for local models with a higher default max token limit (16K vs 8K) and full support for structured outputs.

 Install the models:
+
+```bash
 ollama pull deepseek-r1:7b  # LLM
 ollama pull nomic-embed-text  # embeddings
+```

 ```python
 from graphiti_core import Graphiti
 from graphiti_core.llm_client.config import LLMConfig
-from graphiti_core.llm_client.
+from graphiti_core.llm_client.openai_generic_client import OpenAIGenericClient
 from graphiti_core.embedder.openai import OpenAIEmbedder, OpenAIEmbedderConfig
 from graphiti_core.cross_encoder.openai_reranker_client import OpenAIRerankerClient

 # Configure Ollama LLM client
 llm_config = LLMConfig(
-    api_key="
+    api_key="ollama",  # Ollama doesn't require a real API key, but some placeholder is needed
     model="deepseek-r1:7b",
     small_model="deepseek-r1:7b",
-    base_url="http://localhost:11434/v1",
+    base_url="http://localhost:11434/v1",  # Ollama's OpenAI-compatible endpoint
 )

-llm_client =
+llm_client = OpenAIGenericClient(config=llm_config)

 # Initialize Graphiti with Ollama clients
 graphiti = Graphiti(
|
|
|
469
599
|
llm_client=llm_client,
|
|
470
600
|
embedder=OpenAIEmbedder(
|
|
471
601
|
config=OpenAIEmbedderConfig(
|
|
472
|
-
api_key="
|
|
602
|
+
api_key="ollama", # Placeholder API key
|
|
473
603
|
embedding_model="nomic-embed-text",
|
|
474
604
|
embedding_dim=768,
|
|
475
605
|
base_url="http://localhost:11434/v1",
|
|
@@ -487,11 +617,12 @@ Ensure Ollama is running (`ollama serve`) and that you have pulled the models yo
|
|
|
487
617
|
|
|
488
618
|
- [Guides and API documentation](https://help.getzep.com/graphiti).
|
|
489
619
|
- [Quick Start](https://help.getzep.com/graphiti/graphiti/quick-start)
|
|
490
|
-
- [Building an agent with LangChain's LangGraph and Graphiti](https://help.getzep.com/graphiti/
|
|
620
|
+
- [Building an agent with LangChain's LangGraph and Graphiti](https://help.getzep.com/graphiti/integrations/lang-graph-agent)
|
|
491
621
|
|
|
492
622
|
## Telemetry
|
|
493
623
|
|
|
494
|
-
Graphiti collects anonymous usage statistics to help us understand how the framework is being used and improve it for
|
|
624
|
+
Graphiti collects anonymous usage statistics to help us understand how the framework is being used and improve it for
|
|
625
|
+
everyone. We believe transparency is important, so here's exactly what we collect and why.
|
|
495
626
|
|
|
496
627
|
### What We Collect
|
|
497
628
|
|
|
@@ -501,9 +632,9 @@ When you initialize a Graphiti instance, we collect:
|
|
|
501
632
|
- **System information**: Operating system, Python version, and system architecture
|
|
502
633
|
- **Graphiti version**: The version you're using
|
|
503
634
|
- **Configuration choices**:
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
635
|
+
- LLM provider type (OpenAI, Azure, Anthropic, etc.)
|
|
636
|
+
- Database backend (Neo4j, FalkorDB, Kuzu, Amazon Neptune Database or Neptune Analytics)
|
|
637
|
+
- Embedder provider (OpenAI, Azure, Voyage, etc.)
|
|
507
638
|
|
|
508
639
|
### What We Don't Collect
|
|
509
640
|
|
|
@@ -555,10 +686,12 @@ echo 'export GRAPHITI_TELEMETRY_ENABLED=false' >> ~/.zshrc
|
|
|
555
686
|
|
|
556
687
|
```python
|
|
557
688
|
import os
|
|
689
|
+
|
|
558
690
|
os.environ['GRAPHITI_TELEMETRY_ENABLED'] = 'false'
|
|
559
691
|
|
|
560
692
|
# Then initialize Graphiti as usual
|
|
561
693
|
from graphiti_core import Graphiti
|
|
694
|
+
|
|
562
695
|
graphiti = Graphiti(...)
|
|
563
696
|
```
|
|
564
697
|
|
|
@@ -567,7 +700,8 @@ Telemetry is automatically disabled during test runs (when `pytest` is detected)
|
|
|
567
700
|
### Technical Details
|
|
568
701
|
|
|
569
702
|
- Telemetry uses PostHog for anonymous analytics collection
|
|
570
|
-
- All telemetry operations are designed to fail silently - they will never interrupt your application or affect Graphiti
|
|
703
|
+
- All telemetry operations are designed to fail silently - they will never interrupt your application or affect Graphiti
|
|
704
|
+
functionality
|
|
571
705
|
- The anonymous ID is stored locally and is not tied to any personal information
|
|
572
706
|
|
|
573
707
|
## Status and Roadmap
|
|
@@ -575,8 +709,8 @@ Telemetry is automatically disabled during test runs (when `pytest` is detected)
|
|
|
575
709
|
Graphiti is under active development. We aim to maintain API stability while working on:
|
|
576
710
|
|
|
577
711
|
- [x] Supporting custom graph schemas:
|
|
578
|
-
|
|
579
|
-
|
|
712
|
+
- Allow developers to provide their own defined node and edge classes when ingesting episodes
|
|
713
|
+
- Enable more flexible knowledge representation tailored to specific use cases
|
|
580
714
|
- [x] Enhancing retrieval capabilities with more robust and configurable options
|
|
581
715
|
- [x] Graphiti MCP Server
|
|
582
716
|
- [ ] Expanding test coverage to ensure reliability and catch edge cases
|