graphiti-core 0.8.8__tar.gz → 0.9.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of graphiti-core might be problematic.

Files changed (64)
  1. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/PKG-INFO +67 -64
  2. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/README.md +63 -62
  3. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/edges.py +9 -9
  4. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/embedder/__init__.py +5 -1
  5. graphiti_core-0.9.1/graphiti_core/embedder/gemini.py +68 -0
  6. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/graphiti.py +1 -4
  7. graphiti_core-0.9.1/graphiti_core/llm_client/gemini_client.py +186 -0
  8. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/llm_client/openai_client.py +2 -0
  9. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/nodes.py +9 -9
  10. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/lib.py +18 -54
  11. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/maintenance/__init__.py +1 -4
  12. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/maintenance/community_operations.py +1 -5
  13. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/maintenance/node_operations.py +1 -5
  14. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/pyproject.toml +6 -2
  15. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/LICENSE +0 -0
  16. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/__init__.py +0 -0
  17. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/cross_encoder/__init__.py +0 -0
  18. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/cross_encoder/bge_reranker_client.py +0 -0
  19. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/cross_encoder/client.py +0 -0
  20. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/cross_encoder/openai_reranker_client.py +0 -0
  21. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/embedder/client.py +0 -0
  22. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/embedder/openai.py +0 -0
  23. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/embedder/voyage.py +0 -0
  24. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/errors.py +0 -0
  25. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/helpers.py +0 -0
  26. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/llm_client/__init__.py +0 -0
  27. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/llm_client/anthropic_client.py +0 -0
  28. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/llm_client/client.py +0 -0
  29. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/llm_client/config.py +0 -0
  30. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/llm_client/errors.py +0 -0
  31. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/llm_client/groq_client.py +0 -0
  32. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/llm_client/openai_generic_client.py +0 -0
  33. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/llm_client/utils.py +0 -0
  34. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/models/__init__.py +0 -0
  35. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/models/edges/__init__.py +0 -0
  36. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/models/edges/edge_db_queries.py +0 -0
  37. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/models/nodes/__init__.py +0 -0
  38. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/models/nodes/node_db_queries.py +0 -0
  39. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/__init__.py +0 -0
  40. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/dedupe_edges.py +0 -0
  41. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/dedupe_nodes.py +0 -0
  42. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/eval.py +0 -0
  43. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/extract_edge_dates.py +0 -0
  44. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/extract_edges.py +0 -0
  45. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/extract_nodes.py +0 -0
  46. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/invalidate_edges.py +0 -0
  47. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/models.py +0 -0
  48. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/prompt_helpers.py +0 -0
  49. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/prompts/summarize_nodes.py +0 -0
  50. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/py.typed +0 -0
  51. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/search/__init__.py +0 -0
  52. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/search/search.py +0 -0
  53. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/search/search_config.py +0 -0
  54. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/search/search_config_recipes.py +0 -0
  55. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/search/search_filters.py +0 -0
  56. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/search/search_utils.py +0 -0
  57. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/__init__.py +0 -0
  58. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/bulk_utils.py +0 -0
  59. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/datetime_utils.py +0 -0
  60. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/maintenance/edge_operations.py +0 -0
  61. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/maintenance/graph_data_operations.py +0 -0
  62. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/maintenance/temporal_operations.py +0 -0
  63. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/maintenance/utils.py +0 -0
  64. {graphiti_core-0.8.8 → graphiti_core-0.9.1}/graphiti_core/utils/ontology_utils/entity_types_utils.py +0 -0

--- graphiti_core-0.8.8/PKG-INFO
+++ graphiti_core-0.9.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: graphiti-core
-Version: 0.8.8
+Version: 0.9.1
 Summary: A temporal graph building library
 License: Apache-2.0
 Author: Paul Paliychuk
@@ -11,7 +11,9 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: anthropic (>=0.49.0,<0.50.0)
+Provides-Extra: anthropic
+Provides-Extra: google-genai
+Provides-Extra: groq
 Requires-Dist: diskcache (>=5.6.3,<6.0.0)
 Requires-Dist: neo4j (>=5.23.0,<6.0.0)
 Requires-Dist: numpy (>=1.0.0)
@@ -58,8 +60,8 @@ Use Graphiti to:
 
 <br />
 
-A knowledge graph is a network of interconnected facts, such as _Kendra loves Adidas shoes.”_ Each fact is a triplet represented by two entities, or
-nodes (_”Kendra”_, _“Adidas shoes”_), and their relationship, or edge (_”loves”_). Knowledge Graphs have been explored
+A knowledge graph is a network of interconnected facts, such as _"Kendra loves Adidas shoes."_ Each fact is a "triplet" represented by two entities, or
+nodes ("Kendra", "Adidas shoes"), and their relationship, or edge ("loves"). Knowledge Graphs have been explored
 extensively for information retrieval. What makes Graphiti unique is its ability to autonomously build a knowledge graph
 while handling changing relationships and maintaining historical context.
 
@@ -119,7 +121,7 @@ Requirements:
 
 Optional:
 
-- Anthropic or Groq API key (for alternative LLM providers)
+- Google Gemini, Anthropic, or Groq API key (for alternative LLM providers)
 
 > [!TIP]
 > The simplest way to install Neo4j is via [Neo4j Desktop](https://neo4j.com/download/). It provides a user-friendly
@@ -135,6 +137,22 @@ or
 poetry add graphiti-core
 ```
 
+You can also install optional LLM providers as extras:
+
+```bash
+# Install with Anthropic support
+pip install graphiti-core[anthropic]
+
+# Install with Groq support
+pip install graphiti-core[groq]
+
+# Install with Google Gemini support
+pip install graphiti-core[google-genai]
+
+# Install with multiple providers
+pip install graphiti-core[anthropic,groq,google-genai]
+```
+
 ## Quick Start
 
 > [!IMPORTANT]
@@ -142,67 +160,16 @@ poetry add graphiti-core
 > Support for Anthropic and Groq LLM inferences is available, too. Other LLM providers may be supported via OpenAI
 > compatible APIs.
 
-```python
-from graphiti_core import Graphiti
-from graphiti_core.nodes import EpisodeType
-from datetime import datetime, timezone
-
-# Initialize Graphiti as Your Memory Layer
-graphiti = Graphiti("bolt://localhost:7687", "neo4j", "password")
-
-# Initialize the graph database with Graphiti's indices. This only needs to be done once.
-graphiti.build_indices_and_constraints()
-
-# Add episodes
-episodes = [
-    "Kamala Harris is the Attorney General of California. She was previously "
-    "the district attorney for San Francisco.",
-    "As AG, Harris was in office from January 3, 2011 – January 3, 2017",
-]
-for i, episode in enumerate(episodes):
-    await graphiti.add_episode(
-        name=f"Freakonomics Radio {i}",
-        episode_body=episode,
-        source=EpisodeType.text,
-        source_description="podcast",
-        reference_time=datetime.now(timezone.utc)
-    )
+For a complete working example, see the [Quickstart Example](./examples/quickstart/README.md) in the examples directory. The quickstart demonstrates:
 
-# Search the graph for semantic memory retrieval
-# Execute a hybrid search combining semantic similarity and BM25 retrieval
-# Results are combined and reranked using Reciprocal Rank Fusion
-results = await graphiti.search('Who was the California Attorney General?')
-[
-    EntityEdge(
-    │   uuid = '3133258f738e487383f07b04e15d4ac0',
-    │   source_node_uuid = '2a85789b318d4e418050506879906e62',
-    │   target_node_uuid = 'baf7781f445945989d6e4f927f881556',
-    │   created_at = datetime.datetime(2024, 8, 26, 13, 13, 24, 861097),
-    │   name = 'HELD_POSITION',
-    # the fact reflects the updated state that Harris is
-    # no longer the AG of California
-    │   fact = 'Kamala Harris was the Attorney General of California',
-    │   fact_embedding = [
-    │   │   -0.009955154731869698,
-    │   ...
-    │   │   0.00784289836883545
-    │   ],
-    │   episodes = ['b43e98ad0a904088a76c67985caecc22'],
-    │   expired_at = datetime.datetime(2024, 8, 26, 20, 18, 1, 53812),
-    # These dates represent the date this edge was true.
-    │   valid_at = datetime.datetime(2011, 1, 3, 0, 0, tzinfo=<UTC>),
-    │   invalid_at = datetime.datetime(2017, 1, 3, 0, 0, tzinfo=<UTC>)
-    )
-]
+1. Connecting to a Neo4j database
+2. Initializing Graphiti indices and constraints
+3. Adding episodes to the graph (both text and structured JSON)
+4. Searching for relationships (edges) using hybrid search
+5. Reranking search results using graph distance
+6. Searching for nodes using predefined search recipes
 
-# Rerank search results based on graph distance
-# Provide a node UUID to prioritize results closer to that node in the graph.
-# Results are weighted by their proximity, with distant edges receiving lower scores.
-await graphiti.search('Who was the California Attorney General?', center_node_uuid)
-
-# Close the connection when chat state management is complete
-graphiti.close()
-```
+The example is fully documented with clear explanations of each functionality and includes a comprehensive README with setup instructions and next steps.
 
 ## Graph Service
 
@@ -285,6 +252,42 @@ graphiti = Graphiti(
 
 Make sure to replace the placeholder values with your actual Azure OpenAI credentials and specify the correct embedding model name that's deployed in your Azure OpenAI service.
 
+## Using Graphiti with Google Gemini
+
+Graphiti supports Google's Gemini models for both LLM inference and embeddings. To use Gemini, you'll need to configure both the LLM client and embedder with your Google API key.
+
+```python
+from graphiti_core import Graphiti
+from graphiti_core.llm_client.gemini_client import GeminiClient, LLMConfig
+from graphiti_core.embedder.gemini import GeminiEmbedder, GeminiEmbedderConfig
+
+# Google API key configuration
+api_key = "<your-google-api-key>"
+
+# Initialize Graphiti with Gemini clients
+graphiti = Graphiti(
+    "bolt://localhost:7687",
+    "neo4j",
+    "password",
+    llm_client=GeminiClient(
+        config=LLMConfig(
+            api_key=api_key,
+            model="gemini-2.0-flash"
+        )
+    ),
+    embedder=GeminiEmbedder(
+        config=GeminiEmbedderConfig(
+            api_key=api_key,
+            embedding_model="embedding-001"
+        )
+    )
+)
+
+# Now you can use Graphiti with Google Gemini
+```
+
+Make sure to replace the placeholder value with your actual Google API key. You can find more details in the example file at `examples/gemini_example.py`.
+
 ## Documentation
 
 - [Guides and API documentation](https://help.getzep.com/graphiti).

--- graphiti_core-0.8.8/README.md
+++ graphiti_core-0.9.1/README.md
@@ -35,8 +35,8 @@ Use Graphiti to:
 
 <br />
 
-A knowledge graph is a network of interconnected facts, such as _Kendra loves Adidas shoes.”_ Each fact is a triplet represented by two entities, or
-nodes (_”Kendra”_, _“Adidas shoes”_), and their relationship, or edge (_”loves”_). Knowledge Graphs have been explored
+A knowledge graph is a network of interconnected facts, such as _"Kendra loves Adidas shoes."_ Each fact is a "triplet" represented by two entities, or
+nodes ("Kendra", "Adidas shoes"), and their relationship, or edge ("loves"). Knowledge Graphs have been explored
 extensively for information retrieval. What makes Graphiti unique is its ability to autonomously build a knowledge graph
 while handling changing relationships and maintaining historical context.
 
@@ -96,7 +96,7 @@ Requirements:
 
 Optional:
 
-- Anthropic or Groq API key (for alternative LLM providers)
+- Google Gemini, Anthropic, or Groq API key (for alternative LLM providers)
 
 > [!TIP]
 > The simplest way to install Neo4j is via [Neo4j Desktop](https://neo4j.com/download/). It provides a user-friendly
@@ -112,6 +112,22 @@ or
 poetry add graphiti-core
 ```
 
+You can also install optional LLM providers as extras:
+
+```bash
+# Install with Anthropic support
+pip install graphiti-core[anthropic]
+
+# Install with Groq support
+pip install graphiti-core[groq]
+
+# Install with Google Gemini support
+pip install graphiti-core[google-genai]
+
+# Install with multiple providers
+pip install graphiti-core[anthropic,groq,google-genai]
+```
+
 ## Quick Start
 
 > [!IMPORTANT]
@@ -119,67 +135,16 @@ poetry add graphiti-core
 > Support for Anthropic and Groq LLM inferences is available, too. Other LLM providers may be supported via OpenAI
 > compatible APIs.
 
-```python
-from graphiti_core import Graphiti
-from graphiti_core.nodes import EpisodeType
-from datetime import datetime, timezone
-
-# Initialize Graphiti as Your Memory Layer
-graphiti = Graphiti("bolt://localhost:7687", "neo4j", "password")
-
-# Initialize the graph database with Graphiti's indices. This only needs to be done once.
-graphiti.build_indices_and_constraints()
-
-# Add episodes
-episodes = [
-    "Kamala Harris is the Attorney General of California. She was previously "
-    "the district attorney for San Francisco.",
-    "As AG, Harris was in office from January 3, 2011 – January 3, 2017",
-]
-for i, episode in enumerate(episodes):
-    await graphiti.add_episode(
-        name=f"Freakonomics Radio {i}",
-        episode_body=episode,
-        source=EpisodeType.text,
-        source_description="podcast",
-        reference_time=datetime.now(timezone.utc)
-    )
+For a complete working example, see the [Quickstart Example](./examples/quickstart/README.md) in the examples directory. The quickstart demonstrates:
 
-# Search the graph for semantic memory retrieval
-# Execute a hybrid search combining semantic similarity and BM25 retrieval
-# Results are combined and reranked using Reciprocal Rank Fusion
-results = await graphiti.search('Who was the California Attorney General?')
-[
-    EntityEdge(
-    │   uuid = '3133258f738e487383f07b04e15d4ac0',
-    │   source_node_uuid = '2a85789b318d4e418050506879906e62',
-    │   target_node_uuid = 'baf7781f445945989d6e4f927f881556',
-    │   created_at = datetime.datetime(2024, 8, 26, 13, 13, 24, 861097),
-    │   name = 'HELD_POSITION',
-    # the fact reflects the updated state that Harris is
-    # no longer the AG of California
-    │   fact = 'Kamala Harris was the Attorney General of California',
-    │   fact_embedding = [
-    │   │   -0.009955154731869698,
-    │   ...
-    │   │   0.00784289836883545
-    │   ],
-    │   episodes = ['b43e98ad0a904088a76c67985caecc22'],
-    │   expired_at = datetime.datetime(2024, 8, 26, 20, 18, 1, 53812),
-    # These dates represent the date this edge was true.
-    │   valid_at = datetime.datetime(2011, 1, 3, 0, 0, tzinfo=<UTC>),
-    │   invalid_at = datetime.datetime(2017, 1, 3, 0, 0, tzinfo=<UTC>)
-    )
-]
+1. Connecting to a Neo4j database
+2. Initializing Graphiti indices and constraints
+3. Adding episodes to the graph (both text and structured JSON)
+4. Searching for relationships (edges) using hybrid search
+5. Reranking search results using graph distance
+6. Searching for nodes using predefined search recipes
 
-# Rerank search results based on graph distance
-# Provide a node UUID to prioritize results closer to that node in the graph.
-# Results are weighted by their proximity, with distant edges receiving lower scores.
-await graphiti.search('Who was the California Attorney General?', center_node_uuid)
-
-# Close the connection when chat state management is complete
-graphiti.close()
-```
+The example is fully documented with clear explanations of each functionality and includes a comprehensive README with setup instructions and next steps.
 
 ## Graph Service
 
@@ -262,6 +227,42 @@ graphiti = Graphiti(
 
 Make sure to replace the placeholder values with your actual Azure OpenAI credentials and specify the correct embedding model name that's deployed in your Azure OpenAI service.
 
+## Using Graphiti with Google Gemini
+
+Graphiti supports Google's Gemini models for both LLM inference and embeddings. To use Gemini, you'll need to configure both the LLM client and embedder with your Google API key.
+
+```python
+from graphiti_core import Graphiti
+from graphiti_core.llm_client.gemini_client import GeminiClient, LLMConfig
+from graphiti_core.embedder.gemini import GeminiEmbedder, GeminiEmbedderConfig
+
+# Google API key configuration
+api_key = "<your-google-api-key>"
+
+# Initialize Graphiti with Gemini clients
+graphiti = Graphiti(
+    "bolt://localhost:7687",
+    "neo4j",
+    "password",
+    llm_client=GeminiClient(
+        config=LLMConfig(
+            api_key=api_key,
+            model="gemini-2.0-flash"
+        )
+    ),
+    embedder=GeminiEmbedder(
+        config=GeminiEmbedderConfig(
+            api_key=api_key,
+            embedding_model="embedding-001"
+        )
+    )
+)
+
+# Now you can use Graphiti with Google Gemini
+```
+
+Make sure to replace the placeholder value with your actual Google API key. You can find more details in the example file at `examples/gemini_example.py`.
+
 ## Documentation
 
 - [Guides and API documentation](https://help.getzep.com/graphiti).

--- graphiti_core-0.8.8/graphiti_core/edges.py
+++ graphiti_core-0.9.1/graphiti_core/edges.py
@@ -143,9 +143,9 @@ class EpisodicEdge(Edge):
         driver: AsyncDriver,
         group_ids: list[str],
         limit: int | None = None,
-        created_at: datetime | None = None,
+        uuid_cursor: str | None = None,
     ):
-        cursor_query: LiteralString = 'AND e.created_at < $created_at' if created_at else ''
+        cursor_query: LiteralString = 'AND e.uuid < $uuid' if uuid_cursor else ''
         limit_query: LiteralString = 'LIMIT $limit' if limit is not None else ''
 
         records, _, _ = await driver.execute_query(
@@ -165,7 +165,7 @@ class EpisodicEdge(Edge):
             """
             + limit_query,
             group_ids=group_ids,
-            created_at=created_at,
+            uuid=uuid_cursor,
             limit=limit,
             database_=DEFAULT_DATABASE,
             routing_='r',
@@ -297,9 +297,9 @@ class EntityEdge(Edge):
         driver: AsyncDriver,
         group_ids: list[str],
         limit: int | None = None,
-        created_at: datetime | None = None,
+        uuid_cursor: str | None = None,
     ):
-        cursor_query: LiteralString = 'AND e.created_at < $created_at' if created_at else ''
+        cursor_query: LiteralString = 'AND e.uuid < $uuid' if uuid_cursor else ''
         limit_query: LiteralString = 'LIMIT $limit' if limit is not None else ''
 
         records, _, _ = await driver.execute_query(
@@ -326,7 +326,7 @@ class EntityEdge(Edge):
             """
             + limit_query,
             group_ids=group_ids,
-            created_at=created_at,
+            uuid=uuid_cursor,
             limit=limit,
             database_=DEFAULT_DATABASE,
             routing_='r',
@@ -430,9 +430,9 @@ class CommunityEdge(Edge):
         driver: AsyncDriver,
         group_ids: list[str],
         limit: int | None = None,
-        created_at: datetime | None = None,
+        uuid_cursor: str | None = None,
     ):
-        cursor_query: LiteralString = 'AND e.created_at < $created_at' if created_at else ''
+        cursor_query: LiteralString = 'AND e.uuid < $uuid' if uuid_cursor else ''
        limit_query: LiteralString = 'LIMIT $limit' if limit is not None else ''
 
         records, _, _ = await driver.execute_query(
@@ -452,7 +452,7 @@ class CommunityEdge(Edge):
             """
             + limit_query,
             group_ids=group_ids,
-            created_at=created_at,
+            uuid=uuid_cursor,
             limit=limit,
             database_=DEFAULT_DATABASE,
             routing_='r',
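
These hunks swap the `created_at` datetime cursor for a `uuid_cursor` string across all three edge classes, and the Cypher predicate moves from `e.created_at < $created_at` to `e.uuid < $uuid`; the same rename lands in `nodes.py` below. That is a breaking change for callers paginating by timestamp. A minimal sketch of the 0.9.1 calling convention, assuming the enclosing classmethod is named `get_by_group_ids` (the hunk headers do not show the method name) and that `driver` and `last_uuid` already exist in an async context:

```python
from graphiti_core.edges import EntityEdge

# Page through a group's edges keyed by uuid instead of created_at.
# `last_uuid` is the uuid of the final edge on the previous page
# (hypothetical variable, not part of the diff).
edges = await EntityEdge.get_by_group_ids(
    driver,
    group_ids=['my-group'],
    limit=50,
    uuid_cursor=last_uuid,  # 0.8.8 passed created_at=<datetime> here
)
```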

--- graphiti_core-0.8.8/graphiti_core/embedder/__init__.py
+++ graphiti_core-0.9.1/graphiti_core/embedder/__init__.py
@@ -1,4 +1,8 @@
 from .client import EmbedderClient
 from .openai import OpenAIEmbedder, OpenAIEmbedderConfig
 
-__all__ = ['EmbedderClient', 'OpenAIEmbedder', 'OpenAIEmbedderConfig']
+__all__ = [
+    'EmbedderClient',
+    'OpenAIEmbedder',
+    'OpenAIEmbedderConfig',
+]

--- /dev/null
+++ graphiti_core-0.9.1/graphiti_core/embedder/gemini.py
@@ -0,0 +1,68 @@
+"""
+Copyright 2024, Zep Software, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from collections.abc import Iterable
+
+from google import genai  # type: ignore
+from google.genai import types  # type: ignore
+from pydantic import Field
+
+from .client import EmbedderClient, EmbedderConfig
+
+DEFAULT_EMBEDDING_MODEL = 'embedding-001'
+
+
+class GeminiEmbedderConfig(EmbedderConfig):
+    embedding_model: str = Field(default=DEFAULT_EMBEDDING_MODEL)
+    api_key: str | None = None
+
+
+class GeminiEmbedder(EmbedderClient):
+    """
+    Google Gemini Embedder Client
+    """
+
+    def __init__(self, config: GeminiEmbedderConfig | None = None):
+        if config is None:
+            config = GeminiEmbedderConfig()
+        self.config = config
+
+        # Configure the Gemini API
+        self.client = genai.Client(
+            api_key=config.api_key,
+        )
+
+    async def create(
+        self, input_data: str | list[str] | Iterable[int] | Iterable[Iterable[int]]
+    ) -> list[float]:
+        """
+        Create embeddings for the given input data using Google's Gemini embedding model.
+
+        Args:
+            input_data: The input data to create embeddings for. Can be a string, list of strings,
+                        or an iterable of integers or iterables of integers.
+
+        Returns:
+            A list of floats representing the embedding vector.
+        """
+        # Generate embeddings
+        result = await self.client.aio.models.embed_content(
+            model=self.config.embedding_model or DEFAULT_EMBEDDING_MODEL,
+            contents=[input_data],
+            config=types.EmbedContentConfig(output_dimensionality=self.config.embedding_dim),
+        )
+
+        return result.embeddings[0].values
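
The new embedder is self-contained, so a usage sketch follows directly from the code above (placeholder API key; run inside an async context):

```python
import asyncio

from graphiti_core.embedder.gemini import GeminiEmbedder, GeminiEmbedderConfig


async def main() -> None:
    embedder = GeminiEmbedder(
        config=GeminiEmbedderConfig(api_key='<your-google-api-key>')
    )
    # create() wraps client.aio.models.embed_content and returns one vector
    vector = await embedder.create('Kendra loves Adidas shoes')
    print(len(vector))


asyncio.run(main())
```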

--- graphiti_core-0.8.8/graphiti_core/graphiti.py
+++ graphiti_core-0.9.1/graphiti_core/graphiti.py
@@ -71,10 +71,7 @@ from graphiti_core.utils.maintenance.graph_data_operations import (
     build_indices_and_constraints,
     retrieve_episodes,
 )
-from graphiti_core.utils.maintenance.node_operations import (
-    extract_nodes,
-    resolve_extracted_nodes,
-)
+from graphiti_core.utils.maintenance.node_operations import extract_nodes, resolve_extracted_nodes
 from graphiti_core.utils.maintenance.temporal_operations import get_edge_contradictions
 from graphiti_core.utils.ontology_utils.entity_types_utils import validate_entity_types
 

--- /dev/null
+++ graphiti_core-0.9.1/graphiti_core/llm_client/gemini_client.py
@@ -0,0 +1,186 @@
+"""
+Copyright 2024, Zep Software, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import json
+import logging
+import typing
+
+from google import genai  # type: ignore
+from google.genai import types  # type: ignore
+from pydantic import BaseModel
+
+from ..prompts.models import Message
+from .client import LLMClient
+from .config import DEFAULT_MAX_TOKENS, LLMConfig
+from .errors import RateLimitError
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_MODEL = 'gemini-2.0-flash'
+
+
+class GeminiClient(LLMClient):
+    """
+    GeminiClient is a client class for interacting with Google's Gemini language models.
+
+    This class extends the LLMClient and provides methods to initialize the client
+    and generate responses from the Gemini language model.
+
+    Attributes:
+        model (str): The model name to use for generating responses.
+        temperature (float): The temperature to use for generating responses.
+        max_tokens (int): The maximum number of tokens to generate in a response.
+
+    Methods:
+        __init__(config: LLMConfig | None = None, cache: bool = False):
+            Initializes the GeminiClient with the provided configuration and cache setting.
+
+        _generate_response(messages: list[Message]) -> dict[str, typing.Any]:
+            Generates a response from the language model based on the provided messages.
+    """
+
+    def __init__(
+        self,
+        config: LLMConfig | None = None,
+        cache: bool = False,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
+    ):
+        """
+        Initialize the GeminiClient with the provided configuration and cache setting.
+
+        Args:
+            config (LLMConfig | None): The configuration for the LLM client, including API key, model, temperature, and max tokens.
+            cache (bool): Whether to use caching for responses. Defaults to False.
+        """
+        if config is None:
+            config = LLMConfig()
+
+        super().__init__(config, cache)
+
+        self.model = config.model
+        # Configure the Gemini API
+        self.client = genai.Client(
+            api_key=config.api_key,
+        )
+        self.max_tokens = max_tokens
+
+    async def _generate_response(
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
+    ) -> dict[str, typing.Any]:
+        """
+        Generate a response from the Gemini language model.
+
+        Args:
+            messages (list[Message]): A list of messages to send to the language model.
+            response_model (type[BaseModel] | None): An optional Pydantic model to parse the response into.
+            max_tokens (int): The maximum number of tokens to generate in the response.
+
+        Returns:
+            dict[str, typing.Any]: The response from the language model.
+
+        Raises:
+            RateLimitError: If the API rate limit is exceeded.
+            RefusalError: If the content is blocked by the model.
+            Exception: If there is an error generating the response.
+        """
+        try:
+            gemini_messages: list[types.Content] = []
+            # If a response model is provided, add schema for structured output
+            system_prompt = ''
+            if response_model is not None:
+                # Get the schema from the Pydantic model
+                pydantic_schema = response_model.model_json_schema()
+
+                # Create instruction to output in the desired JSON format
+                system_prompt += (
+                    f'Output ONLY valid JSON matching this schema: {json.dumps(pydantic_schema)}.\n'
+                    'Do not include any explanatory text before or after the JSON.\n\n'
+                )
+
+            # Add messages content
+            # First check for a system message
+            if messages and messages[0].role == 'system':
+                system_prompt = f'{messages[0].content}\n\n {system_prompt}'
+                messages = messages[1:]
+
+            # Add the rest of the messages
+            for m in messages:
+                m.content = self._clean_input(m.content)
+                gemini_messages.append(
+                    types.Content(role=m.role, parts=[types.Part.from_text(text=m.content)])
+                )
+
+            # Create generation config
+            generation_config = types.GenerateContentConfig(
+                temperature=self.temperature,
+                max_output_tokens=max_tokens or self.max_tokens,
+                response_mime_type='application/json' if response_model else None,
+                response_schema=response_model if response_model else None,
+                system_instruction=system_prompt,
+            )
+
+            # Generate content using the simple string approach
+            response = await self.client.aio.models.generate_content(
+                model=self.model or DEFAULT_MODEL,
+                contents=gemini_messages,
+                config=generation_config,
+            )
+
+            # If this was a structured output request, parse the response into the Pydantic model
+            if response_model is not None:
+                try:
+                    validated_model = response_model.model_validate(json.loads(response.text))
+
+                    # Return as a dictionary for API consistency
+                    return validated_model.model_dump()
+                except Exception as e:
+                    raise Exception(f'Failed to parse structured response: {e}') from e
+
+            # Otherwise, return the response text as a dictionary
+            return {'content': response.text}
+
+        except Exception as e:
+            # Check if it's a rate limit error
+            if 'rate limit' in str(e).lower() or 'quota' in str(e).lower():
+                raise RateLimitError from e
+            logger.error(f'Error in generating LLM response: {e}')
+            raise
+
+    async def generate_response(
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
+    ) -> dict[str, typing.Any]:
+        """
+        Generate a response from the Gemini language model.
+        This method overrides the parent class method to provide a direct implementation.
+
+        Args:
+            messages (list[Message]): A list of messages to send to the language model.
+            response_model (type[BaseModel] | None): An optional Pydantic model to parse the response into.
+            max_tokens (int): The maximum number of tokens to generate in the response.
+
+        Returns:
+            dict[str, typing.Any]: The response from the language model.
+        """
+        # Call the internal _generate_response method
+        return await self._generate_response(
+            messages=messages, response_model=response_model, max_tokens=max_tokens
+        )
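
A sketch of structured output with the new client, derived from the signatures above; the `Fact` model and prompt text are illustrative, and `Message(role=..., content=...)` is inferred from how `_generate_response` reads those fields:

```python
import asyncio

from pydantic import BaseModel

from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.llm_client.gemini_client import GeminiClient
from graphiti_core.prompts.models import Message


class Fact(BaseModel):  # illustrative schema, not part of the package
    subject: str
    predicate: str
    object: str


async def main() -> None:
    client = GeminiClient(
        config=LLMConfig(api_key='<your-google-api-key>', model='gemini-2.0-flash')
    )
    # The client injects Fact's JSON schema into the system instruction and
    # validates the model's JSON reply before returning a plain dict.
    result = await client.generate_response(
        messages=[Message(role='user', content='Extract the fact: Kendra loves Adidas shoes.')],
        response_model=Fact,
    )
    print(result)


asyncio.run(main())
```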

--- graphiti_core-0.8.8/graphiti_core/llm_client/openai_client.py
+++ graphiti_core-0.9.1/graphiti_core/llm_client/openai_client.py
@@ -87,6 +87,8 @@ class OpenAIClient(LLMClient):
         else:
             self.client = client
 
+        self.max_tokens = max_tokens
+
     async def _generate_response(
         self,
         messages: list[Message],

--- graphiti_core-0.8.8/graphiti_core/nodes.py
+++ graphiti_core-0.9.1/graphiti_core/nodes.py
@@ -216,9 +216,9 @@ class EpisodicNode(Node):
         driver: AsyncDriver,
         group_ids: list[str],
         limit: int | None = None,
-        created_at: datetime | None = None,
+        uuid_cursor: str | None = None,
     ):
-        cursor_query: LiteralString = 'AND e.created_at < $created_at' if created_at else ''
+        cursor_query: LiteralString = 'AND e.uuid < $uuid' if uuid_cursor else ''
         limit_query: LiteralString = 'LIMIT $limit' if limit is not None else ''
 
         records, _, _ = await driver.execute_query(
@@ -241,7 +241,7 @@ class EpisodicNode(Node):
             """
             + limit_query,
             group_ids=group_ids,
-            created_at=created_at,
+            uuid=uuid_cursor,
             limit=limit,
             database_=DEFAULT_DATABASE,
             routing_='r',
@@ -348,9 +348,9 @@ class EntityNode(Node):
         driver: AsyncDriver,
         group_ids: list[str],
         limit: int | None = None,
-        created_at: datetime | None = None,
+        uuid_cursor: str | None = None,
     ):
-        cursor_query: LiteralString = 'AND n.created_at < $created_at' if created_at else ''
+        cursor_query: LiteralString = 'AND n.uuid < $uuid' if uuid_cursor else ''
         limit_query: LiteralString = 'LIMIT $limit' if limit is not None else ''
 
         records, _, _ = await driver.execute_query(
@@ -372,7 +372,7 @@ class EntityNode(Node):
             """
             + limit_query,
             group_ids=group_ids,
-            created_at=created_at,
+            uuid=uuid_cursor,
             limit=limit,
             database_=DEFAULT_DATABASE,
             routing_='r',
@@ -465,9 +465,9 @@ class CommunityNode(Node):
         driver: AsyncDriver,
         group_ids: list[str],
         limit: int | None = None,
-        created_at: datetime | None = None,
+        uuid_cursor: str | None = None,
     ):
-        cursor_query: LiteralString = 'AND n.created_at < $created_at' if created_at else ''
+        cursor_query: LiteralString = 'AND n.uuid < $uuid' if uuid_cursor else ''
         limit_query: LiteralString = 'LIMIT $limit' if limit is not None else ''
 
         records, _, _ = await driver.execute_query(
@@ -487,7 +487,7 @@ class CommunityNode(Node):
             """
             + limit_query,
             group_ids=group_ids,
-            created_at=created_at,
+            uuid=uuid_cursor,
             limit=limit,
             database_=DEFAULT_DATABASE,
             routing_='r',

--- graphiti_core-0.8.8/graphiti_core/prompts/lib.py
+++ graphiti_core-0.9.1/graphiti_core/prompts/lib.py
@@ -16,63 +16,27 @@ limitations under the License.
 
 from typing import Any, Protocol, TypedDict
 
-from .dedupe_edges import (
-    Prompt as DedupeEdgesPrompt,
-)
-from .dedupe_edges import (
-    Versions as DedupeEdgesVersions,
-)
-from .dedupe_edges import (
-    versions as dedupe_edges_versions,
-)
-from .dedupe_nodes import (
-    Prompt as DedupeNodesPrompt,
-)
-from .dedupe_nodes import (
-    Versions as DedupeNodesVersions,
-)
-from .dedupe_nodes import (
-    versions as dedupe_nodes_versions,
-)
+from .dedupe_edges import Prompt as DedupeEdgesPrompt
+from .dedupe_edges import Versions as DedupeEdgesVersions
+from .dedupe_edges import versions as dedupe_edges_versions
+from .dedupe_nodes import Prompt as DedupeNodesPrompt
+from .dedupe_nodes import Versions as DedupeNodesVersions
+from .dedupe_nodes import versions as dedupe_nodes_versions
 from .eval import Prompt as EvalPrompt
 from .eval import Versions as EvalVersions
 from .eval import versions as eval_versions
-from .extract_edge_dates import (
-    Prompt as ExtractEdgeDatesPrompt,
-)
-from .extract_edge_dates import (
-    Versions as ExtractEdgeDatesVersions,
-)
-from .extract_edge_dates import (
-    versions as extract_edge_dates_versions,
-)
-from .extract_edges import (
-    Prompt as ExtractEdgesPrompt,
-)
-from .extract_edges import (
-    Versions as ExtractEdgesVersions,
-)
-from .extract_edges import (
-    versions as extract_edges_versions,
-)
-from .extract_nodes import (
-    Prompt as ExtractNodesPrompt,
-)
-from .extract_nodes import (
-    Versions as ExtractNodesVersions,
-)
-from .extract_nodes import (
-    versions as extract_nodes_versions,
-)
-from .invalidate_edges import (
-    Prompt as InvalidateEdgesPrompt,
-)
-from .invalidate_edges import (
-    Versions as InvalidateEdgesVersions,
-)
-from .invalidate_edges import (
-    versions as invalidate_edges_versions,
-)
+from .extract_edge_dates import Prompt as ExtractEdgeDatesPrompt
+from .extract_edge_dates import Versions as ExtractEdgeDatesVersions
+from .extract_edge_dates import versions as extract_edge_dates_versions
+from .extract_edges import Prompt as ExtractEdgesPrompt
+from .extract_edges import Versions as ExtractEdgesVersions
+from .extract_edges import versions as extract_edges_versions
+from .extract_nodes import Prompt as ExtractNodesPrompt
+from .extract_nodes import Versions as ExtractNodesVersions
+from .extract_nodes import versions as extract_nodes_versions
+from .invalidate_edges import Prompt as InvalidateEdgesPrompt
+from .invalidate_edges import Versions as InvalidateEdgesVersions
+from .invalidate_edges import versions as invalidate_edges_versions
 from .models import Message, PromptFunction
 from .prompt_helpers import DO_NOT_ESCAPE_UNICODE
 from .summarize_nodes import Prompt as SummarizeNodesPrompt

--- graphiti_core-0.8.8/graphiti_core/utils/maintenance/__init__.py
+++ graphiti_core-0.9.1/graphiti_core/utils/maintenance/__init__.py
@@ -1,8 +1,5 @@
 from .edge_operations import build_episodic_edges, extract_edges
-from .graph_data_operations import (
-    clear_data,
-    retrieve_episodes,
-)
+from .graph_data_operations import clear_data, retrieve_episodes
 from .node_operations import extract_nodes
 
 __all__ = [

--- graphiti_core-0.8.8/graphiti_core/utils/maintenance/community_operations.py
+++ graphiti_core-0.9.1/graphiti_core/utils/maintenance/community_operations.py
@@ -9,11 +9,7 @@ from graphiti_core.edges import CommunityEdge
 from graphiti_core.embedder import EmbedderClient
 from graphiti_core.helpers import DEFAULT_DATABASE, semaphore_gather
 from graphiti_core.llm_client import LLMClient
-from graphiti_core.nodes import (
-    CommunityNode,
-    EntityNode,
-    get_community_node_from_record,
-)
+from graphiti_core.nodes import CommunityNode, EntityNode, get_community_node_from_record
 from graphiti_core.prompts import prompt_library
 from graphiti_core.prompts.summarize_nodes import Summary, SummaryDescription
 from graphiti_core.utils.datetime_utils import utc_now

--- graphiti_core-0.8.8/graphiti_core/utils/maintenance/node_operations.py
+++ graphiti_core-0.9.1/graphiti_core/utils/maintenance/node_operations.py
@@ -26,11 +26,7 @@ from graphiti_core.llm_client import LLMClient
 from graphiti_core.nodes import EntityNode, EpisodeType, EpisodicNode
 from graphiti_core.prompts import prompt_library
 from graphiti_core.prompts.dedupe_nodes import NodeDuplicate
-from graphiti_core.prompts.extract_nodes import (
-    EntityClassification,
-    ExtractedNodes,
-    MissedEntities,
-)
+from graphiti_core.prompts.extract_nodes import EntityClassification, ExtractedNodes, MissedEntities
 from graphiti_core.prompts.summarize_nodes import Summary
 from graphiti_core.utils.datetime_utils import utc_now
 

--- graphiti_core-0.8.8/pyproject.toml
+++ graphiti_core-0.9.1/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "graphiti-core"
-version = "0.8.8"
+version = "0.9.1"
 description = "A temporal graph building library"
 authors = [
     "Paul Paliychuk <paul@getzep.com>",
@@ -21,7 +21,11 @@ openai = "^1.53.0"
 tenacity = "9.0.0"
 numpy = ">=1.0.0"
 python-dotenv = "^1.0.1"
-anthropic = "~0.49.0"
+
+[tool.poetry.extras]
+anthropic = ["anthropic"]
+groq = ["groq"]
+google-genai = ["google-genai"]
 
 [tool.poetry.group.dev.dependencies]
 mypy = "^1.11.1"