graphiti-core 0.2.0.tar.gz → 0.2.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (37)
  1. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/PKG-INFO +4 -2
  2. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/README.md +3 -1
  3. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/graphiti.py +21 -21
  4. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/search/search.py +2 -2
  5. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/search/search_utils.py +171 -47
  6. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/utils/bulk_utils.py +1 -1
  7. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/utils/maintenance/node_operations.py +15 -15
  8. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/pyproject.toml +1 -1
  9. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/LICENSE +0 -0
  10. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/__init__.py +0 -0
  11. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/edges.py +0 -0
  12. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/helpers.py +0 -0
  13. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/llm_client/__init__.py +0 -0
  14. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/llm_client/anthropic_client.py +0 -0
  15. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/llm_client/client.py +0 -0
  16. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/llm_client/config.py +0 -0
  17. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/llm_client/groq_client.py +0 -0
  18. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/llm_client/openai_client.py +0 -0
  19. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/llm_client/utils.py +0 -0
  20. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/nodes.py +0 -0
  21. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/prompts/__init__.py +0 -0
  22. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/prompts/dedupe_edges.py +0 -0
  23. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/prompts/dedupe_nodes.py +0 -0
  24. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/prompts/extract_edge_dates.py +0 -0
  25. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/prompts/extract_edges.py +0 -0
  26. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/prompts/extract_nodes.py +0 -0
  27. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/prompts/invalidate_edges.py +0 -0
  28. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/prompts/lib.py +0 -0
  29. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/prompts/models.py +0 -0
  30. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/search/__init__.py +0 -0
  31. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/utils/__init__.py +0 -0
  32. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/utils/maintenance/__init__.py +0 -0
  33. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/utils/maintenance/edge_operations.py +0 -0
  34. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/utils/maintenance/graph_data_operations.py +0 -0
  35. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/utils/maintenance/temporal_operations.py +0 -0
  36. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/utils/maintenance/utils.py +0 -0
  37. {graphiti_core-0.2.0 → graphiti_core-0.2.1}/graphiti_core/utils/utils.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: graphiti-core
-Version: 0.2.0
+Version: 0.2.1
 Summary: A temporal graph building library
 License: Apache-2.0
 Author: Paul Paliychuk
@@ -173,7 +173,9 @@ graphiti.close()
 
 ## Documentation
 
-Visit the Zep knowledge base for Graphiti [Guides and API documentation](https://help.getzep.com/Graphiti/Graphiti).
+- [Guides and API documentation](https://help.getzep.com/Graphiti/Graphiti).
+- [Quick Start](https://help.getzep.com/graphiti/graphiti/quick-start)
+- [Building an agent with LangChain's LangGraph and Graphiti](https://help.getzep.com/graphiti/graphiti/lang-graph-agent)
 
 ## Status and Roadmap
 
README.md

@@ -151,7 +151,9 @@ graphiti.close()
 
 ## Documentation
 
-Visit the Zep knowledge base for Graphiti [Guides and API documentation](https://help.getzep.com/Graphiti/Graphiti).
+- [Guides and API documentation](https://help.getzep.com/Graphiti/Graphiti).
+- [Quick Start](https://help.getzep.com/graphiti/graphiti/quick-start)
+- [Building an agent with LangChain's LangGraph and Graphiti](https://help.getzep.com/graphiti/graphiti/lang-graph-agent)
 
 ## Status and Roadmap
 
graphiti_core/graphiti.py

@@ -180,9 +180,9 @@ class Graphiti:
         await build_indices_and_constraints(self.driver)
 
     async def retrieve_episodes(
-        self,
-        reference_time: datetime,
-        last_n: int = EPISODE_WINDOW_LEN,
+        self,
+        reference_time: datetime,
+        last_n: int = EPISODE_WINDOW_LEN,
     ) -> list[EpisodicNode]:
         """
         Retrieve the last n episodic nodes from the graph.
@@ -210,14 +210,14 @@ class Graphiti:
         return await retrieve_episodes(self.driver, reference_time, last_n)
 
     async def add_episode(
-        self,
-        name: str,
-        episode_body: str,
-        source_description: str,
-        reference_time: datetime,
-        source: EpisodeType = EpisodeType.message,
-        success_callback: Callable | None = None,
-        error_callback: Callable | None = None,
+        self,
+        name: str,
+        episode_body: str,
+        source_description: str,
+        reference_time: datetime,
+        source: EpisodeType = EpisodeType.message,
+        success_callback: Callable | None = None,
+        error_callback: Callable | None = None,
     ):
         """
         Process an episode and update the graph.
@@ -321,11 +321,11 @@ class Graphiti:
             await asyncio.gather(
                 *[
                     get_relevant_edges(
-                        [edge],
                         self.driver,
-                        RELEVANT_SCHEMA_LIMIT,
+                        [edge],
                         edge.source_node_uuid,
                         edge.target_node_uuid,
+                        RELEVANT_SCHEMA_LIMIT,
                     )
                     for edge in extracted_edges
                 ]
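The call-site reordering in the hunk above reflects a breaking signature change in get_relevant_edges: the driver now comes first, the source and target node UUIDs are explicit positional arguments, and the limit moves to the end. A minimal sketch of the new call shape for anyone invoking the helper directly (the variable names are illustrative, not taken from the package):

    # Hedged sketch: calling the 0.2.1 helper directly. Only get_relevant_edges
    # and RELEVANT_SCHEMA_LIMIT come from graphiti_core; the rest are placeholders.
    relevant = await get_relevant_edges(
        driver,                  # neo4j AsyncDriver, now the first argument
        [edge],                  # list[EntityEdge] to find related edges for
        edge.source_node_uuid,   # constrain matches to this source node
        edge.target_node_uuid,   # constrain matches to this target node
        RELEVANT_SCHEMA_LIMIT,   # limit, now the last argument
    )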
@@ -422,8 +422,8 @@ class Graphiti:
             raise e
 
     async def add_episode_bulk(
-        self,
-        bulk_episodes: list[RawEpisode],
+        self,
+        bulk_episodes: list[RawEpisode],
     ):
         """
         Process multiple episodes in bulk and update the graph.
@@ -587,18 +587,18 @@ class Graphiti:
         return edges
 
     async def _search(
-        self,
-        query: str,
-        timestamp: datetime,
-        config: SearchConfig,
-        center_node_uuid: str | None = None,
+        self,
+        query: str,
+        timestamp: datetime,
+        config: SearchConfig,
+        center_node_uuid: str | None = None,
     ):
         return await hybrid_search(
             self.driver, self.llm_client.get_embedder(), query, timestamp, config, center_node_uuid
         )
 
     async def get_nodes_by_query(
-        self, query: str, limit: int = RELEVANT_SCHEMA_LIMIT
+        self, query: str, limit: int = RELEVANT_SCHEMA_LIMIT
     ) -> list[EntityNode]:
         """
         Retrieve nodes from the graph database based on a text query.
graphiti_core/search/search.py

@@ -83,7 +83,7 @@ async def hybrid_search(
     nodes.extend(await get_mentioned_nodes(driver, episodes))
 
     if SearchMethod.bm25 in config.search_methods:
-        text_search = await edge_fulltext_search(driver, query, 2 * config.num_edges)
+        text_search = await edge_fulltext_search(driver, query, None, None, 2 * config.num_edges)
         search_results.append(text_search)
 
     if SearchMethod.cosine_similarity in config.search_methods:
@@ -95,7 +95,7 @@ async def hybrid_search(
         )
 
         similarity_search = await edge_similarity_search(
-            driver, search_vector, 2 * config.num_edges
+            driver, search_vector, None, None, 2 * config.num_edges
         )
         search_results.append(similarity_search)
 
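These two call sites show the other side of the same signature change: edge_fulltext_search and edge_similarity_search now take the source and target node UUIDs as explicit positional arguments, where None means "do not constrain that end of the edge", and the limit moves to the last position. A hedged sketch of an unconstrained call outside hybrid_search (the query string and embedding are placeholders):

    # Hedged sketch: unconstrained BM25 and vector searches against the 0.2.1 helpers.
    text_results = await edge_fulltext_search(driver, 'alice works at acme', None, None, 20)
    vector_results = await edge_similarity_search(driver, query_embedding, None, None, 20)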
graphiti_core/search/search_utils.py

@@ -1,11 +1,11 @@
 import asyncio
 import logging
 import re
-import typing
 from collections import defaultdict
 from time import time
+from typing import Any
 
-from neo4j import AsyncDriver
+from neo4j import AsyncDriver, Query
 
 from graphiti_core.edges import EntityEdge
 from graphiti_core.helpers import parse_db_date
@@ -66,12 +66,12 @@ async def bfs(node_ids: list[str], driver: AsyncDriver):
         r.expired_at AS expired_at,
         r.valid_at AS valid_at,
         r.invalid_at AS invalid_at
-
+
         """,
         node_ids=node_ids,
     )
 
-    context: dict[str, typing.Any] = {}
+    context: dict[str, Any] = {}
 
     for record in records:
         n_uuid = record['source_node_uuid']
@@ -96,15 +96,14 @@ async def bfs(node_ids: list[str], driver: AsyncDriver):
 
 
 async def edge_similarity_search(
-    driver: AsyncDriver,
-    search_vector: list[float],
-    limit: int = RELEVANT_SCHEMA_LIMIT,
-    source_node_uuid: str = '*',
-    target_node_uuid: str = '*',
+    driver: AsyncDriver,
+    search_vector: list[float],
+    source_node_uuid: str | None,
+    target_node_uuid: str | None,
+    limit: int = RELEVANT_SCHEMA_LIMIT,
 ) -> list[EntityEdge]:
     # vector similarity search over embedded facts
-    records, _, _ = await driver.execute_query(
-        """
+    query = Query("""
         CALL db.index.vector.queryRelationships("fact_embedding", $limit, $search_vector)
         YIELD relationship AS rel, score
         MATCH (n:Entity {uuid: $source_uuid})-[r {uuid: rel.uuid}]-(m:Entity {uuid: $target_uuid})
@@ -121,7 +120,68 @@ async def edge_similarity_search(
         r.valid_at AS valid_at,
         r.invalid_at AS invalid_at
         ORDER BY score DESC
-        """,
+        """)
+
+    if source_node_uuid is None and target_node_uuid is None:
+        query = Query("""
+        CALL db.index.vector.queryRelationships("fact_embedding", $limit, $search_vector)
+        YIELD relationship AS rel, score
+        MATCH (n:Entity)-[r {uuid: rel.uuid}]-(m:Entity)
+        RETURN
+            r.uuid AS uuid,
+            n.uuid AS source_node_uuid,
+            m.uuid AS target_node_uuid,
+            r.created_at AS created_at,
+            r.name AS name,
+            r.fact AS fact,
+            r.fact_embedding AS fact_embedding,
+            r.episodes AS episodes,
+            r.expired_at AS expired_at,
+            r.valid_at AS valid_at,
+            r.invalid_at AS invalid_at
+        ORDER BY score DESC
+        """)
+    elif source_node_uuid is None:
+        query = Query("""
+        CALL db.index.vector.queryRelationships("fact_embedding", $limit, $search_vector)
+        YIELD relationship AS rel, score
+        MATCH (n:Entity)-[r {uuid: rel.uuid}]-(m:Entity {uuid: $target_uuid})
+        RETURN
+            r.uuid AS uuid,
+            n.uuid AS source_node_uuid,
+            m.uuid AS target_node_uuid,
+            r.created_at AS created_at,
+            r.name AS name,
+            r.fact AS fact,
+            r.fact_embedding AS fact_embedding,
+            r.episodes AS episodes,
+            r.expired_at AS expired_at,
+            r.valid_at AS valid_at,
+            r.invalid_at AS invalid_at
+        ORDER BY score DESC
+        """)
+    elif target_node_uuid is None:
+        query = Query("""
+        CALL db.index.vector.queryRelationships("fact_embedding", $limit, $search_vector)
+        YIELD relationship AS rel, score
+        MATCH (n:Entity {uuid: $source_uuid})-[r {uuid: rel.uuid}]-(m:Entity)
+        RETURN
+            r.uuid AS uuid,
+            n.uuid AS source_node_uuid,
+            m.uuid AS target_node_uuid,
+            r.created_at AS created_at,
+            r.name AS name,
+            r.fact AS fact,
+            r.fact_embedding AS fact_embedding,
+            r.episodes AS episodes,
+            r.expired_at AS expired_at,
+            r.valid_at AS valid_at,
+            r.invalid_at AS invalid_at
+        ORDER BY score DESC
+        """)
+
+    records, _, _ = await driver.execute_query(
+        query,
         search_vector=search_vector,
         source_uuid=source_node_uuid,
         target_uuid=target_node_uuid,
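The four Query blocks above differ only in which ends of the edge the MATCH clause pins down: both nodes, only the target, only the source, or neither. As a design note, the same dispatch could be expressed more compactly by assembling the MATCH pattern from the two None checks; this is just a sketch of that idea, not how the package implements it:

    # Hedged sketch: equivalent dispatch with a dynamically assembled MATCH clause.
    source_part = '(n:Entity {uuid: $source_uuid})' if source_node_uuid is not None else '(n:Entity)'
    target_part = '(m:Entity {uuid: $target_uuid})' if target_node_uuid is not None else '(m:Entity)'
    match_clause = f'MATCH {source_part}-[r {{uuid: rel.uuid}}]-{target_part}'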
@@ -151,7 +211,7 @@ async def edge_similarity_search(
 
 
 async def entity_similarity_search(
-    search_vector: list[float], driver: AsyncDriver, limit=RELEVANT_SCHEMA_LIMIT
+    search_vector: list[float], driver: AsyncDriver, limit=RELEVANT_SCHEMA_LIMIT
 ) -> list[EntityNode]:
     # vector similarity search over entity names
     records, _, _ = await driver.execute_query(
@@ -161,6 +221,7 @@ async def entity_similarity_search(
         RETURN
             n.uuid As uuid,
             n.name AS name,
+            n.name_embeddings AS name_embedding,
             n.created_at AS created_at,
             n.summary AS summary
         ORDER BY score DESC
@@ -175,6 +236,7 @@ async def entity_similarity_search(
         EntityNode(
             uuid=record['uuid'],
             name=record['name'],
+            name_embedding=record['name_embedding'],
             labels=['Entity'],
             created_at=record['created_at'].to_native(),
             summary=record['summary'],
@@ -185,7 +247,7 @@ async def entity_similarity_search(
 
 
 async def entity_fulltext_search(
-    query: str, driver: AsyncDriver, limit=RELEVANT_SCHEMA_LIMIT
+    query: str, driver: AsyncDriver, limit=RELEVANT_SCHEMA_LIMIT
 ) -> list[EntityNode]:
     # BM25 search to get top nodes
     fuzzy_query = re.sub(r'[^\w\s]', '', query) + '~'
@@ -193,8 +255,9 @@ async def entity_fulltext_search(
         """
         CALL db.index.fulltext.queryNodes("name_and_summary", $query) YIELD node, score
         RETURN
-            node.uuid As uuid,
+            node.uuid AS uuid,
             node.name AS name,
+            node.name_embeddings AS name_embedding,
             node.created_at AS created_at,
             node.summary AS summary
         ORDER BY score DESC
@@ -210,6 +273,7 @@ async def entity_fulltext_search(
         EntityNode(
             uuid=record['uuid'],
             name=record['name'],
+            name_embedding=record['name_embedding'],
             labels=['Entity'],
             created_at=record['created_at'].to_native(),
             summary=record['summary'],
@@ -220,21 +284,18 @@ async def entity_fulltext_search(
 
 
 async def edge_fulltext_search(
-    driver: AsyncDriver,
-    query: str,
-    limit=RELEVANT_SCHEMA_LIMIT,
-    source_node_uuid: str = '*',
-    target_node_uuid: str = '*',
+    driver: AsyncDriver,
+    query: str,
+    source_node_uuid: str | None,
+    target_node_uuid: str | None,
+    limit=RELEVANT_SCHEMA_LIMIT,
 ) -> list[EntityEdge]:
     # fulltext search over facts
-    fuzzy_query = re.sub(r'[^\w\s]', '', query) + '~'
-
-    records, _, _ = await driver.execute_query(
-        """
-        CALL db.index.fulltext.queryRelationships("name_and_fact", $query)
-        YIELD relationship AS rel, score
-        MATCH (n:Entity {uuid: $source_uuid})-[r {uuid: rel.uuid}]-(m:Entity {uuid: $target_uuid})
-        RETURN
+    cypher_query = Query("""
+        CALL db.index.fulltext.queryRelationships("name_and_fact", $query)
+        YIELD relationship AS rel, score
+        MATCH (n:Entity {uuid: $source_uuid})-[r {uuid: rel.uuid}]-(m:Entity {uuid: $target_uuid})
+        RETURN
         r.uuid AS uuid,
         n.uuid AS source_node_uuid,
         m.uuid AS target_node_uuid,
@@ -247,7 +308,70 @@ async def edge_fulltext_search(
         r.valid_at AS valid_at,
         r.invalid_at AS invalid_at
         ORDER BY score DESC LIMIT $limit
-        """,
+        """)
+
+    if source_node_uuid is None and target_node_uuid is None:
+        cypher_query = Query("""
+        CALL db.index.fulltext.queryRelationships("name_and_fact", $query)
+        YIELD relationship AS rel, score
+        MATCH (n:Entity)-[r {uuid: rel.uuid}]-(m:Entity)
+        RETURN
+            r.uuid AS uuid,
+            n.uuid AS source_node_uuid,
+            m.uuid AS target_node_uuid,
+            r.created_at AS created_at,
+            r.name AS name,
+            r.fact AS fact,
+            r.fact_embedding AS fact_embedding,
+            r.episodes AS episodes,
+            r.expired_at AS expired_at,
+            r.valid_at AS valid_at,
+            r.invalid_at AS invalid_at
+        ORDER BY score DESC LIMIT $limit
+        """)
+    elif source_node_uuid is None:
+        cypher_query = Query("""
+        CALL db.index.fulltext.queryRelationships("name_and_fact", $query)
+        YIELD relationship AS rel, score
+        MATCH (n:Entity)-[r {uuid: rel.uuid}]-(m:Entity {uuid: $target_uuid})
+        RETURN
+            r.uuid AS uuid,
+            n.uuid AS source_node_uuid,
+            m.uuid AS target_node_uuid,
+            r.created_at AS created_at,
+            r.name AS name,
+            r.fact AS fact,
+            r.fact_embedding AS fact_embedding,
+            r.episodes AS episodes,
+            r.expired_at AS expired_at,
+            r.valid_at AS valid_at,
+            r.invalid_at AS invalid_at
+        ORDER BY score DESC LIMIT $limit
+        """)
+    elif target_node_uuid is None:
+        cypher_query = Query("""
+        CALL db.index.fulltext.queryRelationships("name_and_fact", $query)
+        YIELD relationship AS rel, score
+        MATCH (n:Entity {uuid: $source_uuid})-[r {uuid: rel.uuid}]-(m:Entity)
+        RETURN
+            r.uuid AS uuid,
+            n.uuid AS source_node_uuid,
+            m.uuid AS target_node_uuid,
+            r.created_at AS created_at,
+            r.name AS name,
+            r.fact AS fact,
+            r.fact_embedding AS fact_embedding,
+            r.episodes AS episodes,
+            r.expired_at AS expired_at,
+            r.valid_at AS valid_at,
+            r.invalid_at AS invalid_at
+        ORDER BY score DESC LIMIT $limit
+        """)
+
+    fuzzy_query = re.sub(r'[^\w\s]', '', query) + '~'
+
+    records, _, _ = await driver.execute_query(
+        cypher_query,
         query=fuzzy_query,
         source_uuid=source_node_uuid,
         target_uuid=target_node_uuid,
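As in 0.2.0, the raw query string is still sanitised into a Lucene-style fuzzy query before it reaches the fulltext index: punctuation is stripped and a trailing ~ is appended. A small worked example of that one line (the input string is illustrative):

    import re

    query = 'Who founded Zep, Inc.?'
    fuzzy_query = re.sub(r'[^\w\s]', '', query) + '~'
    # fuzzy_query is now 'Who founded Zep Inc~'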
@@ -277,16 +401,16 @@ async def edge_fulltext_search(
 
 
 async def hybrid_node_search(
-    queries: list[str],
-    embeddings: list[list[float]],
-    driver: AsyncDriver,
-    limit: int = RELEVANT_SCHEMA_LIMIT,
+    queries: list[str],
+    embeddings: list[list[float]],
+    driver: AsyncDriver,
+    limit: int = RELEVANT_SCHEMA_LIMIT,
 ) -> list[EntityNode]:
     """
     Perform a hybrid search for nodes using both text queries and embeddings.
 
     This method combines fulltext search and vector similarity search to find
-    relevant nodes in the graph database. It uses an rrf reranker.
+    relevant nodes in the graph database. It uses a rrf reranker.
 
     Parameters
     ----------
@@ -342,8 +466,8 @@ async def hybrid_node_search(
 
 
 async def get_relevant_nodes(
-    nodes: list[EntityNode],
-    driver: AsyncDriver,
+    nodes: list[EntityNode],
+    driver: AsyncDriver,
 ) -> list[EntityNode]:
     """
     Retrieve relevant nodes based on the provided list of EntityNodes.
@@ -379,11 +503,11 @@ async def get_relevant_nodes(
 
 
 async def get_relevant_edges(
-    edges: list[EntityEdge],
-    driver: AsyncDriver,
-    limit: int = RELEVANT_SCHEMA_LIMIT,
-    source_node_uuid: str = '*',
-    target_node_uuid: str = '*',
+    driver: AsyncDriver,
+    edges: list[EntityEdge],
+    source_node_uuid: str | None,
+    target_node_uuid: str | None,
+    limit: int = RELEVANT_SCHEMA_LIMIT,
 ) -> list[EntityEdge]:
     start = time()
     relevant_edges: list[EntityEdge] = []
@@ -392,13 +516,13 @@ async def get_relevant_edges(
     results = await asyncio.gather(
         *[
             edge_similarity_search(
-                driver, edge.fact_embedding, limit, source_node_uuid, target_node_uuid
+                driver, edge.fact_embedding, source_node_uuid, target_node_uuid, limit
             )
             for edge in edges
             if edge.fact_embedding is not None
         ],
         *[
-            edge_fulltext_search(driver, edge.fact, limit, source_node_uuid, target_node_uuid)
+            edge_fulltext_search(driver, edge.fact, source_node_uuid, target_node_uuid, limit)
             for edge in edges
         ],
     )
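The gather call above fans each candidate edge out into two concurrent lookups, one vector search and one fulltext search, by unpacking both comprehensions into a single asyncio.gather. A self-contained illustration of that unpacking idiom (toy coroutine, not package code):

    import asyncio

    async def lookup(kind: str, item: int) -> str:
        # Stand-in for one of the two search coroutines.
        return f'{kind}:{item}'

    async def main() -> None:
        results = await asyncio.gather(
            *[lookup('vector', i) for i in range(2)],
            *[lookup('fulltext', i) for i in range(2)],
        )
        print(results)  # ['vector:0', 'vector:1', 'fulltext:0', 'fulltext:1']

    asyncio.run(main())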
@@ -433,14 +557,14 @@ def rrf(results: list[list[str]], rank_const=1) -> list[str]:
 
 
 async def node_distance_reranker(
-    driver: AsyncDriver, results: list[list[str]], center_node_uuid: str
+    driver: AsyncDriver, results: list[list[str]], center_node_uuid: str
 ) -> list[str]:
     # use rrf as a preliminary ranker
     sorted_uuids = rrf(results)
     scores: dict[str, float] = {}
 
     for uuid in sorted_uuids:
-        # Find shortest path to center node
+        # Find the shortest path to center node
         records, _, _ = await driver.execute_query(
             """
             MATCH (source:Entity)-[r:RELATES_TO {uuid: $edge_uuid}]->(target:Entity)
@@ -455,8 +579,8 @@ async def node_distance_reranker(
 
     for record in records:
         if (
-            record['source_uuid'] == center_node_uuid
-            or record['target_uuid'] == center_node_uuid
+            record['source_uuid'] == center_node_uuid
+            or record['target_uuid'] == center_node_uuid
         ):
             continue
         distance = record['score']
graphiti_core/utils/bulk_utils.py

@@ -158,7 +158,7 @@ async def dedupe_edges_bulk(
 
     relevant_edges_chunks: list[list[EntityEdge]] = list(
         await asyncio.gather(
-            *[get_relevant_edges(edge_chunk, driver) for edge_chunk in edge_chunks]
+            *[get_relevant_edges(driver, edge_chunk, None, None) for edge_chunk in edge_chunks]
         )
     )
 
graphiti_core/utils/maintenance/node_operations.py

@@ -28,7 +28,7 @@ logger = logging.getLogger(__name__)
 
 
 async def extract_message_nodes(
-    llm_client: LLMClient, episode: EpisodicNode, previous_episodes: list[EpisodicNode]
+    llm_client: LLMClient, episode: EpisodicNode, previous_episodes: list[EpisodicNode]
 ) -> list[dict[str, Any]]:
     # Prepare context for LLM
     context = {
@@ -49,8 +49,8 @@ async def extract_message_nodes(
 
 
 async def extract_json_nodes(
-    llm_client: LLMClient,
-    episode: EpisodicNode,
+    llm_client: LLMClient,
+    episode: EpisodicNode,
 ) -> list[dict[str, Any]]:
     # Prepare context for LLM
     context = {
@@ -67,9 +67,9 @@ async def extract_json_nodes(
 
 
 async def extract_nodes(
-    llm_client: LLMClient,
-    episode: EpisodicNode,
-    previous_episodes: list[EpisodicNode],
+    llm_client: LLMClient,
+    episode: EpisodicNode,
+    previous_episodes: list[EpisodicNode],
 ) -> list[EntityNode]:
     start = time()
     extracted_node_data: list[dict[str, Any]] = []
@@ -96,9 +96,9 @@ async def extract_nodes(
 
 
 async def dedupe_extracted_nodes(
-    llm_client: LLMClient,
-    extracted_nodes: list[EntityNode],
-    existing_nodes: list[EntityNode],
+    llm_client: LLMClient,
+    extracted_nodes: list[EntityNode],
+    existing_nodes: list[EntityNode],
 ) -> tuple[list[EntityNode], dict[str, str]]:
     start = time()
 
@@ -146,9 +146,9 @@ async def dedupe_extracted_nodes(
 
 
 async def resolve_extracted_nodes(
-    llm_client: LLMClient,
-    extracted_nodes: list[EntityNode],
-    existing_nodes_lists: list[list[EntityNode]],
+    llm_client: LLMClient,
+    extracted_nodes: list[EntityNode],
+    existing_nodes_lists: list[list[EntityNode]],
 ) -> tuple[list[EntityNode], dict[str, str]]:
     uuid_map: dict[str, str] = {}
     resolved_nodes: list[EntityNode] = []
@@ -169,7 +169,7 @@ async def resolve_extracted_nodes(
 
 
 async def resolve_extracted_node(
-    llm_client: LLMClient, extracted_node: EntityNode, existing_nodes: list[EntityNode]
+    llm_client: LLMClient, extracted_node: EntityNode, existing_nodes: list[EntityNode]
 ) -> tuple[EntityNode, dict[str, str]]:
     start = time()
 
@@ -214,8 +214,8 @@ async def resolve_extracted_node(
 
 
 async def dedupe_node_list(
-    llm_client: LLMClient,
-    nodes: list[EntityNode],
+    llm_client: LLMClient,
+    nodes: list[EntityNode],
 ) -> tuple[list[EntityNode], dict[str, str]]:
     start = time()
 
pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "graphiti-core"
-version = "0.2.0"
+version = "0.2.1"
 description = "A temporal graph building library"
 authors = [
     "Paul Paliychuk <paul@getzep.com>",
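Consumers pinning the package can pick up these changes by bumping the dependency to 0.2.1, for example with poetry add graphiti-core@0.2.1 or pip install --upgrade graphiti-core.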