graphiti-core 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

graphiti_core/graphiti.py CHANGED
@@ -262,6 +262,7 @@ class Graphiti:
         group_id: str = '',
         uuid: str | None = None,
         update_communities: bool = False,
+        entity_types: dict[str, BaseModel] | None = None,
     ) -> AddEpisodeResults:
         """
         Process an episode and update the graph.
@@ -336,7 +337,9 @@ class Graphiti:
 
             # Extract entities as nodes
 
-            extracted_nodes = await extract_nodes(self.llm_client, episode, previous_episodes)
+            extracted_nodes = await extract_nodes(
+                self.llm_client, episode, previous_episodes, entity_types
+            )
             logger.debug(f'Extracted nodes: {[(n.name, n.uuid) for n in extracted_nodes]}')
 
             # Calculate Embeddings
@@ -362,6 +365,7 @@ class Graphiti:
                     existing_nodes_lists,
                     episode,
                     previous_episodes,
+                    entity_types,
                 ),
                 extract_edges(
                     self.llm_client, episode, extracted_nodes, previous_episodes, group_id
@@ -724,7 +728,7 @@ class Graphiti:
         if edge.fact_embedding is None:
             await edge.generate_embedding(self.embedder)
 
-        resolved_nodes, _ = await resolve_extracted_nodes(
+        resolved_nodes, uuid_map = await resolve_extracted_nodes(
             self.llm_client,
             [source_node, target_node],
             [
@@ -733,14 +737,16 @@ class Graphiti:
             ],
         )
 
+        updated_edge = resolve_edge_pointers([edge], uuid_map)[0]
+
         related_edges = await get_relevant_edges(
             self.driver,
-            [edge],
+            [updated_edge],
             source_node_uuid=resolved_nodes[0].uuid,
             target_node_uuid=resolved_nodes[1].uuid,
        )
 
-        resolved_edge = await dedupe_extracted_edge(self.llm_client, edge, related_edges)
+        resolved_edge = await dedupe_extracted_edge(self.llm_client, updated_edge, related_edges)
 
        contradicting_edges = await get_edge_contradictions(self.llm_client, edge, related_edges)
        invalidated_edges = resolve_edge_contradictions(resolved_edge, contradicting_edges)
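
The new entity_types parameter accepts a mapping from type names to Pydantic model classes whose fields describe the attributes to extract for that type; the same mapping drives node classification (see the extract_nodes prompt changes below). A minimal usage sketch, assuming a local Neo4j instance and an illustrative Person type (neither is part of this release):

import asyncio
from datetime import datetime, timezone

from pydantic import BaseModel, Field

from graphiti_core import Graphiti
from graphiti_core.nodes import EpisodeType


class Person(BaseModel):
    # Hypothetical entity type: field names and descriptions steer attribute extraction.
    occupation: str | None = Field(None, description='The occupation of the person')


async def main() -> None:
    graphiti = Graphiti('bolt://localhost:7687', 'neo4j', 'password')  # assumed credentials
    await graphiti.add_episode(
        name='conversation-1',
        episode_body='Alice, a software engineer, met Bob for coffee.',
        source=EpisodeType.message,
        source_description='chat transcript',
        reference_time=datetime.now(timezone.utc),
        entity_types={'Person': Person},  # nodes classified as Person also get a :Person label
    )

asyncio.run(main())
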
graphiti_core/models/nodes/node_db_queries.py CHANGED
@@ -31,14 +31,16 @@ EPISODIC_NODE_SAVE_BULK = """
 
 ENTITY_NODE_SAVE = """
     MERGE (n:Entity {uuid: $uuid})
-    SET n = {uuid: $uuid, name: $name, group_id: $group_id, summary: $summary, created_at: $created_at}
+    SET n:$($labels)
+    SET n = $entity_data
     WITH n CALL db.create.setNodeVectorProperty(n, "name_embedding", $name_embedding)
     RETURN n.uuid AS uuid"""
 
 ENTITY_NODE_SAVE_BULK = """
     UNWIND $nodes AS node
     MERGE (n:Entity {uuid: node.uuid})
-    SET n = {uuid: node.uuid, name: node.name, group_id: node.group_id, summary: node.summary, created_at: node.created_at}
+    SET n:$(node.labels)
+    SET n = node
     WITH n, node CALL db.create.setNodeVectorProperty(n, "name_embedding", node.name_embedding)
     RETURN n.uuid AS uuid
     """
graphiti_core/nodes.py CHANGED
@@ -255,6 +255,9 @@ class EpisodicNode(Node):
 class EntityNode(Node):
     name_embedding: list[float] | None = Field(default=None, description='embedding of the name')
     summary: str = Field(description='regional summary of surrounding edges', default_factory=str)
+    attributes: dict[str, Any] = Field(
+        default={}, description='Additional attributes of the node. Dependent on node labels'
+    )
 
     async def generate_name_embedding(self, embedder: EmbedderClient):
         start = time()
@@ -266,14 +269,21 @@ class EntityNode(Node):
         return self.name_embedding
 
     async def save(self, driver: AsyncDriver):
+        entity_data: dict[str, Any] = {
+            'uuid': self.uuid,
+            'name': self.name,
+            'name_embedding': self.name_embedding,
+            'group_id': self.group_id,
+            'summary': self.summary,
+            'created_at': self.created_at,
+        }
+
+        entity_data.update(self.attributes or {})
+
         result = await driver.execute_query(
             ENTITY_NODE_SAVE,
-            uuid=self.uuid,
-            name=self.name,
-            group_id=self.group_id,
-            summary=self.summary,
-            name_embedding=self.name_embedding,
-            created_at=self.created_at,
+            labels=self.labels + ['Entity'],
+            entity_data=entity_data,
             database_=DEFAULT_DATABASE,
         )
 
@@ -292,7 +302,9 @@ class EntityNode(Node):
             n.name_embedding AS name_embedding,
             n.group_id AS group_id,
             n.created_at AS created_at,
-            n.summary AS summary
+            n.summary AS summary,
+            labels(n) AS labels,
+            properties(n) AS attributes
             """,
             uuid=uuid,
             database_=DEFAULT_DATABASE,
@@ -317,7 +329,9 @@ class EntityNode(Node):
             n.name_embedding AS name_embedding,
             n.group_id AS group_id,
             n.created_at AS created_at,
-            n.summary AS summary
+            n.summary AS summary,
+            labels(n) AS labels,
+            properties(n) AS attributes
             """,
             uuids=uuids,
             database_=DEFAULT_DATABASE,
@@ -351,7 +365,9 @@ class EntityNode(Node):
             n.name_embedding AS name_embedding,
             n.group_id AS group_id,
             n.created_at AS created_at,
-            n.summary AS summary
+            n.summary AS summary,
+            labels(n) AS labels,
+            properties(n) AS attributes
             ORDER BY n.uuid DESC
             """
             + limit_query,
@@ -503,9 +519,10 @@ def get_entity_node_from_record(record: Any) -> EntityNode:
         name=record['name'],
         group_id=record['group_id'],
         name_embedding=record['name_embedding'],
-        labels=['Entity'],
+        labels=record['labels'],
         created_at=record['created_at'].to_native(),
         summary=record['summary'],
+        attributes=record['attributes'],
     )
 
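With attributes now stored on the node and labels(n)/properties(n) returned by the read queries, an EntityNode is meant to round-trip its classification and typed properties through save and retrieval. A short sketch of that intended flow (the driver setup and Person values are assumptions for illustration):

from neo4j import AsyncGraphDatabase

from graphiti_core.nodes import EntityNode
from graphiti_core.utils.datetime_utils import utc_now


async def roundtrip() -> None:
    driver = AsyncGraphDatabase.driver('bolt://localhost:7687', auth=('neo4j', 'password'))

    node = EntityNode(
        name='Alice',
        group_id='group-1',
        labels=['Entity', 'Person'],  # extra label supplied by classification
        summary='Alice is a software engineer.',
        created_at=utc_now(),
        attributes={'occupation': 'software engineer'},
    )
    # save() now sends labels (SET n:$($labels)) and one entity_data map (SET n = $entity_data)
    await node.save(driver)

    fetched = await EntityNode.get_by_uuid(driver, node.uuid)
    assert 'Person' in fetched.labels  # labels(n) is returned by the updated queries
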
graphiti_core/prompts/extract_nodes.py CHANGED
@@ -30,11 +30,19 @@ class MissedEntities(BaseModel):
     missed_entities: list[str] = Field(..., description="Names of entities that weren't extracted")
 
 
+class EntityClassification(BaseModel):
+    entity_classification: str = Field(
+        ...,
+        description='Dictionary of entity classifications. Key is the entity name and value is the entity type',
+    )
+
+
 class Prompt(Protocol):
     extract_message: PromptVersion
     extract_json: PromptVersion
     extract_text: PromptVersion
     reflexion: PromptVersion
+    classify_nodes: PromptVersion
 
 
 class Versions(TypedDict):
@@ -42,6 +50,7 @@ class Versions(TypedDict):
     extract_json: PromptFunction
     extract_text: PromptFunction
     reflexion: PromptFunction
+    classify_nodes: PromptFunction
 
 
 def extract_message(context: dict[str, Any]) -> list[Message]:
@@ -66,6 +75,7 @@ Guidelines:
 4. DO NOT create nodes for temporal information like dates, times or years (these will be added to edges later).
 5. Be as explicit as possible in your node names, using full names.
 6. DO NOT extract entities mentioned only in PREVIOUS MESSAGES, those messages are only to provide context.
+7. Extract preferences as their own nodes
 """
     return [
         Message(role='system', content=sys_prompt),
@@ -109,7 +119,7 @@ def extract_text(context: dict[str, Any]) -> list[Message]:
 
 {context['custom_prompt']}
 
-Given the following text, extract entity nodes from the TEXT that are explicitly or implicitly mentioned:
+Given the above text, extract entity nodes from the TEXT that are explicitly or implicitly mentioned:
 
 Guidelines:
 1. Extract significant entities, concepts, or actors mentioned in the conversation.
@@ -147,9 +157,41 @@ extracted.
     ]
 
 
+def classify_nodes(context: dict[str, Any]) -> list[Message]:
+    sys_prompt = """You are an AI assistant that classifies entity nodes given the context from which they were extracted"""
+
+    user_prompt = f"""
+    <PREVIOUS MESSAGES>
+    {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
+    </PREVIOUS MESSAGES>
+    <CURRENT MESSAGE>
+    {context["episode_content"]}
+    </CURRENT MESSAGE>
+
+    <EXTRACTED ENTITIES>
+    {context['extracted_entities']}
+    </EXTRACTED ENTITIES>
+
+    <ENTITY TYPES>
+    {context['entity_types']}
+    </ENTITY TYPES>
+
+    Given the above conversation, extracted entities, and provided entity types, classify the extracted entities.
+
+    Guidelines:
+    1. Each entity must have exactly one type
+    2. If none of the provided entity types accurately classify an extracted node, the type should be set to None
+    """
+    return [
+        Message(role='system', content=sys_prompt),
+        Message(role='user', content=user_prompt),
+    ]
+
+
 versions: Versions = {
     'extract_message': extract_message,
     'extract_json': extract_json,
     'extract_text': extract_text,
     'reflexion': reflexion,
+    'classify_nodes': classify_nodes,
 }
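
Note that EntityClassification declares entity_classification as a string even though its description calls it a dictionary: the LLM returns the mapping serialized as text, and the consumer in node_operations.py (diffed below) recovers it with ast.literal_eval. A sketch of that round trip, with the response content invented for illustration:

import ast

# Hypothetical structured-output response from the classify_nodes prompt.
llm_response = {'entity_classification': "{'Alice': 'Person', 'Acme Corp': None}"}

node_classifications: dict[str, str | None] = {}
node_classifications.update(ast.literal_eval(llm_response.get('entity_classification', '{}')))

assert node_classifications == {'Alice': 'Person', 'Acme Corp': None}
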
graphiti_core/prompts/summarize_nodes.py CHANGED
@@ -24,7 +24,8 @@ from .models import Message, PromptFunction, PromptVersion
 
 class Summary(BaseModel):
     summary: str = Field(
-        ..., description='Summary containing the important information from both summaries'
+        ...,
+        description='Summary containing the important information about the entity. Under 500 words',
     )
 
 
@@ -68,7 +69,7 @@ def summarize_context(context: dict[str, Any]) -> list[Message]:
     return [
         Message(
             role='system',
-            content='You are a helpful assistant that combines summaries with new conversation context.',
+            content='You are a helpful assistant that extracts entity properties from the provided text.',
         ),
         Message(
             role='user',
@@ -81,13 +82,21 @@ def summarize_context(context: dict[str, Any]) -> list[Message]:
 
        Given the above MESSAGES and the following ENTITY name, create a summary for the ENTITY. Your summary must only use
        information from the provided MESSAGES. Your summary should also only contain information relevant to the
-        provided ENTITY.
+        provided ENTITY. Summaries must be under 500 words.
 
-        Summaries must be under 500 words.
+        In addition, extract any values for the provided entity properties based on their descriptions.
 
         <ENTITY>
         {context['node_name']}
         </ENTITY>
+
+        <ENTITY CONTEXT>
+        {context['node_summary']}
+        </ENTITY CONTEXT>
+
+        <ATTRIBUTES>
+        {json.dumps(context['attributes'], indent=2)}
+        </ATTRIBUTES>
         """,
         ),
     ]
graphiti_core/search/search_utils.py CHANGED
@@ -97,7 +97,9 @@ async def get_mentioned_nodes(
             n.name AS name,
             n.name_embedding AS name_embedding,
             n.created_at AS created_at,
-            n.summary AS summary
+            n.summary AS summary,
+            labels(n) AS labels,
+            properties(n) AS attributes
             """,
         uuids=episode_uuids,
         database_=DEFAULT_DATABASE,
@@ -223,8 +225,8 @@ async def edge_similarity_search(
 
     query: LiteralString = (
         """
-    MATCH (n:Entity)-[r:RELATES_TO]->(m:Entity)
-    """
+        MATCH (n:Entity)-[r:RELATES_TO]->(m:Entity)
+        """
         + group_filter_query
         + filter_query
         + """\nWITH DISTINCT r, vector.similarity.cosine(r.fact_embedding, $search_vector) AS score
@@ -341,7 +343,9 @@ async def node_fulltext_search(
             n.name AS name,
             n.name_embedding AS name_embedding,
             n.created_at AS created_at,
-            n.summary AS summary
+            n.summary AS summary,
+            labels(n) AS labels,
+            properties(n) AS attributes
             ORDER BY score DESC
             LIMIT $limit
             """,
@@ -390,7 +394,9 @@ async def node_similarity_search(
             n.name AS name,
             n.name_embedding AS name_embedding,
             n.created_at AS created_at,
-            n.summary AS summary
+            n.summary AS summary,
+            labels(n) AS labels,
+            properties(n) AS attributes
             ORDER BY score DESC
             LIMIT $limit
             """,
@@ -427,7 +433,9 @@ async def node_bfs_search(
             n.name AS name,
             n.name_embedding AS name_embedding,
             n.created_at AS created_at,
-            n.summary AS summary
+            n.summary AS summary,
+            labels(n) AS labels,
+            properties(n) AS attributes
             LIMIT $limit
             """,
         bfs_origin_node_uuids=bfs_origin_node_uuids,
graphiti_core/utils/bulk_utils.py CHANGED
@@ -23,6 +23,7 @@ from math import ceil
 from neo4j import AsyncDriver, AsyncManagedTransaction
 from numpy import dot, sqrt
 from pydantic import BaseModel
+from typing_extensions import Any
 
 from graphiti_core.edges import Edge, EntityEdge, EpisodicEdge
 from graphiti_core.helpers import semaphore_gather
@@ -109,8 +110,23 @@ async def add_nodes_and_edges_bulk_tx(
     episodes = [dict(episode) for episode in episodic_nodes]
     for episode in episodes:
         episode['source'] = str(episode['source'].value)
+    nodes: list[dict[str, Any]] = []
+    for node in entity_nodes:
+        entity_data: dict[str, Any] = {
+            'uuid': node.uuid,
+            'name': node.name,
+            'name_embedding': node.name_embedding,
+            'group_id': node.group_id,
+            'summary': node.summary,
+            'created_at': node.created_at,
+        }
+
+        entity_data.update(node.attributes or {})
+        entity_data['labels'] = list(set(node.labels + ['Entity']))
+        nodes.append(entity_data)
+
     await tx.run(EPISODIC_NODE_SAVE_BULK, episodes=episodes)
-    await tx.run(ENTITY_NODE_SAVE_BULK, nodes=[dict(entity) for entity in entity_nodes])
+    await tx.run(ENTITY_NODE_SAVE_BULK, nodes=nodes)
     await tx.run(EPISODIC_EDGE_SAVE_BULK, episodic_edges=[dict(edge) for edge in episodic_edges])
     await tx.run(ENTITY_EDGE_SAVE_BULK, entity_edges=[dict(edge) for edge in entity_edges])
graphiti_core/utils/maintenance/node_operations.py CHANGED
@@ -14,15 +14,19 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """
 
+import ast
 import logging
 from time import time
 
+import pydantic
+from pydantic import BaseModel
+
 from graphiti_core.helpers import MAX_REFLEXION_ITERATIONS, semaphore_gather
 from graphiti_core.llm_client import LLMClient
 from graphiti_core.nodes import EntityNode, EpisodeType, EpisodicNode
 from graphiti_core.prompts import prompt_library
 from graphiti_core.prompts.dedupe_nodes import NodeDuplicate
-from graphiti_core.prompts.extract_nodes import ExtractedNodes, MissedEntities
+from graphiti_core.prompts.extract_nodes import EntityClassification, ExtractedNodes, MissedEntities
 from graphiti_core.prompts.summarize_nodes import Summary
 from graphiti_core.utils.datetime_utils import utc_now
 
@@ -114,6 +118,7 @@ async def extract_nodes(
     llm_client: LLMClient,
     episode: EpisodicNode,
     previous_episodes: list[EpisodicNode],
+    entity_types: dict[str, BaseModel] | None = None,
 ) -> list[EntityNode]:
     start = time()
     extracted_node_names: list[str] = []
@@ -144,15 +149,35 @@
     for entity in missing_entities:
         custom_prompt += f'\n{entity},'
 
+    node_classification_context = {
+        'episode_content': episode.content,
+        'previous_episodes': [ep.content for ep in previous_episodes],
+        'extracted_entities': extracted_node_names,
+        'entity_types': entity_types.keys() if entity_types is not None else [],
+    }
+
+    node_classifications: dict[str, str | None] = {}
+
+    if entity_types is not None:
+        llm_response = await llm_client.generate_response(
+            prompt_library.extract_nodes.classify_nodes(node_classification_context),
+            response_model=EntityClassification,
+        )
+        response_string = llm_response.get('entity_classification', '{}')
+        node_classifications.update(ast.literal_eval(response_string))
+
     end = time()
     logger.debug(f'Extracted new nodes: {extracted_node_names} in {(end - start) * 1000} ms')
     # Convert the extracted data into EntityNode objects
     new_nodes = []
     for name in extracted_node_names:
+        entity_type = node_classifications.get(name)
+        labels = ['Entity'] if entity_type is None else ['Entity', entity_type]
+
         new_node = EntityNode(
             name=name,
             group_id=episode.group_id,
-            labels=['Entity'],
+            labels=labels,
             summary='',
             created_at=utc_now(),
         )
@@ -218,6 +243,7 @@ async def resolve_extracted_nodes(
     existing_nodes_lists: list[list[EntityNode]],
     episode: EpisodicNode | None = None,
     previous_episodes: list[EpisodicNode] | None = None,
+    entity_types: dict[str, BaseModel] | None = None,
 ) -> tuple[list[EntityNode], dict[str, str]]:
     uuid_map: dict[str, str] = {}
     resolved_nodes: list[EntityNode] = []
@@ -225,7 +251,12 @@
     await semaphore_gather(
         *[
             resolve_extracted_node(
-                llm_client, extracted_node, existing_nodes, episode, previous_episodes
+                llm_client,
+                extracted_node,
+                existing_nodes,
+                episode,
+                previous_episodes,
+                entity_types,
             )
             for extracted_node, existing_nodes in zip(extracted_nodes, existing_nodes_lists)
         ]
@@ -245,6 +276,7 @@ async def resolve_extracted_node(
     existing_nodes: list[EntityNode],
     episode: EpisodicNode | None = None,
     previous_episodes: list[EpisodicNode] | None = None,
+    entity_types: dict[str, BaseModel] | None = None,
 ) -> tuple[EntityNode, dict[str, str]]:
     start = time()
 
@@ -268,23 +300,44 @@
 
     summary_context = {
         'node_name': extracted_node.name,
+        'node_summary': extracted_node.summary,
         'episode_content': episode.content if episode is not None else '',
         'previous_episodes': [ep.content for ep in previous_episodes]
         if previous_episodes is not None
         else [],
+        'attributes': [],
     }
 
-    llm_response, node_summary_response = await semaphore_gather(
+    entity_type_classes: tuple[BaseModel, ...] = tuple()
+    if entity_types is not None:  # type: ignore
+        entity_type_classes = entity_type_classes + tuple(
+            filter(
+                lambda x: x is not None,  # type: ignore
+                [entity_types.get(entity_type) for entity_type in extracted_node.labels],  # type: ignore
+            )
+        )
+
+    for entity_type in entity_type_classes:
+        for field_name in entity_type.model_fields:
+            summary_context.get('attributes', []).append(field_name)  # type: ignore
+
+    entity_attributes_model = pydantic.create_model(  # type: ignore
+        'EntityAttributes',
+        __base__=entity_type_classes + (Summary,),  # type: ignore
+    )
+
+    llm_response, node_attributes_response = await semaphore_gather(
         llm_client.generate_response(
             prompt_library.dedupe_nodes.node(context), response_model=NodeDuplicate
         ),
         llm_client.generate_response(
             prompt_library.summarize_nodes.summarize_context(summary_context),
-            response_model=Summary,
+            response_model=entity_attributes_model,
         ),
     )
 
-    extracted_node.summary = node_summary_response.get('summary', '')
+    extracted_node.summary = node_attributes_response.get('summary', '')
+    extracted_node.attributes.update(node_attributes_response)
 
     is_duplicate: bool = llm_response.get('is_duplicate', False)
     uuid: str | None = llm_response.get('uuid', None)
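
resolve_extracted_node now builds a one-off response model on the fly: the entity-type classes matched by the node's labels are combined with Summary via pydantic.create_model, so a single LLM call returns the summary and all typed attributes together. A minimal sketch of that dynamic-model step, with Person standing in for a caller-supplied entity type:

import pydantic
from pydantic import BaseModel, Field


class Summary(BaseModel):
    summary: str = Field(..., description='Summary of the entity')


class Person(BaseModel):
    occupation: str | None = None


# pydantic v2's create_model accepts a tuple of bases; the resulting model
# validates the summary field plus every field declared on the matched types.
EntityAttributes = pydantic.create_model(
    'EntityAttributes',
    __base__=(Person, Summary),
)

data = EntityAttributes(summary='Alice is an engineer.', occupation='software engineer')
print(data.model_dump())  # contains both 'summary' and 'occupation'
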
graphiti_core-0.6.0.dist-info/METADATA → graphiti_core-0.7.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: graphiti-core
-Version: 0.6.0
+Version: 0.7.0
 Summary: A temporal graph building library
 License: Apache-2.0
 Author: Paul Paliychuk
graphiti_core-0.6.0.dist-info/RECORD → graphiti_core-0.7.0.dist-info/RECORD RENAMED
@@ -9,7 +9,7 @@ graphiti_core/embedder/client.py,sha256=HKIlpPLnzFT81jurPkry6z8F8nxfZVfejdcfxHVU
 graphiti_core/embedder/openai.py,sha256=FzEM9rtSDK1wTb4iYKjNjjdFf8BEBTDxG2vM_E-5W-8,1621
 graphiti_core/embedder/voyage.py,sha256=7kqrLG75J3Q6cdA2Nlx1JSYtpk2141ckdl3OtDDw0vU,1882
 graphiti_core/errors.py,sha256=ddHrHGQxhwkVAtSph4AV84UoOlgwZufMczXPwB7uqPo,1795
-graphiti_core/graphiti.py,sha256=QN4YnAfpFPy6Cj5XCdab63yzgAHl-wGAEdVo2xwLxQU,28884
+graphiti_core/graphiti.py,sha256=rvgnw3JnDxMQrh5-Q6RxfnLELcwPiwWHgZw_cRoUGR8,29110
 graphiti_core/helpers.py,sha256=z7ApOgrm_J7hk5FN_XPAwkKyopEY943BgHjDJbSXr2s,2869
 graphiti_core/llm_client/__init__.py,sha256=PA80TSMeX-sUXITXEAxMDEt3gtfZgcJrGJUcyds1mSo,207
 graphiti_core/llm_client/anthropic_client.py,sha256=RlD6e49XvMJsTKU0krpq46gPSFm6-hfLkkq4Sfx27BE,2574
@@ -24,38 +24,38 @@ graphiti_core/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 graphiti_core/models/edges/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 graphiti_core/models/edges/edge_db_queries.py,sha256=2UoLkmazO-FJYqjc3g0LuL-pyjekzQxxed_XHVv_HZE,2671
 graphiti_core/models/nodes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-graphiti_core/models/nodes/node_db_queries.py,sha256=I0top_N23FN0U5ZbypaS5IXvtfx2zgJmKUCT_7mpUdo,2257
-graphiti_core/nodes.py,sha256=_ExaTj2HU-xDczbls4aFcLdpc8zwPZUZ8JgVOrBiEdw,16098
+graphiti_core/models/nodes/node_db_queries.py,sha256=f4_UT6XL8UDt4_CO9YIHeI8pvpw_vrutA9SYrgi6QCU,2121
+graphiti_core/nodes.py,sha256=dKllAYBvNy6uCDxvacvNoVHiEm-wJm_cIK3KKTahVkM,16709
 graphiti_core/prompts/__init__.py,sha256=EA-x9xUki9l8wnu2l8ek_oNf75-do5tq5hVq7Zbv8Kw,101
 graphiti_core/prompts/dedupe_edges.py,sha256=EuX8ngeItBzrlMBOgeHrpExzxIFHD2aoDyaX1ZniF6I,3556
 graphiti_core/prompts/dedupe_nodes.py,sha256=mqvNATL-4Vo33vaxUEZfOq6hXXOiL-ftY0zcx2G-82I,4624
 graphiti_core/prompts/eval.py,sha256=csW494kKBMvWSm2SYLIRuGgNghhwNR3YwGn3veo3g_Y,3691
 graphiti_core/prompts/extract_edge_dates.py,sha256=td2yx2wnX-nLioMa0mtla3WcRyO71_wSjemT79YZGQ0,4096
 graphiti_core/prompts/extract_edges.py,sha256=vyEdW7JAPOT_eLWUi6nRmxbvucyVoyoYX2SxXfknRUg,3467
-graphiti_core/prompts/extract_nodes.py,sha256=JXLHeL1VcFo0auGf2roVnoWu1CyZJDWxBCu6BXE9fUQ,5289
+graphiti_core/prompts/extract_nodes.py,sha256=-01MpcVd9drtmMDIpQkkzZe8YwVhedmdbZq7UNGfo24,6651
 graphiti_core/prompts/invalidate_edges.py,sha256=DV2mEyIhhjc0hdKEMFLQMeG0FiUCkv_X0ctCliYjQ2c,3577
 graphiti_core/prompts/lib.py,sha256=oxhlpGEgV15VOLEZiwirxmIJBIdfzfiyL58iyzFDskE,4254
 graphiti_core/prompts/models.py,sha256=cvx_Bv5RMFUD_5IUawYrbpOKLPHogai7_bm7YXrSz84,867
 graphiti_core/prompts/prompt_helpers.py,sha256=-9TABwIcIQUVHcNANx6wIZd-FT2DgYKyGTfx4IGYq2I,64
-graphiti_core/prompts/summarize_nodes.py,sha256=5J_IONG7fHYiQZWnCaUyw7w2zunEaN7V89nEluRP-qY,3461
+graphiti_core/prompts/summarize_nodes.py,sha256=ONDZdkvC7-RPaKx2geWSVjNaJAsHxRisV8tiU2ukw4k,3781
 graphiti_core/py.typed,sha256=vlmmzQOt7bmeQl9L3XJP4W6Ry0iiELepnOrinKz5KQg,79
 graphiti_core/search/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 graphiti_core/search/search.py,sha256=4DaeP5aRT7ZOByDO3H5UK0edxfwQ4mzAOdFjnjwaDJs,12454
 graphiti_core/search/search_config.py,sha256=UZN8jFA4pBlw2O5N1cuhVRBdTwMLR9N3Oyo6sQ4MDVw,3117
 graphiti_core/search/search_config_recipes.py,sha256=yUqiLnn9vFg39M8eVwjVKfBCL_ptGrfDMQ47m_Blb0g,6885
 graphiti_core/search/search_filters.py,sha256=_E_Od3hUoZm6H2UVCcxhfS34AqGF2lNx0NJPCw0gAQs,5333
-graphiti_core/search/search_utils.py,sha256=GwF7tsvjKgVXtv6q4lXA1tZn1_0izy6rHNwL8d0cYU4,24348
+graphiti_core/search/search_utils.py,sha256=Vm5QMMdhnMVnB_5hSu_x04sBsDTKTc8jg4X8lM1hbLc,24692
 graphiti_core/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-graphiti_core/utils/bulk_utils.py,sha256=FYal4tSspGVohNsnDoyW_YjMiscySuYPuQLPSwVCy24,14110
+graphiti_core/utils/bulk_utils.py,sha256=KmMOxLt4pUpNF_Tz_YEUz4RRgXa_o6rV1r0Ig-RJ8kQ,14611
 graphiti_core/utils/datetime_utils.py,sha256=Ti-2tnrDFRzBsbfblzsHybsM3jaDLP4-VT2t0VhpIzU,1357
 graphiti_core/utils/maintenance/__init__.py,sha256=TRY3wWWu5kn3Oahk_KKhltrWnh0NACw0FskjqF6OtlA,314
 graphiti_core/utils/maintenance/community_operations.py,sha256=gIw1M5HGgc2c3TXag5ygPPpAv5WsG-yoC8Lhmfr6FMs,10011
 graphiti_core/utils/maintenance/edge_operations.py,sha256=tNw56vN586JYZMgie6RLRTiHZ680-kWzDIxW8ucL6SU,12780
 graphiti_core/utils/maintenance/graph_data_operations.py,sha256=qds9ALk9PhpQs1CNZTZGpi70mqJ93Y2KhIh9X2r8MUI,6533
-graphiti_core/utils/maintenance/node_operations.py,sha256=A-6H2ohqcGJRA_sg_-0m_AA7syiP_gVBsyY7VTTbfuA,12036
+graphiti_core/utils/maintenance/node_operations.py,sha256=gihbPEBH6StLQCSd9wSu582d4Owaw3l5JLR1IBDrnVs,14137
 graphiti_core/utils/maintenance/temporal_operations.py,sha256=RdNtubCyYhOVrvcOIq2WppHls1Q-BEjtsN8r38l-Rtc,3691
 graphiti_core/utils/maintenance/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-graphiti_core-0.6.0.dist-info/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
-graphiti_core-0.6.0.dist-info/METADATA,sha256=y9SsrlB4L8EVV0G9xYn39WenebenJFnlBvX_jikVh04,10242
-graphiti_core-0.6.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-graphiti_core-0.6.0.dist-info/RECORD,,
+graphiti_core-0.7.0.dist-info/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
+graphiti_core-0.7.0.dist-info/METADATA,sha256=vSMHoX_ZET_5ComzZbaVa6ZG5MZUvZ5FCi0zVslfI0Y,10242
+graphiti_core-0.7.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+graphiti_core-0.7.0.dist-info/RECORD,,