graphiti-core 0.21.0rc11-py3-none-any.whl → 0.21.0rc13-py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of graphiti-core might be problematic.

graphiti_core/graphiti.py CHANGED
@@ -136,7 +136,6 @@ class Graphiti:
  store_raw_episode_content: bool = True,
  graph_driver: GraphDriver | None = None,
  max_coroutines: int | None = None,
- ensure_ascii: bool = False,
  ):
  """
  Initialize a Graphiti instance.
@@ -169,10 +168,6 @@ class Graphiti:
  max_coroutines : int | None, optional
  The maximum number of concurrent operations allowed. Overrides SEMAPHORE_LIMIT set in the environment.
  If not set, the Graphiti default is used.
- ensure_ascii : bool, optional
- Whether to escape non-ASCII characters in JSON serialization for prompts. Defaults to False.
- Set as False to preserve non-ASCII characters (e.g., Korean, Japanese, Chinese) in their
- original form, making them readable in LLM logs and improving model understanding.

  Returns
  -------
@@ -202,7 +197,6 @@ class Graphiti:

  self.store_raw_episode_content = store_raw_episode_content
  self.max_coroutines = max_coroutines
- self.ensure_ascii = ensure_ascii
  if llm_client:
  self.llm_client = llm_client
  else:
@@ -221,7 +215,6 @@ class Graphiti:
  llm_client=self.llm_client,
  embedder=self.embedder,
  cross_encoder=self.cross_encoder,
- ensure_ascii=self.ensure_ascii,
  )

  # Capture telemetry event
@@ -559,9 +552,7 @@ class Graphiti:
  if update_communities:
  communities, community_edges = await semaphore_gather(
  *[
- update_community(
- self.driver, self.llm_client, self.embedder, node, self.ensure_ascii
- )
+ update_community(self.driver, self.llm_client, self.embedder, node)
  for node in nodes
  ],
  max_coroutines=self.max_coroutines,
@@ -1071,7 +1062,6 @@ class Graphiti:
  ),
  None,
  None,
- self.ensure_ascii,
  )

  edges: list[EntityEdge] = [resolved_edge] + invalidated_edges
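
The practical upshot of the hunks above: `ensure_ascii` is removed from the `Graphiti` constructor rather than deprecated. A hedged migration sketch (the connection arguments are placeholders, not taken from this diff):

```python
from graphiti_core import Graphiti

# 0.21.0rc11 and earlier accepted the keyword:
#   graphiti = Graphiti('bolt://localhost:7687', 'neo4j', 'password', ensure_ascii=False)
# 0.21.0rc13 removes the parameter, so the call above now raises TypeError.
graphiti = Graphiti('bolt://localhost:7687', 'neo4j', 'password')
# Non-ASCII text in prompts is preserved by default; there is no longer a switch.
```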

graphiti_core/graphiti_types.py CHANGED
@@ -27,6 +27,5 @@ class GraphitiClients(BaseModel):
  llm_client: LLMClient
  embedder: EmbedderClient
  cross_encoder: CrossEncoderClient
- ensure_ascii: bool = False

  model_config = ConfigDict(arbitrary_types_allowed=True)

graphiti_core/prompts/dedupe_edges.py CHANGED
@@ -25,11 +25,11 @@ from .prompt_helpers import to_prompt_json
  class EdgeDuplicate(BaseModel):
  duplicate_facts: list[int] = Field(
  ...,
- description='List of ids of any duplicate facts. If no duplicate facts are found, default to empty list.',
+ description='List of idx values of any duplicate facts. If no duplicate facts are found, default to empty list.',
  )
  contradicted_facts: list[int] = Field(
  ...,
- description='List of ids of facts that should be invalidated. If no facts should be invalidated, the list should be empty.',
+ description='List of idx values of facts that should be invalidated. If no facts should be invalidated, the list should be empty.',
  )
  fact_type: str = Field(..., description='One of the provided fact types or DEFAULT')

@@ -67,11 +67,11 @@ def edge(context: dict[str, Any]) -> list[Message]:
  Given the following context, determine whether the New Edge represents any of the edges in the list of Existing Edges.

  <EXISTING EDGES>
- {to_prompt_json(context['related_edges'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
+ {to_prompt_json(context['related_edges'], indent=2)}
  </EXISTING EDGES>

  <NEW EDGE>
- {to_prompt_json(context['extracted_edges'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
+ {to_prompt_json(context['extracted_edges'], indent=2)}
  </NEW EDGE>

  Task:
@@ -98,7 +98,7 @@ def edge_list(context: dict[str, Any]) -> list[Message]:
  Given the following context, find all of the duplicates in a list of facts:

  Facts:
- {to_prompt_json(context['edges'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
+ {to_prompt_json(context['edges'], indent=2)}

  Task:
  If any facts in Facts is a duplicate of another fact, return a new fact with one of their uuid's.
@@ -124,37 +124,48 @@ def resolve_edge(context: dict[str, Any]) -> list[Message]:
  Message(
  role='user',
  content=f"""
- <NEW FACT>
- {context['new_edge']}
- </NEW FACT>
-
+ Task:
+ You will receive TWO separate lists of facts. Each list uses 'idx' as its index field, starting from 0.
+
+ 1. DUPLICATE DETECTION:
+ - If the NEW FACT represents identical factual information as any fact in EXISTING FACTS, return those idx values in duplicate_facts.
+ - Facts with similar information that contain key differences should NOT be marked as duplicates.
+ - Return idx values from EXISTING FACTS.
+ - If no duplicates, return an empty list for duplicate_facts.
+
+ 2. FACT TYPE CLASSIFICATION:
+ - Given the predefined FACT TYPES, determine if the NEW FACT should be classified as one of these types.
+ - Return the fact type as fact_type or DEFAULT if NEW FACT is not one of the FACT TYPES.
+
+ 3. CONTRADICTION DETECTION:
+ - Based on FACT INVALIDATION CANDIDATES and NEW FACT, determine which facts the new fact contradicts.
+ - Return idx values from FACT INVALIDATION CANDIDATES.
+ - If no contradictions, return an empty list for contradicted_facts.
+
+ IMPORTANT:
+ - duplicate_facts: Use ONLY 'idx' values from EXISTING FACTS
+ - contradicted_facts: Use ONLY 'idx' values from FACT INVALIDATION CANDIDATES
+ - These are two separate lists with independent idx ranges starting from 0
+
+ Guidelines:
+ 1. Some facts may be very similar but will have key differences, particularly around numeric values in the facts.
+ Do not mark these facts as duplicates.
+
+ <FACT TYPES>
+ {context['edge_types']}
+ </FACT TYPES>
+
  <EXISTING FACTS>
  {context['existing_edges']}
  </EXISTING FACTS>
+
  <FACT INVALIDATION CANDIDATES>
  {context['edge_invalidation_candidates']}
  </FACT INVALIDATION CANDIDATES>
-
- <FACT TYPES>
- {context['edge_types']}
- </FACT TYPES>
-

- Task:
- If the NEW FACT represents identical factual information of one or more in EXISTING FACTS, return the idx of the duplicate facts.
- Facts with similar information that contain key differences should not be marked as duplicates.
- If the NEW FACT is not a duplicate of any of the EXISTING FACTS, return an empty list.
-
- Given the predefined FACT TYPES, determine if the NEW FACT should be classified as one of these types.
- Return the fact type as fact_type or DEFAULT if NEW FACT is not one of the FACT TYPES.
-
- Based on the provided FACT INVALIDATION CANDIDATES and NEW FACT, determine which existing facts the new fact contradicts.
- Return a list containing all idx's of the facts that are contradicted by the NEW FACT.
- If there are no contradicted facts, return an empty list.
-
- Guidelines:
- 1. Some facts may be very similar but will have key differences, particularly around numeric values in the facts.
- Do not mark these facts as duplicates.
+ <NEW FACT>
+ {context['new_edge']}
+ </NEW FACT>
  """,
  ),
  ]
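
Since the resolve_edge prompt and the EdgeDuplicate schema both now speak in terms of 'idx', a minimal sketch of the contract may help (the fact strings are invented for illustration; the model mirrors the one defined in this file):

```python
from pydantic import BaseModel, Field

class EdgeDuplicate(BaseModel):  # mirrors the schema in this diff
    duplicate_facts: list[int] = Field(..., description='idx values from EXISTING FACTS')
    contradicted_facts: list[int] = Field(
        ..., description='idx values from FACT INVALIDATION CANDIDATES'
    )
    fact_type: str = Field(..., description='One of the provided fact types or DEFAULT')

# The two lists are indexed independently, both starting at 0:
existing_facts = [{'idx': 0, 'fact': 'Alice is employed by Acme'}]
invalidation_candidates = [{'idx': 0, 'fact': 'Alice works at Initech'}]

# A well-formed response: idx 0 refers to a different fact in each field.
response = EdgeDuplicate(duplicate_facts=[0], contradicted_facts=[0], fact_type='DEFAULT')
```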

graphiti_core/prompts/dedupe_nodes.py CHANGED
@@ -64,20 +64,20 @@ def node(context: dict[str, Any]) -> list[Message]:
  role='user',
  content=f"""
  <PREVIOUS MESSAGES>
- {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
+ {to_prompt_json([ep for ep in context['previous_episodes']], indent=2)}
  </PREVIOUS MESSAGES>
  <CURRENT MESSAGE>
  {context['episode_content']}
  </CURRENT MESSAGE>
  <NEW ENTITY>
- {to_prompt_json(context['extracted_node'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
+ {to_prompt_json(context['extracted_node'], indent=2)}
  </NEW ENTITY>
  <ENTITY TYPE DESCRIPTION>
- {to_prompt_json(context['entity_type_description'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
+ {to_prompt_json(context['entity_type_description'], indent=2)}
  </ENTITY TYPE DESCRIPTION>

  <EXISTING ENTITIES>
- {to_prompt_json(context['existing_nodes'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
+ {to_prompt_json(context['existing_nodes'], indent=2)}
  </EXISTING ENTITIES>

  Given the above EXISTING ENTITIES and their attributes, MESSAGE, and PREVIOUS MESSAGES; Determine if the NEW ENTITY extracted from the conversation
@@ -125,7 +125,7 @@ def nodes(context: dict[str, Any]) -> list[Message]:
  role='user',
  content=f"""
  <PREVIOUS MESSAGES>
- {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json([ep for ep in context['previous_episodes']], indent=2)}
  </PREVIOUS MESSAGES>
  <CURRENT MESSAGE>
  {context['episode_content']}
@@ -142,11 +142,11 @@ def nodes(context: dict[str, Any]) -> list[Message]:
  }}

  <ENTITIES>
- {to_prompt_json(context['extracted_nodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json(context['extracted_nodes'], indent=2)}
  </ENTITIES>

  <EXISTING ENTITIES>
- {to_prompt_json(context['existing_nodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json(context['existing_nodes'], indent=2)}
  </EXISTING ENTITIES>

  Each entry in EXISTING ENTITIES is an object with the following structure:
@@ -197,7 +197,7 @@ def node_list(context: dict[str, Any]) -> list[Message]:
  Given the following context, deduplicate a list of nodes:

  Nodes:
- {to_prompt_json(context['nodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json(context['nodes'], indent=2)}

  Task:
  1. Group nodes together such that all duplicate nodes are in the same list of uuids

graphiti_core/prompts/eval.py CHANGED
@@ -68,7 +68,7 @@ def query_expansion(context: dict[str, Any]) -> list[Message]:
  Bob is asking Alice a question, are you able to rephrase the question into a simpler one about Alice in the third person
  that maintains the relevant context?
  <QUESTION>
- {to_prompt_json(context['query'], ensure_ascii=context.get('ensure_ascii', False))}
+ {to_prompt_json(context['query'])}
  </QUESTION>
  """
  return [
@@ -84,10 +84,10 @@ def qa_prompt(context: dict[str, Any]) -> list[Message]:
  Your task is to briefly answer the question in the way that you think Alice would answer the question.
  You are given the following entity summaries and facts to help you determine the answer to your question.
  <ENTITY_SUMMARIES>
- {to_prompt_json(context['entity_summaries'], ensure_ascii=context.get('ensure_ascii', False))}
+ {to_prompt_json(context['entity_summaries'])}
  </ENTITY_SUMMARIES>
  <FACTS>
- {to_prompt_json(context['facts'], ensure_ascii=context.get('ensure_ascii', False))}
+ {to_prompt_json(context['facts'])}
  </FACTS>
  <QUESTION>
  {context['query']}

graphiti_core/prompts/extract_edges.py CHANGED
@@ -24,9 +24,16 @@ from .prompt_helpers import to_prompt_json

  class Edge(BaseModel):
  relation_type: str = Field(..., description='FACT_PREDICATE_IN_SCREAMING_SNAKE_CASE')
- source_entity_id: int = Field(..., description='The id of the source entity of the fact.')
- target_entity_id: int = Field(..., description='The id of the target entity of the fact.')
- fact: str = Field(..., description='')
+ source_entity_id: int = Field(
+ ..., description='The id of the source entity from the ENTITIES list'
+ )
+ target_entity_id: int = Field(
+ ..., description='The id of the target entity from the ENTITIES list'
+ )
+ fact: str = Field(
+ ...,
+ description='A natural language description of the relationship between the entities, paraphrased from the source text',
+ )
  valid_at: str | None = Field(
  None,
  description='The date and time when the relationship described by the edge fact became true or was established. Use ISO 8601 format (YYYY-MM-DDTHH:MM:SS.SSSSSSZ)',
@@ -73,7 +80,7 @@ def edge(context: dict[str, Any]) -> list[Message]:
  </FACT TYPES>

  <PREVIOUS_MESSAGES>
- {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
+ {to_prompt_json([ep for ep in context['previous_episodes']], indent=2)}
  </PREVIOUS_MESSAGES>

  <CURRENT_MESSAGE>
@@ -81,7 +88,7 @@ def edge(context: dict[str, Any]) -> list[Message]:
  </CURRENT_MESSAGE>

  <ENTITIES>
- {context['nodes']}
+ {to_prompt_json(context['nodes'], indent=2)}
  </ENTITIES>

  <REFERENCE_TIME>
@@ -107,11 +114,12 @@ You may use information from the PREVIOUS MESSAGES only to disambiguate references

  # EXTRACTION RULES

- 1. Only emit facts where both the subject and object match IDs in ENTITIES.
+ 1. **Entity ID Validation**: `source_entity_id` and `target_entity_id` must use only the `id` values from the ENTITIES list provided above.
+ - **CRITICAL**: Using IDs not in the list will cause the edge to be rejected
  2. Each fact must involve two **distinct** entities.
  3. Use a SCREAMING_SNAKE_CASE string as the `relation_type` (e.g., FOUNDED, WORKS_AT).
  4. Do not emit duplicate or semantically redundant facts.
- 5. The `fact_text` should closely paraphrase the original source sentence(s). Do not verbatim quote the original text.
+ 5. The `fact` should closely paraphrase the original source sentence(s). Do not verbatim quote the original text.
  6. Use `REFERENCE_TIME` to resolve vague or relative temporal expressions (e.g., "last week").
  7. Do **not** hallucinate or infer temporal bounds from unrelated events.

@@ -133,7 +141,7 @@ def reflexion(context: dict[str, Any]) -> list[Message]:

  user_prompt = f"""
  <PREVIOUS MESSAGES>
- {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
+ {to_prompt_json([ep for ep in context['previous_episodes']], indent=2)}
  </PREVIOUS MESSAGES>
  <CURRENT MESSAGE>
  {context['episode_content']}
@@ -167,7 +175,7 @@ def extract_attributes(context: dict[str, Any]) -> list[Message]:
  content=f"""

  <MESSAGE>
- {to_prompt_json(context['episode_content'], ensure_ascii=context.get('ensure_ascii', False), indent=2)}
+ {to_prompt_json(context['episode_content'], indent=2)}
  </MESSAGE>
  <REFERENCE TIME>
  {context['reference_time']}
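
To illustrate the tightened extraction rules, a hedged sketch of a conforming edge, assuming an ENTITIES list whose serialized entries carry ids 0 and 1 (all names invented for illustration):

```python
# Hypothetical ENTITIES context, now serialized with to_prompt_json(..., indent=2).
entities = [{'id': 0, 'name': 'Alice'}, {'id': 1, 'name': 'Acme Corp'}]

# A payload satisfying the rules above: both ids come from ENTITIES (rule 1),
# the entities are distinct (rule 2), the predicate is SCREAMING_SNAKE_CASE
# (rule 3), and `fact` paraphrases rather than quotes the source (rule 5).
edge = {
    'relation_type': 'WORKS_AT',
    'source_entity_id': 0,
    'target_entity_id': 1,
    'fact': 'Alice works at Acme Corp',
    'valid_at': None,  # resolved against REFERENCE_TIME when the text states one (rule 6)
}
```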

graphiti_core/prompts/extract_nodes.py CHANGED
@@ -89,7 +89,7 @@ def extract_message(context: dict[str, Any]) -> list[Message]:
  </ENTITY TYPES>

  <PREVIOUS MESSAGES>
- {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json([ep for ep in context['previous_episodes']], indent=2)}
  </PREVIOUS MESSAGES>

  <CURRENT MESSAGE>
@@ -197,7 +197,7 @@ def reflexion(context: dict[str, Any]) -> list[Message]:

  user_prompt = f"""
  <PREVIOUS MESSAGES>
- {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json([ep for ep in context['previous_episodes']], indent=2)}
  </PREVIOUS MESSAGES>
  <CURRENT MESSAGE>
  {context['episode_content']}
@@ -221,7 +221,7 @@ def classify_nodes(context: dict[str, Any]) -> list[Message]:

  user_prompt = f"""
  <PREVIOUS MESSAGES>
- {to_prompt_json([ep for ep in context['previous_episodes']], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json([ep for ep in context['previous_episodes']], indent=2)}
  </PREVIOUS MESSAGES>
  <CURRENT MESSAGE>
  {context['episode_content']}
@@ -259,8 +259,8 @@ def extract_attributes(context: dict[str, Any]) -> list[Message]:
  content=f"""

  <MESSAGES>
- {to_prompt_json(context['previous_episodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
- {to_prompt_json(context['episode_content'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json(context['previous_episodes'], indent=2)}
+ {to_prompt_json(context['episode_content'], indent=2)}
  </MESSAGES>

  Given the above MESSAGES and the following ENTITY, update any of its attributes based on the information provided
@@ -289,8 +289,8 @@ def extract_summary(context: dict[str, Any]) -> list[Message]:
  content=f"""

  <MESSAGES>
- {to_prompt_json(context['previous_episodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
- {to_prompt_json(context['episode_content'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json(context['previous_episodes'], indent=2)}
+ {to_prompt_json(context['episode_content'], indent=2)}
  </MESSAGES>

  Given the above MESSAGES and the following ENTITY, update the summary that combines relevant information about the entity

graphiti_core/prompts/prompt_helpers.py CHANGED
@@ -4,20 +4,20 @@ from typing import Any
  DO_NOT_ESCAPE_UNICODE = '\nDo not escape unicode characters.\n'


- def to_prompt_json(data: Any, ensure_ascii: bool = True, indent: int = 2) -> str:
+ def to_prompt_json(data: Any, ensure_ascii: bool = False, indent: int = 2) -> str:
  """
  Serialize data to JSON for use in prompts.

  Args:
  data: The data to serialize
- ensure_ascii: If True, escape non-ASCII characters. If False, preserve them.
+ ensure_ascii: If True, escape non-ASCII characters. If False (default), preserve them.
  indent: Number of spaces for indentation

  Returns:
  JSON string representation of the data

  Notes:
- When ensure_ascii=False, non-ASCII characters (e.g., Korean, Japanese, Chinese)
+ By default (ensure_ascii=False), non-ASCII characters (e.g., Korean, Japanese, Chinese)
  are preserved in their original form in the prompt, making them readable
  in LLM logs and improving model understanding.
  """

graphiti_core/prompts/summarize_nodes.py CHANGED
@@ -59,7 +59,7 @@ def summarize_pair(context: dict[str, Any]) -> list[Message]:
  Summaries must be under 250 words.

  Summaries:
- {to_prompt_json(context['node_summaries'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json(context['node_summaries'], indent=2)}
  """,
  ),
  ]
@@ -76,8 +76,8 @@ def summarize_context(context: dict[str, Any]) -> list[Message]:
  content=f"""

  <MESSAGES>
- {to_prompt_json(context['previous_episodes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
- {to_prompt_json(context['episode_content'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json(context['previous_episodes'], indent=2)}
+ {to_prompt_json(context['episode_content'], indent=2)}
  </MESSAGES>

  Given the above MESSAGES and the following ENTITY name, create a summary for the ENTITY. Your summary must only use
@@ -100,7 +100,7 @@ def summarize_context(context: dict[str, Any]) -> list[Message]:
  </ENTITY CONTEXT>

  <ATTRIBUTES>
- {to_prompt_json(context['attributes'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json(context['attributes'], indent=2)}
  </ATTRIBUTES>
  """,
  ),
@@ -120,7 +120,7 @@ def summary_description(context: dict[str, Any]) -> list[Message]:
  Summaries must be under 250 words.

  Summary:
- {to_prompt_json(context['summary'], ensure_ascii=context.get('ensure_ascii', True), indent=2)}
+ {to_prompt_json(context['summary'], indent=2)}
  """,
  ),
  ]

graphiti_core/search/search_helpers.py CHANGED
@@ -24,9 +24,7 @@ def format_edge_date_range(edge: EntityEdge) -> str:
  return f'{edge.valid_at if edge.valid_at else "date unknown"} - {(edge.invalid_at if edge.invalid_at else "present")}'


- def search_results_to_context_string(
- search_results: SearchResults, ensure_ascii: bool = False
- ) -> str:
+ def search_results_to_context_string(search_results: SearchResults) -> str:
  """Reformats a set of SearchResults into a single string to pass directly to an LLM as context"""
  fact_json = [
  {
@@ -58,16 +56,16 @@ def search_results_to_context_string(
  These are the most relevant facts and their valid and invalid dates. Facts are considered valid
  between their valid_at and invalid_at dates. Facts with an invalid_at date of "Present" are considered valid.
  <FACTS>
- {to_prompt_json(fact_json, ensure_ascii=ensure_ascii, indent=12)}
+ {to_prompt_json(fact_json, indent=12)}
  </FACTS>
  <ENTITIES>
- {to_prompt_json(entity_json, ensure_ascii=ensure_ascii, indent=12)}
+ {to_prompt_json(entity_json, indent=12)}
  </ENTITIES>
  <EPISODES>
- {to_prompt_json(episode_json, ensure_ascii=ensure_ascii, indent=12)}
+ {to_prompt_json(episode_json, indent=12)}
  </EPISODES>
  <COMMUNITIES>
- {to_prompt_json(community_json, ensure_ascii=ensure_ascii, indent=12)}
+ {to_prompt_json(community_json, indent=12)}
  </COMMUNITIES>
  """


graphiti_core/utils/bulk_utils.py CHANGED
@@ -479,7 +479,6 @@ async def dedupe_edges_bulk(
  episode,
  edge_types,
  set(edge_types),
- clients.ensure_ascii,
  )
  for episode, edge, candidates in dedupe_tuples
  ]

graphiti_core/utils/maintenance/community_operations.py CHANGED
@@ -131,13 +131,10 @@ def label_propagation(projection: dict[str, list[Neighbor]]) -> list[list[str]]:
  return clusters


- async def summarize_pair(
- llm_client: LLMClient, summary_pair: tuple[str, str], ensure_ascii: bool = True
- ) -> str:
+ async def summarize_pair(llm_client: LLMClient, summary_pair: tuple[str, str]) -> str:
  # Prepare context for LLM
  context = {
  'node_summaries': [{'summary': summary} for summary in summary_pair],
- 'ensure_ascii': ensure_ascii,
  }

  llm_response = await llm_client.generate_response(
@@ -149,12 +146,9 @@ async def summarize_pair(
  return pair_summary


- async def generate_summary_description(
- llm_client: LLMClient, summary: str, ensure_ascii: bool = True
- ) -> str:
+ async def generate_summary_description(llm_client: LLMClient, summary: str) -> str:
  context = {
  'summary': summary,
- 'ensure_ascii': ensure_ascii,
  }

  llm_response = await llm_client.generate_response(
@@ -168,7 +162,7 @@ async def generate_summary_description(


  async def build_community(
- llm_client: LLMClient, community_cluster: list[EntityNode], ensure_ascii: bool = True
+ llm_client: LLMClient, community_cluster: list[EntityNode]
  ) -> tuple[CommunityNode, list[CommunityEdge]]:
  summaries = [entity.summary for entity in community_cluster]
  length = len(summaries)
@@ -180,9 +174,7 @@ async def build_community(
  new_summaries: list[str] = list(
  await semaphore_gather(
  *[
- summarize_pair(
- llm_client, (str(left_summary), str(right_summary)), ensure_ascii
- )
+ summarize_pair(llm_client, (str(left_summary), str(right_summary)))
  for left_summary, right_summary in zip(
  summaries[: int(length / 2)], summaries[int(length / 2) :], strict=False
  )
@@ -195,7 +187,7 @@ async def build_community(
  length = len(summaries)

  summary = summaries[0]
- name = await generate_summary_description(llm_client, summary, ensure_ascii)
+ name = await generate_summary_description(llm_client, summary)
  now = utc_now()
  community_node = CommunityNode(
  name=name,
@@ -215,7 +207,6 @@ async def build_communities(
  driver: GraphDriver,
  llm_client: LLMClient,
  group_ids: list[str] | None,
- ensure_ascii: bool = True,
  ) -> tuple[list[CommunityNode], list[CommunityEdge]]:
  community_clusters = await get_community_clusters(driver, group_ids)

@@ -223,7 +214,7 @@ async def build_communities(

  async def limited_build_community(cluster):
  async with semaphore:
- return await build_community(llm_client, cluster, ensure_ascii)
+ return await build_community(llm_client, cluster)

  communities: list[tuple[CommunityNode, list[CommunityEdge]]] = list(
  await semaphore_gather(
@@ -312,17 +303,14 @@ async def update_community(
  llm_client: LLMClient,
  embedder: EmbedderClient,
  entity: EntityNode,
- ensure_ascii: bool = True,
  ) -> tuple[list[CommunityNode], list[CommunityEdge]]:
  community, is_new = await determine_entity_community(driver, entity)

  if community is None:
  return [], []

- new_summary = await summarize_pair(
- llm_client, (entity.summary, community.summary), ensure_ascii
- )
- new_name = await generate_summary_description(llm_client, new_summary, ensure_ascii)
+ new_summary = await summarize_pair(llm_client, (entity.summary, community.summary))
+ new_name = await generate_summary_description(llm_client, new_summary)

  community.summary = new_summary
  community.name = new_name

graphiti_core/utils/maintenance/edge_operations.py CHANGED
@@ -130,7 +130,6 @@ async def extract_edges(
  'reference_time': episode.valid_at,
  'edge_types': edge_types_context,
  'custom_prompt': '',
- 'ensure_ascii': clients.ensure_ascii,
  }

  facts_missed = True
@@ -178,15 +177,26 @@ async def extract_edges(
  valid_at_datetime = None
  invalid_at_datetime = None

+ # Filter out empty edges
+ if not edge_data.fact.strip():
+ continue
+
  source_node_idx = edge_data.source_entity_id
  target_node_idx = edge_data.target_entity_id
- if not (-1 < source_node_idx < len(nodes) and -1 < target_node_idx < len(nodes)):
+
+ if len(nodes) == 0:
+ logger.warning('No entities provided for edge extraction')
+ continue
+
+ if not (0 <= source_node_idx < len(nodes) and 0 <= target_node_idx < len(nodes)):
  logger.warning(
- f'WARNING: source or target node not filled {edge_data.relation_type}. source_node_uuid: {source_node_idx} and target_node_uuid: {target_node_idx} '
+ f'Invalid entity IDs in edge extraction for {edge_data.relation_type}. '
+ f'source_entity_id: {source_node_idx}, target_entity_id: {target_node_idx}, '
+ f'but only {len(nodes)} entities available (valid range: 0-{len(nodes) - 1})'
  )
  continue
  source_node_uuid = nodes[source_node_idx].uuid
- target_node_uuid = nodes[edge_data.target_entity_id].uuid
+ target_node_uuid = nodes[target_node_idx].uuid

  if valid_at:
  try:
@@ -358,7 +368,6 @@ async def resolve_extracted_edges(
  episode,
  extracted_edge_types,
  custom_type_names,
- clients.ensure_ascii,
  )
  for extracted_edge, related_edges, existing_edges, extracted_edge_types in zip(
  extracted_edges,
@@ -431,7 +440,6 @@ async def resolve_extracted_edge(
  episode: EpisodicNode,
  edge_type_candidates: dict[str, type[BaseModel]] | None = None,
  custom_edge_type_names: set[str] | None = None,
- ensure_ascii: bool = True,
  ) -> tuple[EntityEdge, list[EntityEdge], list[EntityEdge]]:
  """Resolve an extracted edge against existing graph context.

@@ -453,8 +461,6 @@ async def resolve_extracted_edge(
  Full catalog of registered custom edge names. Used to distinguish
  between disallowed custom types (which fall back to the default label)
  and ad-hoc labels emitted by the LLM.
- ensure_ascii : bool
- Whether prompt payloads should coerce ASCII output.

  Returns
  -------
@@ -480,20 +486,19 @@ async def resolve_extracted_edge(
  start = time()

  # Prepare context for LLM
- related_edges_context = [{'id': i, 'fact': edge.fact} for i, edge in enumerate(related_edges)]
+ related_edges_context = [{'idx': i, 'fact': edge.fact} for i, edge in enumerate(related_edges)]

  invalidation_edge_candidates_context = [
- {'id': i, 'fact': existing_edge.fact} for i, existing_edge in enumerate(existing_edges)
+ {'idx': i, 'fact': existing_edge.fact} for i, existing_edge in enumerate(existing_edges)
  ]

  edge_types_context = (
  [
  {
- 'fact_type_id': i,
  'fact_type_name': type_name,
  'fact_type_description': type_model.__doc__,
  }
- for i, (type_name, type_model) in enumerate(edge_type_candidates.items())
+ for type_name, type_model in edge_type_candidates.items()
  ]
  if edge_type_candidates is not None
  else []
@@ -504,9 +509,17 @@ async def resolve_extracted_edge(
  'new_edge': extracted_edge.fact,
  'edge_invalidation_candidates': invalidation_edge_candidates_context,
  'edge_types': edge_types_context,
- 'ensure_ascii': ensure_ascii,
  }

+ if related_edges or existing_edges:
+ logger.debug(
+ 'Resolving edge: sent %d EXISTING FACTS%s and %d INVALIDATION CANDIDATES%s',
+ len(related_edges),
+ f' (idx 0-{len(related_edges) - 1})' if related_edges else '',
+ len(existing_edges),
+ f' (idx 0-{len(existing_edges) - 1})' if existing_edges else '',
+ )
+
  llm_response = await llm_client.generate_response(
  prompt_library.dedupe_edges.resolve_edge(context),
  response_model=EdgeDuplicate,
@@ -515,6 +528,15 @@ async def resolve_extracted_edge(
  response_object = EdgeDuplicate(**llm_response)
  duplicate_facts = response_object.duplicate_facts

+ # Validate duplicate_facts are in valid range for EXISTING FACTS
+ invalid_duplicates = [i for i in duplicate_facts if i < 0 or i >= len(related_edges)]
+ if invalid_duplicates:
+ logger.warning(
+ 'LLM returned invalid duplicate_facts idx values %s (valid range: 0-%d for EXISTING FACTS)',
+ invalid_duplicates,
+ len(related_edges) - 1,
+ )
+
  duplicate_fact_ids: list[int] = [i for i in duplicate_facts if 0 <= i < len(related_edges)]

  resolved_edge = extracted_edge
@@ -527,6 +549,15 @@ async def resolve_extracted_edge(

  contradicted_facts: list[int] = response_object.contradicted_facts

+ # Validate contradicted_facts are in valid range for INVALIDATION CANDIDATES
+ invalid_contradictions = [i for i in contradicted_facts if i < 0 or i >= len(existing_edges)]
+ if invalid_contradictions:
+ logger.warning(
+ 'LLM returned invalid contradicted_facts idx values %s (valid range: 0-%d for INVALIDATION CANDIDATES)',
+ invalid_contradictions,
+ len(existing_edges) - 1,
+ )
+
  invalidation_candidates: list[EntityEdge] = [
  existing_edges[i] for i in contradicted_facts if 0 <= i < len(existing_edges)
  ]
@@ -548,7 +579,6 @@ async def resolve_extracted_edge(
  'episode_content': episode.content,
  'reference_time': episode.valid_at,
  'fact': resolved_edge.fact,
- 'ensure_ascii': ensure_ascii,
  }

  edge_model = edge_type_candidates.get(fact_type) if edge_type_candidates else None
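
The validation added above follows a single pattern: keep in-range idx values, log the rest instead of raising. A distilled sketch of that pattern (`filter_valid_indices` is a hypothetical helper for illustration, not part of the package):

```python
import logging

logger = logging.getLogger(__name__)

def filter_valid_indices(returned: list[int], candidates: list, label: str) -> list[int]:
    # Warn about out-of-range idx values the LLM returned, then drop them,
    # mirroring the duplicate_facts / contradicted_facts handling above.
    invalid = [i for i in returned if i < 0 or i >= len(candidates)]
    if invalid:
        logger.warning(
            'LLM returned invalid %s idx values %s (valid range: 0-%d)',
            label, invalid, len(candidates) - 1,
        )
    return [i for i in returned if 0 <= i < len(candidates)]
```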

graphiti_core/utils/maintenance/node_operations.py CHANGED
@@ -64,14 +64,12 @@ async def extract_nodes_reflexion(
  episode: EpisodicNode,
  previous_episodes: list[EpisodicNode],
  node_names: list[str],
- ensure_ascii: bool = False,
  ) -> list[str]:
  # Prepare context for LLM
  context = {
  'episode_content': episode.content,
  'previous_episodes': [ep.content for ep in previous_episodes],
  'extracted_entities': node_names,
- 'ensure_ascii': ensure_ascii,
  }

  llm_response = await llm_client.generate_response(
@@ -124,7 +122,6 @@ async def extract_nodes(
  'custom_prompt': custom_prompt,
  'entity_types': entity_types_context,
  'source_description': episode.source_description,
- 'ensure_ascii': clients.ensure_ascii,
  }

  while entities_missed and reflexion_iterations <= MAX_REFLEXION_ITERATIONS:
@@ -155,7 +152,6 @@ async def extract_nodes(
  episode,
  previous_episodes,
  [entity.name for entity in extracted_entities],
- clients.ensure_ascii,
  )

  entities_missed = len(missing_entities) != 0
@@ -196,6 +192,7 @@ async def extract_nodes(
  logger.debug(f'Created new node: {new_node.name} (UUID: {new_node.uuid})')

  logger.debug(f'Extracted nodes: {[(n.name, n.uuid) for n in extracted_nodes]}')
+
  return extracted_nodes


@@ -239,7 +236,6 @@ async def _resolve_with_llm(
  extracted_nodes: list[EntityNode],
  indexes: DedupCandidateIndexes,
  state: DedupResolutionState,
- ensure_ascii: bool,
  episode: EpisodicNode | None,
  previous_episodes: list[EpisodicNode] | None,
  entity_types: dict[str, type[BaseModel]] | None,
@@ -309,7 +305,6 @@ async def _resolve_with_llm(
  'previous_episodes': (
  [ep.content for ep in previous_episodes] if previous_episodes is not None else []
  ),
- 'ensure_ascii': ensure_ascii,
  }

  llm_response = await llm_client.generate_response(
@@ -416,7 +411,6 @@ async def resolve_extracted_nodes(
  extracted_nodes,
  indexes,
  state,
- clients.ensure_ascii,
  episode,
  previous_episodes,
  entity_types,
@@ -465,7 +459,6 @@ async def extract_attributes_from_nodes(
  if entity_types is not None
  else None
  ),
- clients.ensure_ascii,
  should_summarize_node,
  )
  for node in nodes
@@ -483,7 +476,6 @@ async def extract_attributes_from_node(
  episode: EpisodicNode | None = None,
  previous_episodes: list[EpisodicNode] | None = None,
  entity_type: type[BaseModel] | None = None,
- ensure_ascii: bool = False,
  should_summarize_node: NodeSummaryFilter | None = None,
  ) -> EntityNode:
  node_context: dict[str, Any] = {
@@ -499,7 +491,6 @@ async def extract_attributes_from_node(
  'previous_episodes': (
  [ep.content for ep in previous_episodes] if previous_episodes is not None else []
  ),
- 'ensure_ascii': ensure_ascii,
  }

  summary_context: dict[str, Any] = {
@@ -508,7 +499,6 @@ async def extract_attributes_from_node(
  'previous_episodes': (
  [ep.content for ep in previous_episodes] if previous_episodes is not None else []
  ),
- 'ensure_ascii': ensure_ascii,
  }

  has_entity_attributes: bool = bool(

graphiti_core/utils/maintenance/temporal_operations.py CHANGED
@@ -35,14 +35,12 @@ async def extract_edge_dates(
  edge: EntityEdge,
  current_episode: EpisodicNode,
  previous_episodes: list[EpisodicNode],
- ensure_ascii: bool = False,
  ) -> tuple[datetime | None, datetime | None]:
  context = {
  'edge_fact': edge.fact,
  'current_episode': current_episode.content,
  'previous_episodes': [ep.content for ep in previous_episodes],
  'reference_timestamp': current_episode.valid_at.isoformat(),
- 'ensure_ascii': ensure_ascii,
  }
  llm_response = await llm_client.generate_response(
  prompt_library.extract_edge_dates.v1(context), response_model=EdgeDates
@@ -75,7 +73,6 @@ async def get_edge_contradictions(
  llm_client: LLMClient,
  new_edge: EntityEdge,
  existing_edges: list[EntityEdge],
- ensure_ascii: bool = False,
  ) -> list[EntityEdge]:
  start = time()

@@ -87,7 +84,6 @@ async def get_edge_contradictions(
  context = {
  'new_edge': new_edge_context,
  'existing_edges': existing_edge_context,
- 'ensure_ascii': ensure_ascii,
  }

  llm_response = await llm_client.generate_response(

graphiti_core-0.21.0rc11.dist-info/METADATA → graphiti_core-0.21.0rc13.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: graphiti-core
- Version: 0.21.0rc11
+ Version: 0.21.0rc13
  Summary: A temporal graph building library
  Project-URL: Homepage, https://help.getzep.com/graphiti/graphiti/overview
  Project-URL: Repository, https://github.com/getzep/graphiti

graphiti_core-0.21.0rc11.dist-info/RECORD → graphiti_core-0.21.0rc13.dist-info/RECORD CHANGED
@@ -2,8 +2,8 @@ graphiti_core/__init__.py,sha256=e5SWFkRiaUwfprYIeIgVIh7JDedNiloZvd3roU-0aDY,55
  graphiti_core/edges.py,sha256=2jA3x-9AGTldB52B5rWUhDtXXsj4PWM-MO1msIPsbdI,21048
  graphiti_core/errors.py,sha256=cH_v9TPgEPeQE6GFOHIg5TvejpUCBddGarMY2Whxbwc,2707
  graphiti_core/graph_queries.py,sha256=ZWMqAo5pwb8PO5ddg4zZ0ArhHWuWV42g3R9ULIxsHOs,8058
- graphiti_core/graphiti.py,sha256=5Y3SdcC_Ebhp-oqbbIxb0KGshWU24EQx4YYKvK8Id8g,41935
- graphiti_core/graphiti_types.py,sha256=C_p2XwScQlCzo7ets097TrSLs9ATxPZQ4WCsxDS7QHc,1066
+ graphiti_core/graphiti.py,sha256=msSHl27-N_P9QAMY-pOBiKjOP6eyJGZzrKIBu6gRZpw,41371
+ graphiti_core/graphiti_types.py,sha256=rL-9bvnLobunJfXU4hkD6mAj14pofKp_wq8QsFDZwDU,1035
  graphiti_core/helpers.py,sha256=q8kbL9gz8igdlh-oMUS-ylUyeMlXZb-ccf-HQkrES_0,5184
  graphiti_core/nodes.py,sha256=ox7uDYpaayc5J_mrbMaP-d-jACFx9R7Fb14tvh9aRI8,30426
  graphiti_core/py.typed,sha256=vlmmzQOt7bmeQl9L3XJP4W6Ry0iiELepnOrinKz5KQg,79
@@ -43,39 +43,39 @@ graphiti_core/models/edges/edge_db_queries.py,sha256=NWmcWkKyXLY1l81PtcTmv68SrT4
  graphiti_core/models/nodes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  graphiti_core/models/nodes/node_db_queries.py,sha256=TCHZKG5bQNarV9C5k4hOFFqc-LwTVQ8Pnd6okVVNKbo,12826
  graphiti_core/prompts/__init__.py,sha256=EA-x9xUki9l8wnu2l8ek_oNf75-do5tq5hVq7Zbv8Kw,101
- graphiti_core/prompts/dedupe_edges.py,sha256=WRXQi7JQZdIfKDICWyU7Wbs5WyD_KBblLBSeKdbLyuk,5914
- graphiti_core/prompts/dedupe_nodes.py,sha256=42txTc4QrXQDqM2XE8_USAB9vfsWUgvwS1no_EJNcbA,9320
- graphiti_core/prompts/eval.py,sha256=ijwxbE87G678imdhfPvRujepQMq_JZ3XHX4vOAcVnVI,5507
+ graphiti_core/prompts/dedupe_edges.py,sha256=Zf2Ry5ojOe8dNOY3-YzptBqZ07FfvabdpaNa983UMjM,6237
+ graphiti_core/prompts/dedupe_nodes.py,sha256=YNNo19Cq8koLVoLCafpjYJOy5nmRZ-tEWhvIcu39r-Q,8932
+ graphiti_core/prompts/eval.py,sha256=GWFkfZoPfY8U7mV8Ngd_5a2S2fHS7KjajChntxv1UEY,5360
  graphiti_core/prompts/extract_edge_dates.py,sha256=3Drs3CmvP0gJN5BidWSxrNvLet3HPoTybU3BUIAoc0Y,4218
- graphiti_core/prompts/extract_edges.py,sha256=S115_KnenGJLjmVMzdarXBRj2wJ6y553UfYJgUTTDZI,6920
- graphiti_core/prompts/extract_nodes.py,sha256=Ksf3PRBZ8LoZ5bOStZVRlHvVrdh3rT3xsepL81Ewy3M,11617
+ graphiti_core/prompts/extract_edges.py,sha256=-yOIvCPwxIAXeqYpNCzouE6i3WfdsexzRXFmcXpQpAg,7113
+ graphiti_core/prompts/extract_nodes.py,sha256=orbZiCqVL-4RNlckpUaQIq7Yb2JsIwT8e3ZAHRAEiLM,11281
  graphiti_core/prompts/invalidate_edges.py,sha256=yfpcs_pyctnoM77ULPZXEtKW0oHr1MeLsJzC5yrE-o4,3547
  graphiti_core/prompts/lib.py,sha256=DCyHePM4_q-CptTpEXGO_dBv9k7xDtclEaB1dGu7EcI,4092
  graphiti_core/prompts/models.py,sha256=NgxdbPHJpBEcpbXovKyScgpBc73Q-GIW-CBDlBtDjto,894
- graphiti_core/prompts/prompt_helpers.py,sha256=gMDDNqBpxcnTO9psJQm7QU7M6OQgRumFq4oGYiycrfM,795
- graphiti_core/prompts/summarize_nodes.py,sha256=tn6LPEv_nNFLjKuT_FB_st7TAIYOEUOg9QR5YG7PpMA,4437
+ graphiti_core/prompts/prompt_helpers.py,sha256=dpWbB8IYAqAZoU5qBx896jozKiQJTng4dGzWewZ_s4c,814
+ graphiti_core/prompts/summarize_nodes.py,sha256=p_TNDG66uY71QNDo9hyk4crAfyzyEKlb4_lML3fxeWU,4197
  graphiti_core/search/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  graphiti_core/search/search.py,sha256=2kj7fybSFv6Fnf_cfEUhJhrpfzNtmkPPZ0hV3BQCDqg,18387
  graphiti_core/search/search_config.py,sha256=v_rUHsu1yo5OuPfEm21lSuXexQs-o8qYwSSemW2QWhU,4165
  graphiti_core/search/search_config_recipes.py,sha256=4GquRphHhJlpXQhAZOySYnCzBWYoTwxlJj44eTOavZQ,7443
  graphiti_core/search/search_filters.py,sha256=DOAmYkc6A0z20EZId5fJZj1RvLz4WeQcoPANk9k-Sh8,10304
- graphiti_core/search/search_helpers.py,sha256=wj3ARlCNnZixNNntgCdAqzGoE4de4lW3r4rSG-3WyGw,2877
+ graphiti_core/search/search_helpers.py,sha256=o-t6JKNOvgUgyPG-grPbQGsSlUDxzsUOSB7NO1nTlIs,2735
  graphiti_core/search/search_utils.py,sha256=ak1aBeKNuxS7szydNHwva2ABWSRlQ0S_v8ZOx7k0wc4,76958
  graphiti_core/telemetry/__init__.py,sha256=5kALLDlU9bb2v19CdN7qVANsJWyfnL9E60J6FFgzm3o,226
  graphiti_core/telemetry/telemetry.py,sha256=47LrzOVBCcZxsYPsnSxWFiztHoxYKKxPwyRX0hnbDGc,3230
  graphiti_core/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- graphiti_core/utils/bulk_utils.py,sha256=7qjoqRxY9dFrj16iBMPMtkilxUH26dRuX6NaDaqL82E,20346
+ graphiti_core/utils/bulk_utils.py,sha256=YpVs5olzrAWVd8pIQ8xi1Ql_IsPdbVSahV1JPuwmG4o,20308
  graphiti_core/utils/datetime_utils.py,sha256=J-zYSq7-H-2n9hYOXNIun12kM10vNX9mMATGR_egTmY,1806
  graphiti_core/utils/maintenance/__init__.py,sha256=vW4H1KyapTl-OOz578uZABYcpND4wPx3Vt6aAPaXh78,301
- graphiti_core/utils/maintenance/community_operations.py,sha256=XMiokEemn96GlvjkOvbo9hIX04Fea3eVj408NHG5P4o,11042
+ graphiti_core/utils/maintenance/community_operations.py,sha256=3IMxfOacZAYtZKebyYtWJYNZPLOPlS8Il-lzitEkoos,10681
  graphiti_core/utils/maintenance/dedup_helpers.py,sha256=B7k6KkB6Sii8PZCWNNTvsNiy4BNTNWpoLeGgrPLq6BE,9220
- graphiti_core/utils/maintenance/edge_operations.py,sha256=p16cLA2eJeIYS9W0o1i8wYtvUpjt9mGWzRXVemAr7Bk,25305
+ graphiti_core/utils/maintenance/edge_operations.py,sha256=_vgUUIE8bOX3fIT9MeBwPRi21KWgDSlFGG59m_zThKw,26498
  graphiti_core/utils/maintenance/graph_data_operations.py,sha256=42icj3S_ELAJ-NK3jVS_rg_243dmnaZOyUitJj_uJ-M,6085
- graphiti_core/utils/maintenance/node_operations.py,sha256=gzIE32D3vCY0RorKwCz6wI9xS95BaiJoO1WyKzuDDKk,19014
- graphiti_core/utils/maintenance/temporal_operations.py,sha256=IIaVtShpVkOYe6haxz3a1x3v54-MzaEXG8VsxFUNeoY,3582
+ graphiti_core/utils/maintenance/node_operations.py,sha256=3Km0uimYxU2pcaT8a0jm4xTP01zR8bp2z8ZiFItRwr0,18623
+ graphiti_core/utils/maintenance/temporal_operations.py,sha256=wq1I4kqeIoswit6sPohug91FEwrGaVnJ06g1vkJjSLY,3442
  graphiti_core/utils/maintenance/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  graphiti_core/utils/ontology_utils/entity_types_utils.py,sha256=4eVgxLWY6Q8k9cRJ5pW59IYF--U4nXZsZIGOVb_yHfQ,1285
- graphiti_core-0.21.0rc11.dist-info/METADATA,sha256=LQry-ruHgk3SEGYON167542JVQ-udutxUGRpA7FbRYc,27085
- graphiti_core-0.21.0rc11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- graphiti_core-0.21.0rc11.dist-info/licenses/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
- graphiti_core-0.21.0rc11.dist-info/RECORD,,
+ graphiti_core-0.21.0rc13.dist-info/METADATA,sha256=DpoXBL7QKCncx28h4rgzZk76DOmqDXobyUlywI593Aw,27085
+ graphiti_core-0.21.0rc13.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ graphiti_core-0.21.0rc13.dist-info/licenses/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
+ graphiti_core-0.21.0rc13.dist-info/RECORD,,