graphiti-core 0.10.1 → 0.10.3 (py3-none-any.whl)

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of graphiti-core might be problematic.

--- a/graphiti_core/llm_client/anthropic_client.py
+++ b/graphiti_core/llm_client/anthropic_client.py
@@ -16,64 +16,317 @@ limitations under the License.
 
 import json
 import logging
+import os
 import typing
+from json import JSONDecodeError
+from typing import Literal
 
 import anthropic
 from anthropic import AsyncAnthropic
-from pydantic import BaseModel
+from anthropic.types import MessageParam, ToolChoiceParam, ToolUnionParam
+from pydantic import BaseModel, ValidationError
 
 from ..prompts.models import Message
 from .client import LLMClient
-from .config import LLMConfig
-from .errors import RateLimitError
+from .config import DEFAULT_MAX_TOKENS, LLMConfig
+from .errors import RateLimitError, RefusalError
 
 logger = logging.getLogger(__name__)
 
-DEFAULT_MODEL = 'claude-3-7-sonnet-latest'
-DEFAULT_MAX_TOKENS = 8192
+AnthropicModel = Literal[
+    'claude-3-7-sonnet-latest',
+    'claude-3-7-sonnet-20250219',
+    'claude-3-5-haiku-latest',
+    'claude-3-5-haiku-20241022',
+    'claude-3-5-sonnet-latest',
+    'claude-3-5-sonnet-20241022',
+    'claude-3-5-sonnet-20240620',
+    'claude-3-opus-latest',
+    'claude-3-opus-20240229',
+    'claude-3-sonnet-20240229',
+    'claude-3-haiku-20240307',
+    'claude-2.1',
+    'claude-2.0',
+]
+
+DEFAULT_MODEL: AnthropicModel = 'claude-3-7-sonnet-latest'
 
 
 class AnthropicClient(LLMClient):
-    def __init__(self, config: LLMConfig | None = None, cache: bool = False):
+    """
+    A client for the Anthropic LLM.
+
+    Args:
+        config: A configuration object for the LLM.
+        cache: Whether to cache the LLM responses.
+        client: An optional client instance to use.
+        max_tokens: The maximum number of tokens to generate.
+
+    Methods:
+        generate_response: Generate a response from the LLM.
+
+    Notes:
+        - If a LLMConfig is not provided, api_key will be pulled from the ANTHROPIC_API_KEY
+          environment variable, and all default values will be used for the LLMConfig.
+
+    """
+
+    model: AnthropicModel
+
+    def __init__(
+        self,
+        config: LLMConfig | None = None,
+        cache: bool = False,
+        client: AsyncAnthropic | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
+    ) -> None:
         if config is None:
-            config = LLMConfig(max_tokens=DEFAULT_MAX_TOKENS)
-        elif config.max_tokens is None:
-            config.max_tokens = DEFAULT_MAX_TOKENS
+            config = LLMConfig()
+            config.api_key = os.getenv('ANTHROPIC_API_KEY')
+            config.max_tokens = max_tokens
+
+        if config.model is None:
+            config.model = DEFAULT_MODEL
+
         super().__init__(config, cache)
+        # Explicitly set the instance model to the config model to prevent type checking errors
+        self.model = typing.cast(AnthropicModel, config.model)
 
-        self.client = AsyncAnthropic(
-            api_key=config.api_key,
-            # we'll use tenacity to retry
-            max_retries=1,
-        )
+        if not client:
+            self.client = AsyncAnthropic(
+                api_key=config.api_key,
+                max_retries=1,
+            )
+        else:
+            self.client = client
+
+    def _extract_json_from_text(self, text: str) -> dict[str, typing.Any]:
+        """Extract JSON from text content.
+
+        A helper method to extract JSON from text content, used when tool use fails or
+        no response_model is provided.
+
+        Args:
+            text: The text to extract JSON from
+
+        Returns:
+            Extracted JSON as a dictionary
+
+        Raises:
+            ValueError: If JSON cannot be extracted or parsed
+        """
+        try:
+            json_start = text.find('{')
+            json_end = text.rfind('}') + 1
+            if json_start >= 0 and json_end > json_start:
+                json_str = text[json_start:json_end]
+                return json.loads(json_str)
+            else:
+                raise ValueError(f'Could not extract JSON from model response: {text}')
+        except (JSONDecodeError, ValueError) as e:
+            raise ValueError(f'Could not extract JSON from model response: {text}') from e
+
+    def _create_tool(
+        self, response_model: type[BaseModel] | None = None
+    ) -> tuple[list[ToolUnionParam], ToolChoiceParam]:
+        """
+        Create a tool definition based on the response_model if provided, or a generic JSON tool if not.
+
+        Args:
+            response_model: Optional Pydantic model to use for structured output.
+
+        Returns:
+            A list containing a single tool definition for use with the Anthropic API.
+        """
+        if response_model is not None:
+            # temporary debug log
+            logger.info(f'Creating tool for response_model: {response_model}')
+            # Use the response_model to define the tool
+            model_schema = response_model.model_json_schema()
+            tool_name = response_model.__name__
+            description = model_schema.get('description', f'Extract {tool_name} information')
+        else:
+            # temporary debug log
+            logger.info('Creating generic JSON output tool')
+            # Create a generic JSON output tool
+            tool_name = 'generic_json_output'
+            description = 'Output data in JSON format'
+            model_schema = {
+                'type': 'object',
+                'additionalProperties': True,
+                'description': 'Any JSON object containing the requested information',
+            }
+
+        tool = {
+            'name': tool_name,
+            'description': description,
+            'input_schema': model_schema,
+        }
+        tool_list = [tool]
+        tool_list_cast = typing.cast(list[ToolUnionParam], tool_list)
+        tool_choice = {'type': 'tool', 'name': tool_name}
+        tool_choice_cast = typing.cast(ToolChoiceParam, tool_choice)
+        return tool_list_cast, tool_choice_cast
 
     async def _generate_response(
         self,
         messages: list[Message],
         response_model: type[BaseModel] | None = None,
-        max_tokens: int = DEFAULT_MAX_TOKENS,
+        max_tokens: int | None = None,
    ) -> dict[str, typing.Any]:
+        """
+        Generate a response from the Anthropic LLM using tool-based approach for all requests.
+
+        Args:
+            messages: List of message objects to send to the LLM.
+            response_model: Optional Pydantic model to use for structured output.
+            max_tokens: Maximum number of tokens to generate.
+
+        Returns:
+            Dictionary containing the structured response from the LLM.
+
+        Raises:
+            RateLimitError: If the rate limit is exceeded.
+            RefusalError: If the LLM refuses to respond.
+            Exception: If an error occurs during the generation process.
+        """
         system_message = messages[0]
-        user_messages = [{'role': m.role, 'content': m.content} for m in messages[1:]] + [
-            {'role': 'assistant', 'content': '{'}
-        ]
+        user_messages = [{'role': m.role, 'content': m.content} for m in messages[1:]]
+        user_messages_cast = typing.cast(list[MessageParam], user_messages)
 
-        # Ensure max_tokens is not greater than config.max_tokens or DEFAULT_MAX_TOKENS
-        max_tokens = min(max_tokens, self.config.max_tokens, DEFAULT_MAX_TOKENS)
+        # TODO: Replace hacky min finding solution after fixing hardcoded EXTRACT_EDGES_MAX_TOKENS = 16384 in
+        # edge_operations.py. Throws errors with cheaper models that lower max_tokens.
+        max_creation_tokens: int = min(
+            max_tokens if max_tokens is not None else self.config.max_tokens,
+            DEFAULT_MAX_TOKENS,
+        )
 
         try:
+            # Create the appropriate tool based on whether response_model is provided
+            tools, tool_choice = self._create_tool(response_model)
+            # temporary debug log
+            logger.info(f'using model: {self.model} with max_tokens: {self.max_tokens}')
             result = await self.client.messages.create(
-                system='Only include JSON in the response. Do not include any additional text or explanation of the content.\n'
-                + system_message.content,
-                max_tokens=max_tokens,
+                system=system_message.content,
+                max_tokens=max_creation_tokens,
                 temperature=self.temperature,
-                messages=user_messages,  # type: ignore
-                model=self.model or DEFAULT_MODEL,
+                messages=user_messages_cast,
+                model=self.model,
+                tools=tools,
+                tool_choice=tool_choice,
+            )
+
+            # Extract the tool output from the response
+            for content_item in result.content:
+                if content_item.type == 'tool_use':
+                    if isinstance(content_item.input, dict):
+                        tool_args: dict[str, typing.Any] = content_item.input
+                    else:
+                        tool_args = json.loads(str(content_item.input))
+                    return tool_args
+
+            # If we didn't get a proper tool_use response, try to extract from text
+            # logger.debug(
+            #     f'Did not get a tool_use response, trying to extract json from text. Result: {result.content}'
+            # )
+            # temporary debug log
+            logger.info(
+                f'Did not get a tool_use response, trying to extract json from text. Result: {result.content}'
+            )
+            for content_item in result.content:
+                if content_item.type == 'text':
+                    return self._extract_json_from_text(content_item.text)
+                else:
+                    raise ValueError(
+                        f'Could not extract structured data from model response: {result.content}'
+                    )
+
+            # If we get here, we couldn't parse a structured response
+            raise ValueError(
+                f'Could not extract structured data from model response: {result.content}'
             )
 
-            return json.loads('{' + result.content[0].text)  # type: ignore
         except anthropic.RateLimitError as e:
-            raise RateLimitError from e
+            raise RateLimitError(f'Rate limit exceeded. Please try again later. Error: {e}') from e
+        except anthropic.APIError as e:
+            # Special case for content policy violations. We convert these to RefusalError
+            # to bypass the retry mechanism, as retrying policy-violating content will always fail.
+            # This avoids wasting API calls and provides more specific error messaging to the user.
+            if 'refused to respond' in str(e).lower():
+                raise RefusalError(str(e)) from e
+            raise e
         except Exception as e:
-            logger.error(f'Error in generating LLM response: {e}')
-            raise
+            raise e
+
+    async def generate_response(
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
+    ) -> dict[str, typing.Any]:
+        """
+        Generate a response from the LLM.
+
+        Args:
+            messages: List of message objects to send to the LLM.
+            response_model: Optional Pydantic model to use for structured output.
+            max_tokens: Maximum number of tokens to generate.
+
+        Returns:
+            Dictionary containing the structured response from the LLM.
+
+        Raises:
+            RateLimitError: If the rate limit is exceeded.
+            RefusalError: If the LLM refuses to respond.
+            Exception: If an error occurs during the generation process.
+        """
+        retry_count = 0
+        max_retries = 2
+        last_error: Exception | None = None
+
+        while retry_count <= max_retries:
+            try:
+                response = await self._generate_response(messages, response_model, max_tokens)
+
+                # If we have a response_model, attempt to validate the response
+                if response_model is not None:
+                    # Validate the response against the response_model
+                    model_instance = response_model(**response)
+                    return model_instance.model_dump()
+
+                # If no validation needed, return the response
+                return response
+
+            except (RateLimitError, RefusalError):
+                # These errors should not trigger retries
+                raise
+            except Exception as e:
+                last_error = e
+
+                if retry_count >= max_retries:
+                    if isinstance(e, ValidationError):
+                        logger.error(
+                            f'Validation error after {retry_count}/{max_retries} attempts: {e}'
+                        )
+                    else:
+                        logger.error(f'Max retries ({max_retries}) exceeded. Last error: {e}')
+                    raise e
+
+                if isinstance(e, ValidationError):
+                    response_model_cast = typing.cast(type[BaseModel], response_model)
+                    error_context = f'The previous response was invalid. Please provide a valid {response_model_cast.__name__} object. Error: {e}'
+                else:
+                    error_context = (
+                        f'The previous response attempt was invalid. '
+                        f'Error type: {e.__class__.__name__}. '
+                        f'Error details: {str(e)}. '
+                        f'Please try again with a valid response.'
+                    )
+
+                # Common retry logic
+                retry_count += 1
+                messages.append(Message(role='user', content=error_context))
+                logger.warning(f'Retrying after error (attempt {retry_count}/{max_retries}): {e}')
+
+        # If we somehow get here, raise the last error
+        raise last_error or Exception('Max retries exceeded with no specific error')
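
Taken together, the rewritten client stops prompting for raw JSON (the old trick of appending an assistant turn containing '{') and instead forces a tool call whose input schema is derived from the response_model, with validation and up to two retries layered on top. A minimal usage sketch assuming only the names shown in the hunk above (the Summary model and the message contents are hypothetical, and LLMConfig is assumed to accept api_key as a keyword):

import asyncio

from pydantic import BaseModel

from graphiti_core.llm_client.anthropic_client import AnthropicClient
from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.prompts.models import Message


class Summary(BaseModel):  # hypothetical response model for illustration
    summary: str


async def main() -> None:
    # With config=None, the client instead reads ANTHROPIC_API_KEY from the environment.
    client = AnthropicClient(config=LLMConfig(api_key='sk-ant-...'))

    # The first message is sent as the system prompt; the rest as user turns.
    messages = [
        Message(role='system', content='You summarize text.'),
        Message(role='user', content='Summarize: Graphiti builds temporal graphs.'),
    ]

    # response_model drives the generated tool schema; the result is a dict that
    # has been validated against Summary (with up to two retries on validation errors).
    result = await client.generate_response(messages, response_model=Summary)
    print(result['summary'])


asyncio.run(main())
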

--- a/graphiti_core/llm_client/errors.py
+++ b/graphiti_core/llm_client/errors.py
@@ -29,3 +29,11 @@ class RefusalError(Exception):
     def __init__(self, message: str):
         self.message = message
         super().__init__(self.message)
+
+
+class EmptyResponseError(Exception):
+    """Exception raised when the LLM returns an empty response."""
+
+    def __init__(self, message: str):
+        self.message = message
+        super().__init__(self.message)
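
errors.py now exposes three caller-facing exception types. A short sketch of how downstream code might tell them apart (the safe_generate wrapper and its handling choices are hypothetical; only the exception classes come from the diff, and this diff does not show which code path raises EmptyResponseError):

from graphiti_core.llm_client.errors import EmptyResponseError, RateLimitError, RefusalError


async def safe_generate(client, messages):  # hypothetical helper for illustration
    try:
        return await client.generate_response(messages)
    except RateLimitError:
        raise  # transient: the caller can back off and retry later
    except RefusalError:
        raise  # permanent for this input: retrying a policy refusal will not help
    except EmptyResponseError:
        return {}  # new in 0.10.3: the model returned no usable content
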

--- a/graphiti_core/prompts/summarize_nodes.py
+++ b/graphiti_core/prompts/summarize_nodes.py
@@ -89,6 +89,7 @@ def summarize_context(context: dict[str, Any]) -> list[Message]:
 
         Guidelines:
         1. Do not hallucinate entity property values if they cannot be found in the current context.
+        2. Only use the provided messages, entity, and entity context to set attribute values.
 
         <ENTITY>
         {context['node_name']}

--- a/graphiti_core/search/search.py
+++ b/graphiti_core/search/search.py
@@ -210,7 +210,6 @@ async def edge_search(
             query_vector,
             search_result_uuids_and_vectors,
             config.mmr_lambda,
-            min_score=reranker_min_score,
         )
     elif config.reranker == EdgeReranker.cross_encoder:
         search_result_uuids = [[edge.uuid for edge in result] for result in search_results]
@@ -310,7 +309,6 @@ async def node_search(
             query_vector,
             search_result_uuids_and_vectors,
             config.mmr_lambda,
-            min_score=reranker_min_score,
         )
     elif config.reranker == NodeReranker.cross_encoder:
         # use rrf as a preliminary reranker
@@ -437,7 +435,6 @@ async def community_search(
             query_vector,
             search_result_uuids_and_vectors,
             config.mmr_lambda,
-            min_score=reranker_min_score,
         )
     elif config.reranker == CommunityReranker.cross_encoder:
         summary_to_uuid_map = {
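
All three search paths (edge, node, community) make the same change: the MMR reranker is now called without a score cutoff, so candidates are no longer filtered inside maximal_marginal_relevance. A sketch of the caller-visible effect with toy 2-D embeddings (the import path is taken from the package layout; the vectors are invented, and the claim about 'edge-c' assumes the function's unchanged loop uses cosine-style similarity):

from graphiti_core.search.search_utils import maximal_marginal_relevance

# Toy (uuid, embedding) candidates -- illustrative only.
candidates = [
    ('edge-a', [1.0, 0.0]),
    ('edge-b', [0.8, 0.6]),
    ('edge-c', [-1.0, 0.0]),
]
query_vector = [1.0, 0.0]

# As of 0.10.3 there is no min_score parameter: even 'edge-c', whose MMR score
# would be negative under cosine similarity (and was dropped by the old default
# cutoff of 0), is now returned.
uuids = maximal_marginal_relevance(query_vector, candidates, mmr_lambda=0.5)
print(sorted(uuids))
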

--- a/graphiti_core/search/search_utils.py
+++ b/graphiti_core/search/search_utils.py
@@ -230,8 +230,8 @@ async def edge_similarity_search(
 
     query: LiteralString = (
         """
-        MATCH (n:Entity)-[r:RELATES_TO]->(m:Entity)
-        """
+            MATCH (n:Entity)-[r:RELATES_TO]->(m:Entity)
+            """
         + group_filter_query
         + filter_query
         + """\nWITH DISTINCT r, vector.similarity.cosine(r.fact_embedding, $search_vector) AS score
@@ -852,7 +852,6 @@ def maximal_marginal_relevance(
     query_vector: list[float],
     candidates: list[tuple[str, list[float]]],
     mmr_lambda: float = DEFAULT_MMR_LAMBDA,
-    min_score: float = 0,
 ):
     candidates_with_mmr: list[tuple[str, float]] = []
     for candidate in candidates:
@@ -862,6 +861,4 @@ def maximal_marginal_relevance(
 
     candidates_with_mmr.sort(reverse=True, key=lambda c: c[1])
 
-    return list(
-        set([candidate[0] for candidate in candidates_with_mmr if candidate[1] >= min_score])
-    )
+    return list(set([candidate[0] for candidate in candidates_with_mmr]))
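
The behavioral effect of the last two hunks is easiest to see with concrete scores. A self-contained sketch of the old versus new return logic (the uuids and MMR scores are made up; the loop that produces candidates_with_mmr is unchanged and not shown in this diff):

# (uuid, mmr_score) pairs as built by the loop above -- values invented for illustration.
candidates_with_mmr = [('edge-a', 0.82), ('edge-b', 0.41), ('edge-c', -0.10)]
candidates_with_mmr.sort(reverse=True, key=lambda c: c[1])

# 0.10.1: the default min_score=0 cutoff silently dropped 'edge-c'.
old_result = list(set(c[0] for c in candidates_with_mmr if c[1] >= 0))
print(sorted(old_result))  # ['edge-a', 'edge-b']

# 0.10.3: every candidate is returned; note that set() still discards the MMR ordering.
new_result = list(set(c[0] for c in candidates_with_mmr))
print(sorted(new_result))  # ['edge-a', 'edge-b', 'edge-c']
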

--- a/graphiti_core/utils/maintenance/node_operations.py
+++ b/graphiti_core/utils/maintenance/node_operations.py
@@ -17,6 +17,7 @@ limitations under the License.
 import logging
 from contextlib import suppress
 from time import time
+from typing import Any
 
 import pydantic
 from pydantic import BaseModel
@@ -324,16 +325,17 @@ async def resolve_extracted_node(
         else [],
     }
 
-    summary_context = {
+    summary_context: dict[str, Any] = {
         'node_name': extracted_node.name,
         'node_summary': extracted_node.summary,
         'episode_content': episode.content if episode is not None else '',
         'previous_episodes': [ep.content for ep in previous_episodes]
         if previous_episodes is not None
         else [],
-        'attributes': [],
     }
 
+    attributes: list[dict[str, str]] = []
+
     entity_type_classes: tuple[BaseModel, ...] = tuple()
     if entity_types is not None:  # type: ignore
         entity_type_classes = entity_type_classes + tuple(
@@ -344,8 +346,15 @@ async def resolve_extracted_node(
     )
 
     for entity_type in entity_type_classes:
-        for field_name in entity_type.model_fields:
-            summary_context.get('attributes', []).append(field_name)  # type: ignore
+        for field_name, field_info in entity_type.model_fields.items():
+            attributes.append(
+                {
+                    'attribute_name': field_name,
+                    'attribute_description': field_info.description or '',
+                }
+            )
+
+    summary_context['attributes'] = attributes
 
     entity_attributes_model = pydantic.create_model(  # type: ignore
         'EntityAttributes',
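
The practical effect is that the summarization prompt now receives each attribute's name together with its description, rather than bare field names. A minimal sketch of what the loop produces for a single entity type (the Person model is hypothetical; model_fields and FieldInfo.description are standard Pydantic v2):

from pydantic import BaseModel, Field


class Person(BaseModel):  # hypothetical entity type for illustration
    occupation: str = Field(description='The primary occupation of the person')
    hometown: str = Field(description='Where the person grew up')


attributes: list[dict[str, str]] = []
for field_name, field_info in Person.model_fields.items():
    attributes.append(
        {
            'attribute_name': field_name,
            'attribute_description': field_info.description or '',
        }
    )

# attributes == [
#     {'attribute_name': 'occupation',
#      'attribute_description': 'The primary occupation of the person'},
#     {'attribute_name': 'hometown',
#      'attribute_description': 'Where the person grew up'},
# ]
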
--- a/graphiti_core-0.10.1.dist-info/METADATA
+++ b/graphiti_core-0.10.3.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: graphiti-core
-Version: 0.10.1
+Version: 0.10.3
 Summary: A temporal graph building library
 License: Apache-2.0
 Author: Paul Paliychuk

--- a/graphiti_core-0.10.1.dist-info/RECORD
+++ b/graphiti_core-0.10.3.dist-info/RECORD
@@ -13,10 +13,10 @@ graphiti_core/errors.py,sha256=Nib1uQx2cO_VOizupmRjpFfmuRg-hFAVqTtZAuBehR8,2405
 graphiti_core/graphiti.py,sha256=mdIkciD4o3fjD5PapshKvNv0F6eE76rXZCAB00vcDG8,30807
 graphiti_core/helpers.py,sha256=7BQzUBFmoBDA2OIDdFtoN4W-vXOhPRIsF0uDb7PsNi0,2913
 graphiti_core/llm_client/__init__.py,sha256=PA80TSMeX-sUXITXEAxMDEt3gtfZgcJrGJUcyds1mSo,207
-graphiti_core/llm_client/anthropic_client.py,sha256=dTM8rKhk9TZAU4O-0jFMivOwJvWM-gHpp5gLmuJHiGQ,2723
+graphiti_core/llm_client/anthropic_client.py,sha256=qVLtRdlYksdl221lIBv7saOmuJtgG5p3af0HUsVxseM,12926
 graphiti_core/llm_client/client.py,sha256=jLyrn--opI6_ekCgmCaHLvC9XRhU25HYeoKVIivPHgQ,5507
 graphiti_core/llm_client/config.py,sha256=JO-biZwGwakTKGKyNQQqt9fjqyXxtqP-h0sB4wsJ2Kk,2339
-graphiti_core/llm_client/errors.py,sha256=Vk0mj2SgNDg8E8p7m1UyUaerqLPNLCDKPVsMEnOSBdQ,1028
+graphiti_core/llm_client/errors.py,sha256=pn6brRiLW60DAUIXJYKBT6MInrS4ueuH1hNLbn_JbQo,1243
 graphiti_core/llm_client/gemini_client.py,sha256=uibmwppDgkEv60FsIhS-oakuafTUFgpu3qr5Kdcbhz4,7321
 graphiti_core/llm_client/groq_client.py,sha256=EesX0_iFOIvvIc3ql6Xa8EOm0dbGJ_o3VpqyDM83mKg,2498
 graphiti_core/llm_client/openai_client.py,sha256=ZqkBa127i5gCIA2UoiXuPDM-357xFK_uwKlgafDhVyQ,6989
@@ -39,15 +39,15 @@ graphiti_core/prompts/invalidate_edges.py,sha256=DV2mEyIhhjc0hdKEMFLQMeG0FiUCkv_
 graphiti_core/prompts/lib.py,sha256=DCyHePM4_q-CptTpEXGO_dBv9k7xDtclEaB1dGu7EcI,4092
 graphiti_core/prompts/models.py,sha256=NgxdbPHJpBEcpbXovKyScgpBc73Q-GIW-CBDlBtDjto,894
 graphiti_core/prompts/prompt_helpers.py,sha256=-9TABwIcIQUVHcNANx6wIZd-FT2DgYKyGTfx4IGYq2I,64
-graphiti_core/prompts/summarize_nodes.py,sha256=PeA1Taov5KBNNBKgrCPeF1tLg4_SMgT-Ilz2P6xbx-M,4051
+graphiti_core/prompts/summarize_nodes.py,sha256=CDXeWCbv34BcHn6I9lkYN-YqQv98bSt0cOZMnVho2Lk,4146
 graphiti_core/py.typed,sha256=vlmmzQOt7bmeQl9L3XJP4W6Ry0iiELepnOrinKz5KQg,79
 graphiti_core/search/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-graphiti_core/search/search.py,sha256=3D460AUKNn6llVkTghpautP925VDKwaQXXutntZTvRg,15745
+graphiti_core/search/search.py,sha256=zI_KVSyu13zdiT0OyIYhownXJMTzQJSXqyCYt4o07Hk,15619
 graphiti_core/search/search_config.py,sha256=VvKg6AB_RPhoe56DBBXHRBXHThAVJ_OLFCyq_yKof-A,3765
 graphiti_core/search/search_config_recipes.py,sha256=4GquRphHhJlpXQhAZOySYnCzBWYoTwxlJj44eTOavZQ,7443
 graphiti_core/search/search_filters.py,sha256=JkP7NbM4Dor27dne5vAuxbJic12dIJDtWJxNqmVuRec,5884
 graphiti_core/search/search_helpers.py,sha256=G5Ceaq5Pfgx0Weelqgeylp_pUHwiBnINaUYsDbURJbE,2636
-graphiti_core/search/search_utils.py,sha256=_cX57B4azdeqHGpZidRj8t5jLFWMb1cOrRW95AKdlqw,27431
+graphiti_core/search/search_utils.py,sha256=rravGcYaWzAYMyeNw8jUztpND4jr7k9M_1H06KJiig4,27370
 graphiti_core/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 graphiti_core/utils/bulk_utils.py,sha256=P4LKO46Yle4tBdNcQ3hDHcSQFaR8UBLfoL-z1M2Wua0,14690
 graphiti_core/utils/datetime_utils.py,sha256=Ti-2tnrDFRzBsbfblzsHybsM3jaDLP4-VT2t0VhpIzU,1357
@@ -55,11 +55,11 @@ graphiti_core/utils/maintenance/__init__.py,sha256=vW4H1KyapTl-OOz578uZABYcpND4w
 graphiti_core/utils/maintenance/community_operations.py,sha256=pQUv0pfInC1Pho7C4BN8gC3_bks7wRAZpJn2bmw6gT8,10008
 graphiti_core/utils/maintenance/edge_operations.py,sha256=9i0PBgaW3dLPTLmx-9j1W86Rb4sPc1bG4Y3TjPn07Gg,12794
 graphiti_core/utils/maintenance/graph_data_operations.py,sha256=F3dkXg63nJU7OFLzaiT67a2_kFiWSU9Vr59iIHgsXQs,7030
-graphiti_core/utils/maintenance/node_operations.py,sha256=WhZQixx05dAFFQAd5KTXJ8Gc8YCahCAAW9uPCucjyBw,15556
+graphiti_core/utils/maintenance/node_operations.py,sha256=7TLBBJ5m-3_UK53_UNm0hQJh3iiJP0tIVG7Sd6oOioE,15801
 graphiti_core/utils/maintenance/temporal_operations.py,sha256=RdNtubCyYhOVrvcOIq2WppHls1Q-BEjtsN8r38l-Rtc,3691
 graphiti_core/utils/maintenance/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 graphiti_core/utils/ontology_utils/entity_types_utils.py,sha256=QJX5cG0GSSNF_Mm_yrldr69wjVAbN_MxLhOSznz85Hk,1279
-graphiti_core-0.10.1.dist-info/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
-graphiti_core-0.10.1.dist-info/METADATA,sha256=fAqTBXkTPCDh-bw0798LlCmjprM3ugqejB-HopOGR4o,14659
-graphiti_core-0.10.1.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
-graphiti_core-0.10.1.dist-info/RECORD,,
+graphiti_core-0.10.3.dist-info/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
+graphiti_core-0.10.3.dist-info/METADATA,sha256=3tU3LJE6tj6tNi8WvIe32iKCA3QjFjzH7BPSELnu70Y,14659
+graphiti_core-0.10.3.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+graphiti_core-0.10.3.dist-info/RECORD,,