graphiti-core 0.10.0__tar.gz → 0.10.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (66)
  1. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/PKG-INFO +1 -1
  2. graphiti_core-0.10.2/graphiti_core/llm_client/anthropic_client.py +332 -0
  3. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/llm_client/errors.py +8 -0
  4. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/nodes.py +6 -6
  5. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/summarize_nodes.py +1 -0
  6. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/maintenance/node_operations.py +13 -4
  7. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/pyproject.toml +1 -1
  8. graphiti_core-0.10.0/graphiti_core/llm_client/anthropic_client.py +0 -79
  9. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/LICENSE +0 -0
  10. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/README.md +0 -0
  11. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/__init__.py +0 -0
  12. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/cross_encoder/__init__.py +0 -0
  13. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/cross_encoder/bge_reranker_client.py +0 -0
  14. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/cross_encoder/client.py +0 -0
  15. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/cross_encoder/openai_reranker_client.py +0 -0
  16. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/edges.py +0 -0
  17. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/embedder/__init__.py +0 -0
  18. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/embedder/client.py +0 -0
  19. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/embedder/gemini.py +0 -0
  20. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/embedder/openai.py +0 -0
  21. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/embedder/voyage.py +0 -0
  22. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/errors.py +0 -0
  23. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/graphiti.py +0 -0
  24. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/helpers.py +0 -0
  25. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/llm_client/__init__.py +0 -0
  26. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/llm_client/client.py +0 -0
  27. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/llm_client/config.py +0 -0
  28. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/llm_client/gemini_client.py +0 -0
  29. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/llm_client/groq_client.py +0 -0
  30. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/llm_client/openai_client.py +0 -0
  31. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/llm_client/openai_generic_client.py +0 -0
  32. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/llm_client/utils.py +0 -0
  33. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/models/__init__.py +0 -0
  34. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/models/edges/__init__.py +0 -0
  35. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/models/edges/edge_db_queries.py +0 -0
  36. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/models/nodes/__init__.py +0 -0
  37. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/models/nodes/node_db_queries.py +0 -0
  38. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/__init__.py +0 -0
  39. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/dedupe_edges.py +0 -0
  40. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/dedupe_nodes.py +0 -0
  41. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/eval.py +0 -0
  42. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/extract_edge_dates.py +0 -0
  43. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/extract_edges.py +0 -0
  44. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/extract_nodes.py +0 -0
  45. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/invalidate_edges.py +0 -0
  46. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/lib.py +0 -0
  47. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/models.py +0 -0
  48. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/prompt_helpers.py +0 -0
  49. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/py.typed +0 -0
  50. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/search/__init__.py +0 -0
  51. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/search/search.py +0 -0
  52. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/search/search_config.py +0 -0
  53. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/search/search_config_recipes.py +0 -0
  54. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/search/search_filters.py +0 -0
  55. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/search/search_helpers.py +0 -0
  56. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/search/search_utils.py +0 -0
  57. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/__init__.py +0 -0
  58. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/bulk_utils.py +0 -0
  59. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/datetime_utils.py +0 -0
  60. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/maintenance/__init__.py +0 -0
  61. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/maintenance/community_operations.py +0 -0
  62. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/maintenance/edge_operations.py +0 -0
  63. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/maintenance/graph_data_operations.py +0 -0
  64. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/maintenance/temporal_operations.py +0 -0
  65. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/maintenance/utils.py +0 -0
  66. {graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/ontology_utils/entity_types_utils.py +0 -0

{graphiti_core-0.10.0 → graphiti_core-0.10.2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: graphiti-core
-Version: 0.10.0
+Version: 0.10.2
 Summary: A temporal graph building library
 License: Apache-2.0
 Author: Paul Paliychuk

graphiti_core-0.10.2/graphiti_core/llm_client/anthropic_client.py (new file)
@@ -0,0 +1,332 @@
+"""
+Copyright 2024, Zep Software, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import json
+import logging
+import os
+import typing
+from json import JSONDecodeError
+from typing import Literal
+
+import anthropic
+from anthropic import AsyncAnthropic
+from anthropic.types import MessageParam, ToolChoiceParam, ToolUnionParam
+from pydantic import BaseModel, ValidationError
+
+from ..prompts.models import Message
+from .client import LLMClient
+from .config import DEFAULT_MAX_TOKENS, LLMConfig
+from .errors import RateLimitError, RefusalError
+
+logger = logging.getLogger(__name__)
+
+AnthropicModel = Literal[
+    'claude-3-7-sonnet-latest',
+    'claude-3-7-sonnet-20250219',
+    'claude-3-5-haiku-latest',
+    'claude-3-5-haiku-20241022',
+    'claude-3-5-sonnet-latest',
+    'claude-3-5-sonnet-20241022',
+    'claude-3-5-sonnet-20240620',
+    'claude-3-opus-latest',
+    'claude-3-opus-20240229',
+    'claude-3-sonnet-20240229',
+    'claude-3-haiku-20240307',
+    'claude-2.1',
+    'claude-2.0',
+]
+
+DEFAULT_MODEL: AnthropicModel = 'claude-3-7-sonnet-latest'
+
+
+class AnthropicClient(LLMClient):
+    """
+    A client for the Anthropic LLM.
+
+    Args:
+        config: A configuration object for the LLM.
+        cache: Whether to cache the LLM responses.
+        client: An optional client instance to use.
+        max_tokens: The maximum number of tokens to generate.
+
+    Methods:
+        generate_response: Generate a response from the LLM.
+
+    Notes:
+        - If a LLMConfig is not provided, api_key will be pulled from the ANTHROPIC_API_KEY environment
+          variable, and all default values will be used for the LLMConfig.
+
+    """
+
+    model: AnthropicModel
+
+    def __init__(
+        self,
+        config: LLMConfig | None = None,
+        cache: bool = False,
+        client: AsyncAnthropic | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
+    ) -> None:
+        if config is None:
+            config = LLMConfig()
+            config.api_key = os.getenv('ANTHROPIC_API_KEY')
+        config.max_tokens = max_tokens
+
+        if config.model is None:
+            config.model = DEFAULT_MODEL
+
+        super().__init__(config, cache)
+        # Explicitly set the instance model to the config model to prevent type checking errors
+        self.model = typing.cast(AnthropicModel, config.model)
+
+        if not client:
+            self.client = AsyncAnthropic(
+                api_key=config.api_key,
+                max_retries=1,
+            )
+        else:
+            self.client = client
+
+    def _extract_json_from_text(self, text: str) -> dict[str, typing.Any]:
+        """Extract JSON from text content.
+
+        A helper method to extract JSON from text content, used when tool use fails or
+        no response_model is provided.
+
+        Args:
+            text: The text to extract JSON from
+
+        Returns:
+            Extracted JSON as a dictionary
+
+        Raises:
+            ValueError: If JSON cannot be extracted or parsed
+        """
+        try:
+            json_start = text.find('{')
+            json_end = text.rfind('}') + 1
+            if json_start >= 0 and json_end > json_start:
+                json_str = text[json_start:json_end]
+                return json.loads(json_str)
+            else:
+                raise ValueError(f'Could not extract JSON from model response: {text}')
+        except (JSONDecodeError, ValueError) as e:
+            raise ValueError(f'Could not extract JSON from model response: {text}') from e
+
+    def _create_tool(
+        self, response_model: type[BaseModel] | None = None
+    ) -> tuple[list[ToolUnionParam], ToolChoiceParam]:
+        """
+        Create a tool definition based on the response_model if provided, or a generic JSON tool if not.
+
+        Args:
+            response_model: Optional Pydantic model to use for structured output.
+
+        Returns:
+            A list containing a single tool definition for use with the Anthropic API.
+        """
+        if response_model is not None:
+            # temporary debug log
+            logger.info(f'Creating tool for response_model: {response_model}')
+            # Use the response_model to define the tool
+            model_schema = response_model.model_json_schema()
+            tool_name = response_model.__name__
+            description = model_schema.get('description', f'Extract {tool_name} information')
+        else:
+            # temporary debug log
+            logger.info('Creating generic JSON output tool')
+            # Create a generic JSON output tool
+            tool_name = 'generic_json_output'
+            description = 'Output data in JSON format'
+            model_schema = {
+                'type': 'object',
+                'additionalProperties': True,
+                'description': 'Any JSON object containing the requested information',
+            }
+
+        tool = {
+            'name': tool_name,
+            'description': description,
+            'input_schema': model_schema,
+        }
+        tool_list = [tool]
+        tool_list_cast = typing.cast(list[ToolUnionParam], tool_list)
+        tool_choice = {'type': 'tool', 'name': tool_name}
+        tool_choice_cast = typing.cast(ToolChoiceParam, tool_choice)
+        return tool_list_cast, tool_choice_cast
+
+    async def _generate_response(
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int | None = None,
+    ) -> dict[str, typing.Any]:
+        """
+        Generate a response from the Anthropic LLM using tool-based approach for all requests.
+
+        Args:
+            messages: List of message objects to send to the LLM.
+            response_model: Optional Pydantic model to use for structured output.
+            max_tokens: Maximum number of tokens to generate.
+
+        Returns:
+            Dictionary containing the structured response from the LLM.
+
+        Raises:
+            RateLimitError: If the rate limit is exceeded.
+            RefusalError: If the LLM refuses to respond.
+            Exception: If an error occurs during the generation process.
+        """
+        system_message = messages[0]
+        user_messages = [{'role': m.role, 'content': m.content} for m in messages[1:]]
+        user_messages_cast = typing.cast(list[MessageParam], user_messages)
+
+        # TODO: Replace hacky min finding solution after fixing hardcoded EXTRACT_EDGES_MAX_TOKENS = 16384 in
+        # edge_operations.py. Throws errors with cheaper models that lower max_tokens.
+        max_creation_tokens: int = min(
+            max_tokens if max_tokens is not None else self.config.max_tokens,
+            DEFAULT_MAX_TOKENS,
+        )
+
+        try:
+            # Create the appropriate tool based on whether response_model is provided
+            tools, tool_choice = self._create_tool(response_model)
+            # temporary debug log
+            logger.info(f'using model: {self.model} with max_tokens: {self.max_tokens}')
+            result = await self.client.messages.create(
+                system=system_message.content,
+                max_tokens=max_creation_tokens,
+                temperature=self.temperature,
+                messages=user_messages_cast,
+                model=self.model,
+                tools=tools,
+                tool_choice=tool_choice,
+            )
+
+            # Extract the tool output from the response
+            for content_item in result.content:
+                if content_item.type == 'tool_use':
+                    if isinstance(content_item.input, dict):
+                        tool_args: dict[str, typing.Any] = content_item.input
+                    else:
+                        tool_args = json.loads(str(content_item.input))
+                    return tool_args
+
+            # If we didn't get a proper tool_use response, try to extract from text
+            # logger.debug(
+            #     f'Did not get a tool_use response, trying to extract json from text. Result: {result.content}'
+            # )
+            # temporary debug log
+            logger.info(
+                f'Did not get a tool_use response, trying to extract json from text. Result: {result.content}'
+            )
+            for content_item in result.content:
+                if content_item.type == 'text':
+                    return self._extract_json_from_text(content_item.text)
+                else:
+                    raise ValueError(
+                        f'Could not extract structured data from model response: {result.content}'
+                    )
+
+            # If we get here, we couldn't parse a structured response
+            raise ValueError(
+                f'Could not extract structured data from model response: {result.content}'
+            )
+
+        except anthropic.RateLimitError as e:
+            raise RateLimitError(f'Rate limit exceeded. Please try again later. Error: {e}') from e
+        except anthropic.APIError as e:
+            # Special case for content policy violations. We convert these to RefusalError
+            # to bypass the retry mechanism, as retrying policy-violating content will always fail.
+            # This avoids wasting API calls and provides more specific error messaging to the user.
+            if 'refused to respond' in str(e).lower():
+                raise RefusalError(str(e)) from e
+            raise e
+        except Exception as e:
+            raise e
+
+    async def generate_response(
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
+    ) -> dict[str, typing.Any]:
+        """
+        Generate a response from the LLM.
+
+        Args:
+            messages: List of message objects to send to the LLM.
+            response_model: Optional Pydantic model to use for structured output.
+            max_tokens: Maximum number of tokens to generate.
+
+        Returns:
+            Dictionary containing the structured response from the LLM.
+
+        Raises:
+            RateLimitError: If the rate limit is exceeded.
+            RefusalError: If the LLM refuses to respond.
+            Exception: If an error occurs during the generation process.
+        """
+        retry_count = 0
+        max_retries = 2
+        last_error: Exception | None = None
+
+        while retry_count <= max_retries:
+            try:
+                response = await self._generate_response(messages, response_model, max_tokens)
+
+                # If we have a response_model, attempt to validate the response
+                if response_model is not None:
+                    # Validate the response against the response_model
+                    model_instance = response_model(**response)
+                    return model_instance.model_dump()
+
+                # If no validation needed, return the response
+                return response
+
+            except (RateLimitError, RefusalError):
+                # These errors should not trigger retries
+                raise
+            except Exception as e:
+                last_error = e
+
+                if retry_count >= max_retries:
+                    if isinstance(e, ValidationError):
+                        logger.error(
+                            f'Validation error after {retry_count}/{max_retries} attempts: {e}'
+                        )
+                    else:
+                        logger.error(f'Max retries ({max_retries}) exceeded. Last error: {e}')
+                    raise e
+
+                if isinstance(e, ValidationError):
+                    response_model_cast = typing.cast(type[BaseModel], response_model)
+                    error_context = f'The previous response was invalid. Please provide a valid {response_model_cast.__name__} object. Error: {e}'
+                else:
+                    error_context = (
+                        f'The previous response attempt was invalid. '
+                        f'Error type: {e.__class__.__name__}. '
+                        f'Error details: {str(e)}. '
+                        f'Please try again with a valid response.'
+                    )
+
+                # Common retry logic
+                retry_count += 1
+                messages.append(Message(role='user', content=error_context))
+                logger.warning(f'Retrying after error (attempt {retry_count}/{max_retries}): {e}')
+
+        # If we somehow get here, raise the last error
+        raise last_error or Exception('Max retries exceeded with no specific error')
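
The rewritten client drives every request through Anthropic's tool-calling API: _create_tool turns the response_model's JSON schema into a single forced tool, _generate_response parses the returned tool arguments (falling back to extracting JSON from plain text), and generate_response validates the result against the Pydantic model, retrying up to twice with error feedback appended to the conversation. A minimal usage sketch, assuming ANTHROPIC_API_KEY is set in the environment; the Summary model is a hypothetical example, not part of the package:

    import asyncio

    from pydantic import BaseModel

    from graphiti_core.llm_client.anthropic_client import AnthropicClient
    from graphiti_core.prompts.models import Message


    class Summary(BaseModel):  # hypothetical response model, for illustration only
        title: str
        key_points: list[str]


    async def main() -> None:
        client = AnthropicClient()  # pulls ANTHROPIC_API_KEY from the environment
        messages = [
            Message(role='system', content='You summarize text.'),
            Message(role='user', content='Summarize: Graphiti builds temporal knowledge graphs.'),
        ]
        # The client builds a tool from Summary's JSON schema, forces the model
        # to call it, and validates the returned arguments against Summary.
        result = await client.generate_response(messages, response_model=Summary)
        print(result)  # dict with Summary's fields

    asyncio.run(main())
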
{graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/llm_client/errors.py
@@ -29,3 +29,11 @@ class RefusalError(Exception):
     def __init__(self, message: str):
         self.message = message
         super().__init__(self.message)
+
+
+class EmptyResponseError(Exception):
+    """Exception raised when the LLM returns an empty response."""
+
+    def __init__(self, message: str):
+        self.message = message
+        super().__init__(self.message)
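
EmptyResponseError mirrors the shape of RefusalError above, so callers can branch on error type when deciding whether a failed call is worth retrying. A sketch of one way to classify these errors (should_retry is a hypothetical helper, not part of the package):

    import logging

    from graphiti_core.llm_client.errors import EmptyResponseError, RateLimitError, RefusalError

    logger = logging.getLogger(__name__)


    def should_retry(e: Exception) -> bool:
        """Return True when the failed LLM call is worth retrying."""
        if isinstance(e, RateLimitError):
            return True  # transient: back off and try again
        if isinstance(e, (RefusalError, EmptyResponseError)):
            # A refusal or empty response will not improve on a verbatim retry.
            logger.error('LLM gave no usable output: %s', e)
            return False
        return True
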
{graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/nodes.py
@@ -550,12 +550,12 @@ def get_entity_node_from_record(record: Any) -> EntityNode:
         attributes=record['attributes'],
     )

-    del entity_node.attributes['uuid']
-    del entity_node.attributes['name']
-    del entity_node.attributes['group_id']
-    del entity_node.attributes['name_embedding']
-    del entity_node.attributes['summary']
-    del entity_node.attributes['created_at']
+    entity_node.attributes.pop('uuid', None)
+    entity_node.attributes.pop('name', None)
+    entity_node.attributes.pop('group_id', None)
+    entity_node.attributes.pop('name_embedding', None)
+    entity_node.attributes.pop('summary', None)
+    entity_node.attributes.pop('created_at', None)

     return entity_node

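The switch from del to dict.pop(key, None) makes this cleanup tolerant of records that are missing some of these keys: del raises KeyError for an absent key, while pop with a default is a no-op. A quick standalone illustration of the difference:

    attributes = {'uuid': '123', 'name': 'Alice'}

    attributes.pop('summary', None)  # no-op: 'summary' was never present
    attributes.pop('uuid', None)     # removes 'uuid'

    try:
        del attributes['summary']
    except KeyError:
        print('del raises KeyError when the key is absent')

    print(attributes)  # {'name': 'Alice'}
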
{graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/prompts/summarize_nodes.py
@@ -89,6 +89,7 @@ def summarize_context(context: dict[str, Any]) -> list[Message]:

         Guidelines:
         1. Do not hallucinate entity property values if they cannot be found in the current context.
+        2. Only use the provided messages, entity, and entity context to set attribute values.

         <ENTITY>
         {context['node_name']}

{graphiti_core-0.10.0 → graphiti_core-0.10.2}/graphiti_core/utils/maintenance/node_operations.py
@@ -17,6 +17,7 @@ limitations under the License.
 import logging
 from contextlib import suppress
 from time import time
+from typing import Any

 import pydantic
 from pydantic import BaseModel

@@ -324,16 +325,17 @@ async def resolve_extracted_node(
         else [],
     }

-    summary_context = {
+    summary_context: dict[str, Any] = {
         'node_name': extracted_node.name,
         'node_summary': extracted_node.summary,
         'episode_content': episode.content if episode is not None else '',
         'previous_episodes': [ep.content for ep in previous_episodes]
         if previous_episodes is not None
         else [],
-        'attributes': [],
     }

+    attributes: list[dict[str, str]] = []
+
     entity_type_classes: tuple[BaseModel, ...] = tuple()
     if entity_types is not None:  # type: ignore
         entity_type_classes = entity_type_classes + tuple(
@@ -344,8 +346,15 @@ async def resolve_extracted_node(
     )

     for entity_type in entity_type_classes:
-        for field_name in entity_type.model_fields:
-            summary_context.get('attributes', []).append(field_name)  # type: ignore
+        for field_name, field_info in entity_type.model_fields.items():
+            attributes.append(
+                {
+                    'attribute_name': field_name,
+                    'attribute_description': field_info.description or '',
+                }
+            )
+
+    summary_context['attributes'] = attributes

     entity_attributes_model = pydantic.create_model(  # type: ignore
         'EntityAttributes',
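
The reworked loop captures each field's description alongside its name, so the summarization prompt can tell the model what every attribute means instead of receiving a bare list of field names. A standalone sketch of what entity_type.model_fields.items() yields for a Pydantic v2 model (Person is a hypothetical entity type):

    from pydantic import BaseModel, Field


    class Person(BaseModel):  # hypothetical entity type, for illustration only
        occupation: str = Field(description='The occupation of the person')
        hometown: str = Field(description='Where the person grew up')


    attributes: list[dict[str, str]] = []
    for field_name, field_info in Person.model_fields.items():
        attributes.append(
            {
                'attribute_name': field_name,
                'attribute_description': field_info.description or '',
            }
        )

    print(attributes)
    # [{'attribute_name': 'occupation', 'attribute_description': 'The occupation of the person'},
    #  {'attribute_name': 'hometown', 'attribute_description': 'Where the person grew up'}]
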
{graphiti_core-0.10.0 → graphiti_core-0.10.2}/pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "graphiti-core"
 description = "A temporal graph building library"
-version = "0.10.0"
+version = "0.10.2"
 authors = [
     { "name" = "Paul Paliychuk", "email" = "paul@getzep.com" },
     { "name" = "Preston Rasmussen", "email" = "preston@getzep.com" },

graphiti_core-0.10.0/graphiti_core/llm_client/anthropic_client.py (deleted)
@@ -1,79 +0,0 @@
-"""
-Copyright 2024, Zep Software, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import json
-import logging
-import typing
-
-import anthropic
-from anthropic import AsyncAnthropic
-from pydantic import BaseModel
-
-from ..prompts.models import Message
-from .client import LLMClient
-from .config import LLMConfig
-from .errors import RateLimitError
-
-logger = logging.getLogger(__name__)
-
-DEFAULT_MODEL = 'claude-3-7-sonnet-latest'
-DEFAULT_MAX_TOKENS = 8192
-
-
-class AnthropicClient(LLMClient):
-    def __init__(self, config: LLMConfig | None = None, cache: bool = False):
-        if config is None:
-            config = LLMConfig(max_tokens=DEFAULT_MAX_TOKENS)
-        elif config.max_tokens is None:
-            config.max_tokens = DEFAULT_MAX_TOKENS
-        super().__init__(config, cache)
-
-        self.client = AsyncAnthropic(
-            api_key=config.api_key,
-            # we'll use tenacity to retry
-            max_retries=1,
-        )
-
-    async def _generate_response(
-        self,
-        messages: list[Message],
-        response_model: type[BaseModel] | None = None,
-        max_tokens: int = DEFAULT_MAX_TOKENS,
-    ) -> dict[str, typing.Any]:
-        system_message = messages[0]
-        user_messages = [{'role': m.role, 'content': m.content} for m in messages[1:]] + [
-            {'role': 'assistant', 'content': '{'}
-        ]
-
-        # Ensure max_tokens is not greater than config.max_tokens or DEFAULT_MAX_TOKENS
-        max_tokens = min(max_tokens, self.config.max_tokens, DEFAULT_MAX_TOKENS)
-
-        try:
-            result = await self.client.messages.create(
-                system='Only include JSON in the response. Do not include any additional text or explanation of the content.\n'
-                + system_message.content,
-                max_tokens=max_tokens,
-                temperature=self.temperature,
-                messages=user_messages,  # type: ignore
-                model=self.model or DEFAULT_MODEL,
-            )
-
-            return json.loads('{' + result.content[0].text)  # type: ignore
-        except anthropic.RateLimitError as e:
-            raise RateLimitError from e
-        except Exception as e:
-            logger.error(f'Error in generating LLM response: {e}')
-            raise
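
For contrast with the tool-based client added above, the deleted implementation forced JSON output by prefilling the assistant turn with '{' so the completion had to continue the JSON object, then re-attached the brace before parsing. A minimal sketch of that prefill pattern against the Anthropic SDK, assuming ANTHROPIC_API_KEY is set in the environment:

    import asyncio
    import json

    from anthropic import AsyncAnthropic


    async def prefill_json(prompt: str) -> dict:
        client = AsyncAnthropic()  # pulls ANTHROPIC_API_KEY from the environment
        result = await client.messages.create(
            model='claude-3-5-haiku-latest',
            max_tokens=1024,
            system='Only include JSON in the response.',
            messages=[
                {'role': 'user', 'content': prompt},
                # Prefilling the assistant turn with '{' forces the completion
                # to continue the JSON object rather than wrap it in prose.
                {'role': 'assistant', 'content': '{'},
            ],
        )
        # The response omits the opening brace we supplied, so restore it.
        return json.loads('{' + result.content[0].text)


    print(asyncio.run(prefill_json('Reply with {"greeting": "..."} only.')))

Prefill breaks whenever the model appends trailing prose or truncates mid-object; forcing a tool call with a schema, as the new client does, yields arguments that can be validated directly.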