graphiti-core 0.12.0rc1__py3-none-any.whl → 0.24.3__py3-none-any.whl

This diff compares the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (68)
  1. graphiti_core/cross_encoder/bge_reranker_client.py +12 -2
  2. graphiti_core/cross_encoder/gemini_reranker_client.py +161 -0
  3. graphiti_core/cross_encoder/openai_reranker_client.py +7 -5
  4. graphiti_core/decorators.py +110 -0
  5. graphiti_core/driver/__init__.py +19 -0
  6. graphiti_core/driver/driver.py +124 -0
  7. graphiti_core/driver/falkordb_driver.py +362 -0
  8. graphiti_core/driver/graph_operations/graph_operations.py +191 -0
  9. graphiti_core/driver/kuzu_driver.py +182 -0
  10. graphiti_core/driver/neo4j_driver.py +117 -0
  11. graphiti_core/driver/neptune_driver.py +305 -0
  12. graphiti_core/driver/search_interface/search_interface.py +89 -0
  13. graphiti_core/edges.py +287 -172
  14. graphiti_core/embedder/azure_openai.py +71 -0
  15. graphiti_core/embedder/client.py +2 -1
  16. graphiti_core/embedder/gemini.py +116 -22
  17. graphiti_core/embedder/voyage.py +13 -2
  18. graphiti_core/errors.py +8 -0
  19. graphiti_core/graph_queries.py +162 -0
  20. graphiti_core/graphiti.py +705 -193
  21. graphiti_core/graphiti_types.py +4 -2
  22. graphiti_core/helpers.py +87 -10
  23. graphiti_core/llm_client/__init__.py +16 -0
  24. graphiti_core/llm_client/anthropic_client.py +159 -56
  25. graphiti_core/llm_client/azure_openai_client.py +115 -0
  26. graphiti_core/llm_client/client.py +98 -21
  27. graphiti_core/llm_client/config.py +1 -1
  28. graphiti_core/llm_client/gemini_client.py +290 -41
  29. graphiti_core/llm_client/groq_client.py +14 -3
  30. graphiti_core/llm_client/openai_base_client.py +261 -0
  31. graphiti_core/llm_client/openai_client.py +56 -132
  32. graphiti_core/llm_client/openai_generic_client.py +91 -56
  33. graphiti_core/models/edges/edge_db_queries.py +259 -35
  34. graphiti_core/models/nodes/node_db_queries.py +311 -32
  35. graphiti_core/nodes.py +420 -205
  36. graphiti_core/prompts/dedupe_edges.py +46 -32
  37. graphiti_core/prompts/dedupe_nodes.py +67 -42
  38. graphiti_core/prompts/eval.py +4 -4
  39. graphiti_core/prompts/extract_edges.py +27 -16
  40. graphiti_core/prompts/extract_nodes.py +74 -31
  41. graphiti_core/prompts/prompt_helpers.py +39 -0
  42. graphiti_core/prompts/snippets.py +29 -0
  43. graphiti_core/prompts/summarize_nodes.py +23 -25
  44. graphiti_core/search/search.py +158 -82
  45. graphiti_core/search/search_config.py +39 -4
  46. graphiti_core/search/search_filters.py +126 -35
  47. graphiti_core/search/search_helpers.py +5 -6
  48. graphiti_core/search/search_utils.py +1405 -485
  49. graphiti_core/telemetry/__init__.py +9 -0
  50. graphiti_core/telemetry/telemetry.py +117 -0
  51. graphiti_core/tracer.py +193 -0
  52. graphiti_core/utils/bulk_utils.py +364 -285
  53. graphiti_core/utils/datetime_utils.py +13 -0
  54. graphiti_core/utils/maintenance/community_operations.py +67 -49
  55. graphiti_core/utils/maintenance/dedup_helpers.py +262 -0
  56. graphiti_core/utils/maintenance/edge_operations.py +339 -197
  57. graphiti_core/utils/maintenance/graph_data_operations.py +50 -114
  58. graphiti_core/utils/maintenance/node_operations.py +319 -238
  59. graphiti_core/utils/maintenance/temporal_operations.py +11 -3
  60. graphiti_core/utils/ontology_utils/entity_types_utils.py +1 -1
  61. graphiti_core/utils/text_utils.py +53 -0
  62. graphiti_core-0.24.3.dist-info/METADATA +726 -0
  63. graphiti_core-0.24.3.dist-info/RECORD +86 -0
  64. {graphiti_core-0.12.0rc1.dist-info → graphiti_core-0.24.3.dist-info}/WHEEL +1 -1
  65. graphiti_core-0.12.0rc1.dist-info/METADATA +0 -350
  66. graphiti_core-0.12.0rc1.dist-info/RECORD +0 -66
  67. /graphiti_core/{utils/maintenance/utils.py → migrations/__init__.py} +0 -0
  68. {graphiti_core-0.12.0rc1.dist-info → graphiti_core-0.24.3.dist-info/licenses}/LICENSE +0 -0
graphiti_core/llm_client/openai_base_client.py (new file)
@@ -0,0 +1,261 @@
+ """
+ Copyright 2024, Zep Software, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """
+
+ import json
+ import logging
+ import typing
+ from abc import abstractmethod
+ from typing import Any, ClassVar
+
+ import openai
+ from openai.types.chat import ChatCompletionMessageParam
+ from pydantic import BaseModel
+
+ from ..prompts.models import Message
+ from .client import LLMClient, get_extraction_language_instruction
+ from .config import DEFAULT_MAX_TOKENS, LLMConfig, ModelSize
+ from .errors import RateLimitError, RefusalError
+
+ logger = logging.getLogger(__name__)
+
+ DEFAULT_MODEL = 'gpt-5-mini'
+ DEFAULT_SMALL_MODEL = 'gpt-5-nano'
+ DEFAULT_REASONING = 'minimal'
+ DEFAULT_VERBOSITY = 'low'
+
+
+ class BaseOpenAIClient(LLMClient):
+     """
+     Base client class for OpenAI-compatible APIs (OpenAI and Azure OpenAI).
+
+     This class contains shared logic for both OpenAI and Azure OpenAI clients,
+     reducing code duplication while allowing for implementation-specific differences.
+     """
+
+     # Class-level constants
+     MAX_RETRIES: ClassVar[int] = 2
+
+     def __init__(
+         self,
+         config: LLMConfig | None = None,
+         cache: bool = False,
+         max_tokens: int = DEFAULT_MAX_TOKENS,
+         reasoning: str | None = DEFAULT_REASONING,
+         verbosity: str | None = DEFAULT_VERBOSITY,
+     ):
+         if cache:
+             raise NotImplementedError('Caching is not implemented for OpenAI-based clients')
+
+         if config is None:
+             config = LLMConfig()
+
+         super().__init__(config, cache)
+         self.max_tokens = max_tokens
+         self.reasoning = reasoning
+         self.verbosity = verbosity
+
+     @abstractmethod
+     async def _create_completion(
+         self,
+         model: str,
+         messages: list[ChatCompletionMessageParam],
+         temperature: float | None,
+         max_tokens: int,
+         response_model: type[BaseModel] | None = None,
+     ) -> Any:
+         """Create a completion using the specific client implementation."""
+         pass
+
+     @abstractmethod
+     async def _create_structured_completion(
+         self,
+         model: str,
+         messages: list[ChatCompletionMessageParam],
+         temperature: float | None,
+         max_tokens: int,
+         response_model: type[BaseModel],
+         reasoning: str | None,
+         verbosity: str | None,
+     ) -> Any:
+         """Create a structured completion using the specific client implementation."""
+         pass
+
+     def _convert_messages_to_openai_format(
+         self, messages: list[Message]
+     ) -> list[ChatCompletionMessageParam]:
+         """Convert internal Message format to OpenAI ChatCompletionMessageParam format."""
+         openai_messages: list[ChatCompletionMessageParam] = []
+         for m in messages:
+             m.content = self._clean_input(m.content)
+             if m.role == 'user':
+                 openai_messages.append({'role': 'user', 'content': m.content})
+             elif m.role == 'system':
+                 openai_messages.append({'role': 'system', 'content': m.content})
+         return openai_messages
+
+     def _get_model_for_size(self, model_size: ModelSize) -> str:
+         """Get the appropriate model name based on the requested size."""
+         if model_size == ModelSize.small:
+             return self.small_model or DEFAULT_SMALL_MODEL
+         else:
+             return self.model or DEFAULT_MODEL
+
+     def _handle_structured_response(self, response: Any) -> dict[str, Any]:
+         """Handle structured response parsing and validation."""
+         response_object = response.output_text
+
+         if response_object:
+             return json.loads(response_object)
+         elif response_object.refusal:
+             raise RefusalError(response_object.refusal)
+         else:
+             raise Exception(f'Invalid response from LLM: {response_object.model_dump()}')
+
+     def _handle_json_response(self, response: Any) -> dict[str, Any]:
+         """Handle JSON response parsing."""
+         result = response.choices[0].message.content or '{}'
+         return json.loads(result)
+
+     async def _generate_response(
+         self,
+         messages: list[Message],
+         response_model: type[BaseModel] | None = None,
+         max_tokens: int = DEFAULT_MAX_TOKENS,
+         model_size: ModelSize = ModelSize.medium,
+     ) -> dict[str, Any]:
+         """Generate a response using the appropriate client implementation."""
+         openai_messages = self._convert_messages_to_openai_format(messages)
+         model = self._get_model_for_size(model_size)
+
+         try:
+             if response_model:
+                 response = await self._create_structured_completion(
+                     model=model,
+                     messages=openai_messages,
+                     temperature=self.temperature,
+                     max_tokens=max_tokens or self.max_tokens,
+                     response_model=response_model,
+                     reasoning=self.reasoning,
+                     verbosity=self.verbosity,
+                 )
+                 return self._handle_structured_response(response)
+             else:
+                 response = await self._create_completion(
+                     model=model,
+                     messages=openai_messages,
+                     temperature=self.temperature,
+                     max_tokens=max_tokens or self.max_tokens,
+                 )
+                 return self._handle_json_response(response)
+
+         except openai.LengthFinishReasonError as e:
+             raise Exception(f'Output length exceeded max tokens {self.max_tokens}: {e}') from e
+         except openai.RateLimitError as e:
+             raise RateLimitError from e
+         except openai.AuthenticationError as e:
+             logger.error(
+                 f'OpenAI Authentication Error: {e}. Please verify your API key is correct.'
+             )
+             raise
+         except Exception as e:
+             # Provide more context for connection errors
+             error_msg = str(e)
+             if 'Connection error' in error_msg or 'connection' in error_msg.lower():
+                 logger.error(
+                     f'Connection error communicating with OpenAI API. Please check your network connection and API key. Error: {e}'
+                 )
+             else:
+                 logger.error(f'Error in generating LLM response: {e}')
+             raise
+
+     async def generate_response(
+         self,
+         messages: list[Message],
+         response_model: type[BaseModel] | None = None,
+         max_tokens: int | None = None,
+         model_size: ModelSize = ModelSize.medium,
+         group_id: str | None = None,
+         prompt_name: str | None = None,
+     ) -> dict[str, typing.Any]:
+         """Generate a response with retry logic and error handling."""
+         if max_tokens is None:
+             max_tokens = self.max_tokens
+
+         # Add multilingual extraction instructions
+         messages[0].content += get_extraction_language_instruction(group_id)
+
+         # Wrap entire operation in tracing span
+         with self.tracer.start_span('llm.generate') as span:
+             attributes = {
+                 'llm.provider': 'openai',
+                 'model.size': model_size.value,
+                 'max_tokens': max_tokens,
+             }
+             if prompt_name:
+                 attributes['prompt.name'] = prompt_name
+             span.add_attributes(attributes)
+
+             retry_count = 0
+             last_error = None
+
+             while retry_count <= self.MAX_RETRIES:
+                 try:
+                     response = await self._generate_response(
+                         messages, response_model, max_tokens, model_size
+                     )
+                     return response
+                 except (RateLimitError, RefusalError):
+                     # These errors should not trigger retries
+                     span.set_status('error', str(last_error))
+                     raise
+                 except (
+                     openai.APITimeoutError,
+                     openai.APIConnectionError,
+                     openai.InternalServerError,
+                 ):
+                     # Let OpenAI's client handle these retries
+                     span.set_status('error', str(last_error))
+                     raise
+                 except Exception as e:
+                     last_error = e
+
+                     # Don't retry if we've hit the max retries
+                     if retry_count >= self.MAX_RETRIES:
+                         logger.error(f'Max retries ({self.MAX_RETRIES}) exceeded. Last error: {e}')
+                         span.set_status('error', str(e))
+                         span.record_exception(e)
+                         raise
+
+                     retry_count += 1
+
+                     # Construct a detailed error message for the LLM
+                     error_context = (
+                         f'The previous response attempt was invalid. '
+                         f'Error type: {e.__class__.__name__}. '
+                         f'Error details: {str(e)}. '
+                         f'Please try again with a valid response, ensuring the output matches '
+                         f'the expected format and constraints.'
+                     )
+
+                     error_message = Message(role='user', content=error_context)
+                     messages.append(error_message)
+                     logger.warning(
+                         f'Retrying after application error (attempt {retry_count}/{self.MAX_RETRIES}): {e}'
+                     )
+
+             # If we somehow get here, raise the last error
+             span.set_status('error', str(last_error))
+             raise last_error or Exception('Max retries exceeded with no specific error')
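The new base class above owns message conversion, model-size selection, the retry loop, tracing, and error handling; concrete clients only implement the two abstract _create_* coroutines. As a rough sketch of that extension point — the class name and endpoint here are assumptions for illustration, not part of graphiti-core — a third OpenAI-compatible backend could plug in roughly like this:

# Illustrative sketch only: MyCompatibleClient is hypothetical and not shipped with graphiti-core.
from typing import Any

from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel

from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.llm_client.openai_base_client import BaseOpenAIClient


class MyCompatibleClient(BaseOpenAIClient):
    def __init__(self, config: LLMConfig | None = None, cache: bool = False):
        super().__init__(config, cache)
        config = config or LLMConfig()
        # Assumes an OpenAI-compatible endpoint reachable via base_url.
        self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)

    async def _create_completion(
        self,
        model: str,
        messages: list[ChatCompletionMessageParam],
        temperature: float | None,
        max_tokens: int,
        response_model: type[BaseModel] | None = None,
    ) -> Any:
        # Plain JSON-mode chat completion; the base class parses choices[0].message.content.
        return await self.client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            response_format={'type': 'json_object'},
        )

    async def _create_structured_completion(
        self,
        model: str,
        messages: list[ChatCompletionMessageParam],
        temperature: float | None,
        max_tokens: int,
        response_model: type[BaseModel],
        reasoning: str | None,
        verbosity: str | None,
    ) -> Any:
        # The base class reads response.output_text, so the structured path goes through
        # the Responses parse API, mirroring the OpenAIClient implementation below.
        return await self.client.responses.parse(
            model=model,
            input=messages,  # type: ignore[arg-type]
            temperature=temperature,
            max_output_tokens=max_tokens,
            text_format=response_model,  # type: ignore[call-overload]
        )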
graphiti_core/llm_client/openai_client.py
@@ -14,56 +14,35 @@ See the License for the specific language governing permissions and
  limitations under the License.
  """

- import logging
  import typing
- from typing import ClassVar

- import openai
  from openai import AsyncOpenAI
  from openai.types.chat import ChatCompletionMessageParam
  from pydantic import BaseModel

- from ..prompts.models import Message
- from .client import MULTILINGUAL_EXTRACTION_RESPONSES, LLMClient
- from .config import DEFAULT_MAX_TOKENS, LLMConfig, ModelSize
- from .errors import RateLimitError, RefusalError
+ from .config import DEFAULT_MAX_TOKENS, LLMConfig
+ from .openai_base_client import DEFAULT_REASONING, DEFAULT_VERBOSITY, BaseOpenAIClient

- logger = logging.getLogger(__name__)

- DEFAULT_MODEL = 'gpt-4.1-mini'
- DEFAULT_SMALL_MODEL = 'gpt-4.1-nano'
-
-
- class OpenAIClient(LLMClient):
+ class OpenAIClient(BaseOpenAIClient):
      """
      OpenAIClient is a client class for interacting with OpenAI's language models.

-     This class extends the LLMClient and provides methods to initialize the client,
-     get an embedder, and generate responses from the language model.
+     This class extends the BaseOpenAIClient and provides OpenAI-specific implementation
+     for creating completions.

      Attributes:
          client (AsyncOpenAI): The OpenAI client used to interact with the API.
-         model (str): The model name to use for generating responses.
-         temperature (float): The temperature to use for generating responses.
-         max_tokens (int): The maximum number of tokens to generate in a response.
-
-     Methods:
-         __init__(config: LLMConfig | None = None, cache: bool = False, client: typing.Any = None):
-             Initializes the OpenAIClient with the provided configuration, cache setting, and client.
-
-         _generate_response(messages: list[Message]) -> dict[str, typing.Any]:
-             Generates a response from the language model based on the provided messages.
      """

-     # Class-level constants
-     MAX_RETRIES: ClassVar[int] = 2
-
      def __init__(
          self,
          config: LLMConfig | None = None,
          cache: bool = False,
          client: typing.Any = None,
          max_tokens: int = DEFAULT_MAX_TOKENS,
+         reasoning: str = DEFAULT_REASONING,
+         verbosity: str = DEFAULT_VERBOSITY,
      ):
          """
          Initialize the OpenAIClient with the provided configuration, cache setting, and client.
@@ -72,120 +51,65 @@ class OpenAIClient(LLMClient):
              config (LLMConfig | None): The configuration for the LLM client, including API key, model, base URL, temperature, and max tokens.
              cache (bool): Whether to use caching for responses. Defaults to False.
              client (Any | None): An optional async client instance to use. If not provided, a new AsyncOpenAI client is created.
-
          """
-         # removed caching to simplify the `generate_response` override
-         if cache:
-             raise NotImplementedError('Caching is not implemented for OpenAI')
+         super().__init__(config, cache, max_tokens, reasoning, verbosity)

          if config is None:
              config = LLMConfig()

-         super().__init__(config, cache)
-
          if client is None:
              self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
          else:
              self.client = client

-         self.max_tokens = max_tokens
-
-     async def _generate_response(
+     async def _create_structured_completion(
          self,
-         messages: list[Message],
-         response_model: type[BaseModel] | None = None,
-         max_tokens: int = DEFAULT_MAX_TOKENS,
-         model_size: ModelSize = ModelSize.medium,
-     ) -> dict[str, typing.Any]:
-         openai_messages: list[ChatCompletionMessageParam] = []
-         for m in messages:
-             m.content = self._clean_input(m.content)
-             if m.role == 'user':
-                 openai_messages.append({'role': 'user', 'content': m.content})
-             elif m.role == 'system':
-                 openai_messages.append({'role': 'system', 'content': m.content})
-         try:
-             if model_size == ModelSize.small:
-                 model = self.small_model or DEFAULT_SMALL_MODEL
-             else:
-                 model = self.model or DEFAULT_MODEL
-
-             response = await self.client.beta.chat.completions.parse(
-                 model=model,
-                 messages=openai_messages,
-                 temperature=self.temperature,
-                 max_tokens=max_tokens or self.max_tokens,
-                 response_format=response_model, # type: ignore
-             )
-
-             response_object = response.choices[0].message
-
-             if response_object.parsed:
-                 return response_object.parsed.model_dump()
-             elif response_object.refusal:
-                 raise RefusalError(response_object.refusal)
-             else:
-                 raise Exception(f'Invalid response from LLM: {response_object.model_dump()}')
-         except openai.LengthFinishReasonError as e:
-             raise Exception(f'Output length exceeded max tokens {self.max_tokens}: {e}') from e
-         except openai.RateLimitError as e:
-             raise RateLimitError from e
-         except Exception as e:
-             logger.error(f'Error in generating LLM response: {e}')
-             raise
-
-     async def generate_response(
+         model: str,
+         messages: list[ChatCompletionMessageParam],
+         temperature: float | None,
+         max_tokens: int,
+         response_model: type[BaseModel],
+         reasoning: str | None = None,
+         verbosity: str | None = None,
+     ):
+         """Create a structured completion using OpenAI's beta parse API."""
+         # Reasoning models (gpt-5 family) don't support temperature
+         is_reasoning_model = (
+             model.startswith('gpt-5') or model.startswith('o1') or model.startswith('o3')
+         )
+
+         response = await self.client.responses.parse(
+             model=model,
+             input=messages, # type: ignore
+             temperature=temperature if not is_reasoning_model else None,
+             max_output_tokens=max_tokens,
+             text_format=response_model, # type: ignore
+             reasoning={'effort': reasoning} if reasoning is not None else None, # type: ignore
+             text={'verbosity': verbosity} if verbosity is not None else None, # type: ignore
+         )
+
+         return response
+
+     async def _create_completion(
          self,
-         messages: list[Message],
+         model: str,
+         messages: list[ChatCompletionMessageParam],
+         temperature: float | None,
+         max_tokens: int,
          response_model: type[BaseModel] | None = None,
-         max_tokens: int | None = None,
-         model_size: ModelSize = ModelSize.medium,
-     ) -> dict[str, typing.Any]:
-         if max_tokens is None:
-             max_tokens = self.max_tokens
-
-         retry_count = 0
-         last_error = None
-
-         # Add multilingual extraction instructions
-         messages[0].content += MULTILINGUAL_EXTRACTION_RESPONSES
-
-         while retry_count <= self.MAX_RETRIES:
-             try:
-                 response = await self._generate_response(
-                     messages, response_model, max_tokens, model_size
-                 )
-                 return response
-             except (RateLimitError, RefusalError):
-                 # These errors should not trigger retries
-                 raise
-             except (openai.APITimeoutError, openai.APIConnectionError, openai.InternalServerError):
-                 # Let OpenAI's client handle these retries
-                 raise
-             except Exception as e:
-                 last_error = e
-
-                 # Don't retry if we've hit the max retries
-                 if retry_count >= self.MAX_RETRIES:
-                     logger.error(f'Max retries ({self.MAX_RETRIES}) exceeded. Last error: {e}')
-                     raise
-
-                 retry_count += 1
-
-                 # Construct a detailed error message for the LLM
-                 error_context = (
-                     f'The previous response attempt was invalid. '
-                     f'Error type: {e.__class__.__name__}. '
-                     f'Error details: {str(e)}. '
-                     f'Please try again with a valid response, ensuring the output matches '
-                     f'the expected format and constraints.'
-                 )
-
-                 error_message = Message(role='user', content=error_context)
-                 messages.append(error_message)
-                 logger.warning(
-                     f'Retrying after application error (attempt {retry_count}/{self.MAX_RETRIES}): {e}'
-                 )
-
-         # If we somehow get here, raise the last error
-         raise last_error or Exception('Max retries exceeded with no specific error')
+         reasoning: str | None = None,
+         verbosity: str | None = None,
+     ):
+         """Create a regular completion with JSON format."""
+         # Reasoning models (gpt-5 family) don't support temperature
+         is_reasoning_model = (
+             model.startswith('gpt-5') or model.startswith('o1') or model.startswith('o3')
+         )
+
+         return await self.client.chat.completions.create(
+             model=model,
+             messages=messages,
+             temperature=temperature if not is_reasoning_model else None,
+             max_tokens=max_tokens,
+             response_format={'type': 'json_object'},
+         )
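With these two implementations, structured output is routed through the Responses API (responses.parse with text_format, reasoning effort, and verbosity) while plain JSON output still uses chat.completions.create, and temperature is dropped for gpt-5/o1/o3 models. A hedged usage sketch follows; the API key, model names, and response schema are placeholders for illustration, not values required by the package:

# Hedged usage sketch: EntitySummary and the model names are illustrative assumptions.
import asyncio

from pydantic import BaseModel

from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.llm_client.openai_client import OpenAIClient
from graphiti_core.prompts.models import Message


class EntitySummary(BaseModel):
    name: str
    summary: str


async def main() -> None:
    client = OpenAIClient(
        config=LLMConfig(api_key='sk-...', model='gpt-5-mini'),
        reasoning='minimal',  # forwarded to responses.parse as reasoning={'effort': ...}
        verbosity='low',      # forwarded as text={'verbosity': ...}
    )
    result = await client.generate_response(
        messages=[
            Message(role='system', content='Extract the entity mentioned in the text.'),
            Message(role='user', content='Graphiti builds temporally-aware knowledge graphs.'),
        ],
        response_model=EntitySummary,  # structured path -> responses.parse
    )
    print(result)  # plain dict parsed from the structured output


asyncio.run(main())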