graphiti-core 0.13.0__py3-none-any.whl → 0.13.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -14,60 +14,64 @@ See the License for the specific language governing permissions and
  limitations under the License.
  """

- import json
  import logging
- from typing import Any
+ from typing import ClassVar

  from openai import AsyncAzureOpenAI
  from openai.types.chat import ChatCompletionMessageParam
  from pydantic import BaseModel

- from ..prompts.models import Message
- from .client import LLMClient
- from .config import LLMConfig, ModelSize
+ from .config import DEFAULT_MAX_TOKENS, LLMConfig
+ from .openai_base_client import BaseOpenAIClient

  logger = logging.getLogger(__name__)


- class AzureOpenAILLMClient(LLMClient):
+ class AzureOpenAILLMClient(BaseOpenAIClient):
      """Wrapper class for AsyncAzureOpenAI that implements the LLMClient interface."""

-     def __init__(self, azure_client: AsyncAzureOpenAI, config: LLMConfig | None = None):
-         super().__init__(config, cache=False)
-         self.azure_client = azure_client
+     # Class-level constants
+     MAX_RETRIES: ClassVar[int] = 2

-     async def _generate_response(
+     def __init__(
          self,
-         messages: list[Message],
-         response_model: type[BaseModel] | None = None,
-         max_tokens: int = 1024,
-         model_size: ModelSize = ModelSize.medium,
-     ) -> dict[str, Any]:
-         """Generate response using Azure OpenAI client."""
-         # Convert messages to OpenAI format
-         openai_messages: list[ChatCompletionMessageParam] = []
-         for message in messages:
-             message.content = self._clean_input(message.content)
-             if message.role == 'user':
-                 openai_messages.append({'role': 'user', 'content': message.content})
-             elif message.role == 'system':
-                 openai_messages.append({'role': 'system', 'content': message.content})
-
-         # Ensure model is a string
-         model_name = self.model if self.model else 'gpt-4o-mini'
+         azure_client: AsyncAzureOpenAI,
+         config: LLMConfig | None = None,
+         max_tokens: int = DEFAULT_MAX_TOKENS,
+     ):
+         super().__init__(config, cache=False, max_tokens=max_tokens)
+         self.client = azure_client

-         try:
-             response = await self.azure_client.chat.completions.create(
-                 model=model_name,
-                 messages=openai_messages,
-                 temperature=float(self.temperature) if self.temperature is not None else 0.7,
-                 max_tokens=max_tokens,
-                 response_format={'type': 'json_object'},
-             )
-             result = response.choices[0].message.content or '{}'
+     async def _create_structured_completion(
+         self,
+         model: str,
+         messages: list[ChatCompletionMessageParam],
+         temperature: float | None,
+         max_tokens: int,
+         response_model: type[BaseModel],
+     ):
+         """Create a structured completion using Azure OpenAI's beta parse API."""
+         return await self.client.beta.chat.completions.parse(
+             model=model,
+             messages=messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             response_format=response_model,  # type: ignore
+         )

-             # Parse JSON response
-             return json.loads(result)
-         except Exception as e:
-             logger.error(f'Error in Azure OpenAI LLM response: {e}')
-             raise
+     async def _create_completion(
+         self,
+         model: str,
+         messages: list[ChatCompletionMessageParam],
+         temperature: float | None,
+         max_tokens: int,
+         response_model: type[BaseModel] | None = None,
+     ):
+         """Create a regular completion with JSON format using Azure OpenAI."""
+         return await self.client.chat.completions.create(
+             model=model,
+             messages=messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             response_format={'type': 'json_object'},
+         )
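
As a usage note, here is a minimal sketch of constructing the refactored Azure client with the new signature above; the endpoint, API version, and model names are placeholder assumptions, not values taken from the package.

```python
# Sketch only: wiring AzureOpenAILLMClient per the 0.13.2 constructor above.
# The endpoint, API version, and model names are placeholder assumptions.
from openai import AsyncAzureOpenAI

from graphiti_core.llm_client.azure_openai_client import AzureOpenAILLMClient
from graphiti_core.llm_client.config import LLMConfig

azure_client = AsyncAzureOpenAI(
    api_key="...",
    api_version="2024-10-21",
    azure_endpoint="https://example-resource.openai.azure.com",
)

llm_client = AzureOpenAILLMClient(
    azure_client=azure_client,
    config=LLMConfig(model="gpt-4.1-mini", small_model="gpt-4.1-nano"),
)
```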
@@ -0,0 +1,217 @@
+ """
+ Copyright 2024, Zep Software, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """
+
+ import json
+ import logging
+ import typing
+ from abc import abstractmethod
+ from typing import Any, ClassVar
+
+ import openai
+ from openai.types.chat import ChatCompletionMessageParam
+ from pydantic import BaseModel
+
+ from ..prompts.models import Message
+ from .client import MULTILINGUAL_EXTRACTION_RESPONSES, LLMClient
+ from .config import DEFAULT_MAX_TOKENS, LLMConfig, ModelSize
+ from .errors import RateLimitError, RefusalError
+
+ logger = logging.getLogger(__name__)
+
+ DEFAULT_MODEL = 'gpt-4.1-mini'
+ DEFAULT_SMALL_MODEL = 'gpt-4.1-nano'
+
+
+ class BaseOpenAIClient(LLMClient):
+     """
+     Base client class for OpenAI-compatible APIs (OpenAI and Azure OpenAI).
+
+     This class contains shared logic for both OpenAI and Azure OpenAI clients,
+     reducing code duplication while allowing for implementation-specific differences.
+     """
+
+     # Class-level constants
+     MAX_RETRIES: ClassVar[int] = 2
+
+     def __init__(
+         self,
+         config: LLMConfig | None = None,
+         cache: bool = False,
+         max_tokens: int = DEFAULT_MAX_TOKENS,
+     ):
+         if cache:
+             raise NotImplementedError('Caching is not implemented for OpenAI-based clients')
+
+         if config is None:
+             config = LLMConfig()
+
+         super().__init__(config, cache)
+         self.max_tokens = max_tokens
+
+     @abstractmethod
+     async def _create_completion(
+         self,
+         model: str,
+         messages: list[ChatCompletionMessageParam],
+         temperature: float | None,
+         max_tokens: int,
+         response_model: type[BaseModel] | None = None,
+     ) -> Any:
+         """Create a completion using the specific client implementation."""
+         pass
+
+     @abstractmethod
+     async def _create_structured_completion(
+         self,
+         model: str,
+         messages: list[ChatCompletionMessageParam],
+         temperature: float | None,
+         max_tokens: int,
+         response_model: type[BaseModel],
+     ) -> Any:
+         """Create a structured completion using the specific client implementation."""
+         pass
+
+     def _convert_messages_to_openai_format(
+         self, messages: list[Message]
+     ) -> list[ChatCompletionMessageParam]:
+         """Convert internal Message format to OpenAI ChatCompletionMessageParam format."""
+         openai_messages: list[ChatCompletionMessageParam] = []
+         for m in messages:
+             m.content = self._clean_input(m.content)
+             if m.role == 'user':
+                 openai_messages.append({'role': 'user', 'content': m.content})
+             elif m.role == 'system':
+                 openai_messages.append({'role': 'system', 'content': m.content})
+         return openai_messages
+
+     def _get_model_for_size(self, model_size: ModelSize) -> str:
+         """Get the appropriate model name based on the requested size."""
+         if model_size == ModelSize.small:
+             return self.small_model or DEFAULT_SMALL_MODEL
+         else:
+             return self.model or DEFAULT_MODEL
+
+     def _handle_structured_response(self, response: Any) -> dict[str, Any]:
+         """Handle structured response parsing and validation."""
+         response_object = response.choices[0].message
+
+         if response_object.parsed:
+             return response_object.parsed.model_dump()
+         elif response_object.refusal:
+             raise RefusalError(response_object.refusal)
+         else:
+             raise Exception(f'Invalid response from LLM: {response_object.model_dump()}')
+
+     def _handle_json_response(self, response: Any) -> dict[str, Any]:
+         """Handle JSON response parsing."""
+         result = response.choices[0].message.content or '{}'
+         return json.loads(result)
+
+     async def _generate_response(
+         self,
+         messages: list[Message],
+         response_model: type[BaseModel] | None = None,
+         max_tokens: int = DEFAULT_MAX_TOKENS,
+         model_size: ModelSize = ModelSize.medium,
+     ) -> dict[str, Any]:
+         """Generate a response using the appropriate client implementation."""
+         openai_messages = self._convert_messages_to_openai_format(messages)
+         model = self._get_model_for_size(model_size)
+
+         try:
+             if response_model:
+                 response = await self._create_structured_completion(
+                     model=model,
+                     messages=openai_messages,
+                     temperature=self.temperature,
+                     max_tokens=max_tokens or self.max_tokens,
+                     response_model=response_model,
+                 )
+                 return self._handle_structured_response(response)
+             else:
+                 response = await self._create_completion(
+                     model=model,
+                     messages=openai_messages,
+                     temperature=self.temperature,
+                     max_tokens=max_tokens or self.max_tokens,
+                 )
+                 return self._handle_json_response(response)
+
+         except openai.LengthFinishReasonError as e:
+             raise Exception(f'Output length exceeded max tokens {self.max_tokens}: {e}') from e
+         except openai.RateLimitError as e:
+             raise RateLimitError from e
+         except Exception as e:
+             logger.error(f'Error in generating LLM response: {e}')
+             raise
+
+     async def generate_response(
+         self,
+         messages: list[Message],
+         response_model: type[BaseModel] | None = None,
+         max_tokens: int | None = None,
+         model_size: ModelSize = ModelSize.medium,
+     ) -> dict[str, typing.Any]:
+         """Generate a response with retry logic and error handling."""
+         if max_tokens is None:
+             max_tokens = self.max_tokens
+
+         retry_count = 0
+         last_error = None
+
+         # Add multilingual extraction instructions
+         messages[0].content += MULTILINGUAL_EXTRACTION_RESPONSES
+
+         while retry_count <= self.MAX_RETRIES:
+             try:
+                 response = await self._generate_response(
+                     messages, response_model, max_tokens, model_size
+                 )
+                 return response
+             except (RateLimitError, RefusalError):
+                 # These errors should not trigger retries
+                 raise
+             except (openai.APITimeoutError, openai.APIConnectionError, openai.InternalServerError):
+                 # Let OpenAI's client handle these retries
+                 raise
+             except Exception as e:
+                 last_error = e
+
+                 # Don't retry if we've hit the max retries
+                 if retry_count >= self.MAX_RETRIES:
+                     logger.error(f'Max retries ({self.MAX_RETRIES}) exceeded. Last error: {e}')
+                     raise
+
+                 retry_count += 1
+
+                 # Construct a detailed error message for the LLM
+                 error_context = (
+                     f'The previous response attempt was invalid. '
+                     f'Error type: {e.__class__.__name__}. '
+                     f'Error details: {str(e)}. '
+                     f'Please try again with a valid response, ensuring the output matches '
+                     f'the expected format and constraints.'
+                 )
+
+                 error_message = Message(role='user', content=error_context)
+                 messages.append(error_message)
+                 logger.warning(
+                     f'Retrying after application error (attempt {retry_count}/{self.MAX_RETRIES}): {e}'
+                 )
+
+         # If we somehow get here, raise the last error
+         raise last_error or Exception('Max retries exceeded with no specific error')
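
The new base class follows a template-method pattern: `generate_response` owns retries, message conversion, and error feedback, while subclasses supply only the two `_create_*` hooks. Below is a hypothetical sketch of a third OpenAI-compatible backend wired into `BaseOpenAIClient`; the class name and endpoint details are illustrative, not part of graphiti-core.

```python
# Hypothetical sketch: plugging another OpenAI-compatible backend into
# BaseOpenAIClient by implementing only the two abstract hooks.
# "CompatClient" and its base_url are illustrative, not shipped with graphiti-core.
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel

from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.llm_client.openai_base_client import BaseOpenAIClient


class CompatClient(BaseOpenAIClient):
    def __init__(self, config: LLMConfig | None = None):
        super().__init__(config, cache=False)
        config = config or LLMConfig()
        # Any OpenAI-compatible server can be targeted via base_url.
        self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)

    async def _create_structured_completion(
        self,
        model: str,
        messages: list[ChatCompletionMessageParam],
        temperature: float | None,
        max_tokens: int,
        response_model: type[BaseModel],
    ):
        # Structured output path, mirroring the shipped OpenAI/Azure clients.
        return await self.client.beta.chat.completions.parse(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            response_format=response_model,  # type: ignore
        )

    async def _create_completion(
        self,
        model: str,
        messages: list[ChatCompletionMessageParam],
        temperature: float | None,
        max_tokens: int,
        response_model: type[BaseModel] | None = None,
    ):
        # Plain JSON-object completion path.
        return await self.client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            response_format={'type': 'json_object'},
        )
```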
@@ -14,50 +14,27 @@ See the License for the specific language governing permissions and
  limitations under the License.
  """

- import logging
  import typing
- from typing import ClassVar

- import openai
  from openai import AsyncOpenAI
  from openai.types.chat import ChatCompletionMessageParam
  from pydantic import BaseModel

- from ..prompts.models import Message
- from .client import MULTILINGUAL_EXTRACTION_RESPONSES, LLMClient
- from .config import DEFAULT_MAX_TOKENS, LLMConfig, ModelSize
- from .errors import RateLimitError, RefusalError
+ from .config import DEFAULT_MAX_TOKENS, LLMConfig
+ from .openai_base_client import BaseOpenAIClient

- logger = logging.getLogger(__name__)

- DEFAULT_MODEL = 'gpt-4.1-mini'
- DEFAULT_SMALL_MODEL = 'gpt-4.1-nano'
-
-
- class OpenAIClient(LLMClient):
+ class OpenAIClient(BaseOpenAIClient):
      """
      OpenAIClient is a client class for interacting with OpenAI's language models.

-     This class extends the LLMClient and provides methods to initialize the client,
-     get an embedder, and generate responses from the language model.
+     This class extends the BaseOpenAIClient and provides OpenAI-specific implementation
+     for creating completions.

      Attributes:
          client (AsyncOpenAI): The OpenAI client used to interact with the API.
-         model (str): The model name to use for generating responses.
-         temperature (float): The temperature to use for generating responses.
-         max_tokens (int): The maximum number of tokens to generate in a response.
-
-     Methods:
-         __init__(config: LLMConfig | None = None, cache: bool = False, client: typing.Any = None):
-             Initializes the OpenAIClient with the provided configuration, cache setting, and client.
-
-         _generate_response(messages: list[Message]) -> dict[str, typing.Any]:
-             Generates a response from the language model based on the provided messages.
      """

-     # Class-level constants
-     MAX_RETRIES: ClassVar[int] = 2
-
      def __init__(
          self,
          config: LLMConfig | None = None,
@@ -72,120 +49,47 @@ class OpenAIClient(LLMClient):
              config (LLMConfig | None): The configuration for the LLM client, including API key, model, base URL, temperature, and max tokens.
              cache (bool): Whether to use caching for responses. Defaults to False.
              client (Any | None): An optional async client instance to use. If not provided, a new AsyncOpenAI client is created.
-
          """
-         # removed caching to simplify the `generate_response` override
-         if cache:
-             raise NotImplementedError('Caching is not implemented for OpenAI')
+         super().__init__(config, cache, max_tokens)

          if config is None:
              config = LLMConfig()

-         super().__init__(config, cache)
-
          if client is None:
              self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
          else:
              self.client = client

-         self.max_tokens = max_tokens
-
-     async def _generate_response(
+     async def _create_structured_completion(
          self,
-         messages: list[Message],
-         response_model: type[BaseModel] | None = None,
-         max_tokens: int = DEFAULT_MAX_TOKENS,
-         model_size: ModelSize = ModelSize.medium,
-     ) -> dict[str, typing.Any]:
-         openai_messages: list[ChatCompletionMessageParam] = []
-         for m in messages:
-             m.content = self._clean_input(m.content)
-             if m.role == 'user':
-                 openai_messages.append({'role': 'user', 'content': m.content})
-             elif m.role == 'system':
-                 openai_messages.append({'role': 'system', 'content': m.content})
-         try:
-             if model_size == ModelSize.small:
-                 model = self.small_model or DEFAULT_SMALL_MODEL
-             else:
-                 model = self.model or DEFAULT_MODEL
-
-             response = await self.client.beta.chat.completions.parse(
-                 model=model,
-                 messages=openai_messages,
-                 temperature=self.temperature,
-                 max_tokens=max_tokens or self.max_tokens,
-                 response_format=response_model,  # type: ignore
-             )
-
-             response_object = response.choices[0].message
-
-             if response_object.parsed:
-                 return response_object.parsed.model_dump()
-             elif response_object.refusal:
-                 raise RefusalError(response_object.refusal)
-             else:
-                 raise Exception(f'Invalid response from LLM: {response_object.model_dump()}')
-         except openai.LengthFinishReasonError as e:
-             raise Exception(f'Output length exceeded max tokens {self.max_tokens}: {e}') from e
-         except openai.RateLimitError as e:
-             raise RateLimitError from e
-         except Exception as e:
-             logger.error(f'Error in generating LLM response: {e}')
-             raise
-
-     async def generate_response(
+         model: str,
+         messages: list[ChatCompletionMessageParam],
+         temperature: float | None,
+         max_tokens: int,
+         response_model: type[BaseModel],
+     ):
+         """Create a structured completion using OpenAI's beta parse API."""
+         return await self.client.beta.chat.completions.parse(
+             model=model,
+             messages=messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             response_format=response_model,  # type: ignore
+         )
+
+     async def _create_completion(
          self,
-         messages: list[Message],
+         model: str,
+         messages: list[ChatCompletionMessageParam],
+         temperature: float | None,
+         max_tokens: int,
          response_model: type[BaseModel] | None = None,
-         max_tokens: int | None = None,
-         model_size: ModelSize = ModelSize.medium,
-     ) -> dict[str, typing.Any]:
-         if max_tokens is None:
-             max_tokens = self.max_tokens
-
-         retry_count = 0
-         last_error = None
-
-         # Add multilingual extraction instructions
-         messages[0].content += MULTILINGUAL_EXTRACTION_RESPONSES
-
-         while retry_count <= self.MAX_RETRIES:
-             try:
-                 response = await self._generate_response(
-                     messages, response_model, max_tokens, model_size
-                 )
-                 return response
-             except (RateLimitError, RefusalError):
-                 # These errors should not trigger retries
-                 raise
-             except (openai.APITimeoutError, openai.APIConnectionError, openai.InternalServerError):
-                 # Let OpenAI's client handle these retries
-                 raise
-             except Exception as e:
-                 last_error = e
-
-                 # Don't retry if we've hit the max retries
-                 if retry_count >= self.MAX_RETRIES:
-                     logger.error(f'Max retries ({self.MAX_RETRIES}) exceeded. Last error: {e}')
-                     raise
-
-                 retry_count += 1
-
-                 # Construct a detailed error message for the LLM
-                 error_context = (
-                     f'The previous response attempt was invalid. '
-                     f'Error type: {e.__class__.__name__}. '
-                     f'Error details: {str(e)}. '
-                     f'Please try again with a valid response, ensuring the output matches '
-                     f'the expected format and constraints.'
-                 )
-
-                 error_message = Message(role='user', content=error_context)
-                 messages.append(error_message)
-                 logger.warning(
-                     f'Retrying after application error (attempt {retry_count}/{self.MAX_RETRIES}): {e}'
-                 )
-
-         # If we somehow get here, raise the last error
-         raise last_error or Exception('Max retries exceeded with no specific error')
+     ):
+         """Create a regular completion with JSON format."""
+         return await self.client.chat.completions.create(
+             model=model,
+             messages=messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             response_format={'type': 'json_object'},
+         )
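
For reference, a rough sketch of exercising the refactored client's structured-output path; the `Summary` model and prompt text are illustrative, and it assumes an OpenAI API key is available to the default `LLMConfig`/environment.

```python
# Illustrative sketch: calling the refactored OpenAIClient with a Pydantic
# response_model so the structured (beta.parse) path is used.
import asyncio

from pydantic import BaseModel

from graphiti_core.llm_client.openai_client import OpenAIClient
from graphiti_core.prompts.models import Message


class Summary(BaseModel):
    summary: str


async def main() -> None:
    # Assumes the API key is picked up from LLMConfig defaults / environment.
    client = OpenAIClient()

    result = await client.generate_response(
        messages=[
            Message(role='system', content='Summarize the user message in one sentence.'),
            Message(role='user', content='Graphiti builds temporally-aware knowledge graphs.'),
        ],
        response_model=Summary,
    )
    print(result)  # dict produced by Summary.model_dump()


asyncio.run(main())
```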
@@ -141,6 +141,7 @@ def resolve_edge(context: dict[str, Any]) -> list[Message]:

          Task:
          If the NEW FACT represents the same factual information as any fact in EXISTING FACTS, return the idx of the duplicate fact.
+         Facts with similar information that contain key differences should not be marked as duplicates.
          If the NEW FACT is not a duplicate of any of the EXISTING FACTS, return -1.

          Given the predefined FACT TYPES, determine if the NEW FACT should be classified as one of these types.
@@ -152,6 +153,7 @@ def resolve_edge(context: dict[str, Any]) -> list[Message]:

          Guidelines:
          1. The facts do not need to be completely identical to be duplicates, they just need to express the same information.
+         2. Some facts may be very similar but will have key differences, particularly around numeric values in the facts.
          """,
      ),
  ]
@@ -63,7 +63,7 @@ MAX_QUERY_LENGTH = 32

  def fulltext_query(query: str, group_ids: list[str] | None = None):
      group_ids_filter_list = (
-         [f"group_id-'{lucene_sanitize(g)}'" for g in group_ids] if group_ids is not None else []
+         [f'group_id:"{lucene_sanitize(g)}"' for g in group_ids] if group_ids is not None else []
      )
      group_ids_filter = ''
      for f in group_ids_filter_list:
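
The change above replaces the malformed `group_id-'…'` term with standard Lucene field syntax. A quick sketch of the string the fixed code now builds, assuming `lucene_sanitize` only escapes Lucene special characters:

```python
# Sketch: shape of the group_id filter terms after the fix, with lucene_sanitize
# assumed to only escape Lucene special characters.
group_ids = ['tenant-1', 'tenant-2']
group_ids_filter_list = [f'group_id:"{g}"' for g in group_ids]
# -> ['group_id:"tenant-1"', 'group_id:"tenant-2"']
```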
@@ -301,12 +301,12 @@ async def edge_bfs_search(

      query = (
          """
- UNWIND $bfs_origin_node_uuids AS origin_uuid
- MATCH path = (origin:Entity|Episodic {uuid: origin_uuid})-[:RELATES_TO|MENTIONS]->{1,3}(n:Entity)
- UNWIND relationships(path) AS rel
- MATCH (n:Entity)-[r:RELATES_TO]-(m:Entity)
- WHERE r.uuid = rel.uuid
- """
+         UNWIND $bfs_origin_node_uuids AS origin_uuid
+         MATCH path = (origin:Entity|Episodic {uuid: origin_uuid})-[:RELATES_TO|MENTIONS]->{1,3}(n:Entity)
+         UNWIND relationships(path) AS rel
+         MATCH (n:Entity)-[r:RELATES_TO]-(m:Entity)
+         WHERE r.uuid = rel.uuid
+         """
          + filter_query
          + """
          RETURN DISTINCT
@@ -455,10 +455,10 @@ async def node_bfs_search(

      query = (
          """
- UNWIND $bfs_origin_node_uuids AS origin_uuid
- MATCH (origin:Entity|Episodic {uuid: origin_uuid})-[:RELATES_TO|MENTIONS]->{1,3}(n:Entity)
- WHERE n.group_id = origin.group_id
- """
+         UNWIND $bfs_origin_node_uuids AS origin_uuid
+         MATCH (origin:Entity|Episodic {uuid: origin_uuid})-[:RELATES_TO|MENTIONS]->{1,3}(n:Entity)
+         WHERE n.group_id = origin.group_id
+         """
          + filter_query
          + ENTITY_NODE_RETURN
          + """
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: graphiti-core
- Version: 0.13.0
+ Version: 0.13.2
  Summary: A temporal graph building library
  License: Apache-2.0
  Author: Paul Paliychuk
@@ -23,7 +23,7 @@ Requires-Dist: google-genai (>=1.8.0) ; extra == "google-genai"
  Requires-Dist: groq (>=0.2.0) ; extra == "groq"
  Requires-Dist: neo4j (>=5.26.0)
  Requires-Dist: numpy (>=1.0.0)
- Requires-Dist: openai (>=1.53.0)
+ Requires-Dist: openai (>=1.91.0)
  Requires-Dist: pydantic (>=2.11.5)
  Requires-Dist: python-dotenv (>=1.0.1)
  Requires-Dist: tenacity (>=9.0.0)
@@ -331,6 +331,55 @@ graphiti = Graphiti(
  # Now you can use Graphiti with Google Gemini
  ```

+ ## Using Graphiti with Ollama (Local LLM)
+
+ Graphiti supports Ollama for running local LLMs and embedding models via Ollama's OpenAI-compatible API. This is ideal for privacy-focused applications or when you want to avoid API costs.
+
+
+ Install the models:
+ ollama pull deepseek-r1:7b # LLM
+ ollama pull nomic-embed-text # embeddings
+
+ ```python
+ from graphiti_core import Graphiti
+ from graphiti_core.llm_client.config import LLMConfig
+ from graphiti_core.llm_client.openai_client import OpenAIClient
+ from graphiti_core.embedder.openai import OpenAIEmbedder, OpenAIEmbedderConfig
+ from graphiti_core.cross_encoder.openai_reranker_client import OpenAIRerankerClient
+
+ # Configure Ollama LLM client
+ llm_config = LLMConfig(
+     api_key="abc",  # Ollama doesn't require a real API key
+     model="deepseek-r1:7b",
+     small_model="deepseek-r1:7b",
+     base_url="http://localhost:11434/v1",  # Ollama provides this port
+ )
+
+ llm_client = OpenAIClient(config=llm_config)
+
+ # Initialize Graphiti with Ollama clients
+ graphiti = Graphiti(
+     "bolt://localhost:7687",
+     "neo4j",
+     "password",
+     llm_client=llm_client,
+     embedder=OpenAIEmbedder(
+         config=OpenAIEmbedderConfig(
+             api_key="abc",
+             embedding_model="nomic-embed-text",
+             embedding_dim=768,
+             base_url="http://localhost:11434/v1",
+         )
+     ),
+     cross_encoder=OpenAIRerankerClient(client=llm_client, config=llm_config),
+ )
+
+ # Now you can use Graphiti with local Ollama models
+ ```
+
+ Ensure Ollama is running (`ollama serve`) and that you have pulled the models you want to use.
+
+

  ## Documentation
  - [Guides and API documentation](https://help.getzep.com/graphiti).
@@ -21,13 +21,14 @@ graphiti_core/graphiti_types.py,sha256=rL-9bvnLobunJfXU4hkD6mAj14pofKp_wq8QsFDZw
  graphiti_core/helpers.py,sha256=0qmGnKxxYk27JGQbx6PlM7E6nRghUrEKBym0d3WSJY4,3875
  graphiti_core/llm_client/__init__.py,sha256=QgBWUiCeBp6YiA_xqyrDvJ9jIyy1hngH8g7FWahN3nw,776
  graphiti_core/llm_client/anthropic_client.py,sha256=392rtkH_I7yOJUlQvjoOnS8Lz14WBP8egQ3OfRH0nFs,12481
- graphiti_core/llm_client/azure_openai_client.py,sha256=B6EbNIktP9FBqiFrGunVQlego2e3C5zBAbcHI55Y-OY,2680
+ graphiti_core/llm_client/azure_openai_client.py,sha256=ekERggAekbb7enes1RJqdRChf_mjaZTFXsnMbxO7azQ,2497
  graphiti_core/llm_client/client.py,sha256=v_w5TBbDJYYADCXSs2r287g5Ami2Urma-GGEbHSI_Jg,5826
  graphiti_core/llm_client/config.py,sha256=90IgSBxZE_3nWdaEONVLUznI8lytPA7ZyexQz-_c55U,2560
  graphiti_core/llm_client/errors.py,sha256=pn6brRiLW60DAUIXJYKBT6MInrS4ueuH1hNLbn_JbQo,1243
  graphiti_core/llm_client/gemini_client.py,sha256=OdRAB2bWlXAi3gRmE1xVljYJ0T7JTZC82VK71wHyZi8,7722
  graphiti_core/llm_client/groq_client.py,sha256=k7zbXHfOpb4jhvvKFsccVYTq4yGGpxmY7xzNA02N2zk,2559
- graphiti_core/llm_client/openai_client.py,sha256=lLTZkd-PxEicTBmQefGoWLGTCb4QSU2Cq3x5W4kRYXg,7412
+ graphiti_core/llm_client/openai_base_client.py,sha256=gfMcKPyLrylz_ouRdoenDWXyitmgfFZ17Zthbkq3Qs4,8126
+ graphiti_core/llm_client/openai_client.py,sha256=ykBK94gxzE7iXux5rvOzVNA8q0Sqzq-8njPB75XcRe8,3240
  graphiti_core/llm_client/openai_generic_client.py,sha256=WElMnPqdb1CxzYH4p2-m_9rVMr5M93-eXnc3yVxBgFg,7001
  graphiti_core/llm_client/utils.py,sha256=zKpxXEbKa369m4W7RDEf-m56kH46V1Mx3RowcWZEWWs,1000
  graphiti_core/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -37,7 +38,7 @@ graphiti_core/models/nodes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
  graphiti_core/models/nodes/node_db_queries.py,sha256=AQgRGVO-GgFWfLq1G6k8s86WItwpXruy3Mj4DBli-vM,2145
  graphiti_core/nodes.py,sha256=kdJY-Ugyk6J2x70w4EF_EoFNgy7D3TMOMVSUfEth6rE,18665
  graphiti_core/prompts/__init__.py,sha256=EA-x9xUki9l8wnu2l8ek_oNf75-do5tq5hVq7Zbv8Kw,101
- graphiti_core/prompts/dedupe_edges.py,sha256=AFVC1EQ0TvNkSp0G7QZmIh3YpGg9FVXo1_sT3TlRqA8,5473
+ graphiti_core/prompts/dedupe_edges.py,sha256=-Fq8YlCPHOEnjJceSOy68dya3VIbmvMtcS8V9u9Tv6g,5699
  graphiti_core/prompts/dedupe_nodes.py,sha256=WdSnqu6O4TkEE_z1u2CEnNH0sWgBNDl4dUx20gSp464,7852
  graphiti_core/prompts/eval.py,sha256=gnBQTmwsCl3Qvwpcm7aieVszzo6y1sMCUT8jQiKTvvE,5317
  graphiti_core/prompts/extract_edge_dates.py,sha256=3Drs3CmvP0gJN5BidWSxrNvLet3HPoTybU3BUIAoc0Y,4218
@@ -55,7 +56,7 @@ graphiti_core/search/search_config.py,sha256=VvKg6AB_RPhoe56DBBXHRBXHThAVJ_OLFCy
  graphiti_core/search/search_config_recipes.py,sha256=4GquRphHhJlpXQhAZOySYnCzBWYoTwxlJj44eTOavZQ,7443
  graphiti_core/search/search_filters.py,sha256=jG30nMWX03xoT9ohgyHNu_Xes8GwjIF2eTv6QaiWMqw,6466
  graphiti_core/search/search_helpers.py,sha256=G5Ceaq5Pfgx0Weelqgeylp_pUHwiBnINaUYsDbURJbE,2636
- graphiti_core/search/search_utils.py,sha256=k9KKN4sYde0Hqw9BKb5T-8q-3hInIPwq9aYoGfheq6E,34877
+ graphiti_core/search/search_utils.py,sha256=q-FMbSFf7mPXWdgnKQDKQIACrHI8NwqknGDnMy4dJzs,34957
  graphiti_core/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  graphiti_core/utils/bulk_utils.py,sha256=RPPTAqBRg6iR1T6g6TDpfUGvkYmTEyVNrVPz_y91f-s,16196
  graphiti_core/utils/datetime_utils.py,sha256=Ti-2tnrDFRzBsbfblzsHybsM3jaDLP4-VT2t0VhpIzU,1357
@@ -67,7 +68,7 @@ graphiti_core/utils/maintenance/node_operations.py,sha256=-PC1N-Hf2GElhLcTTSjTaC
  graphiti_core/utils/maintenance/temporal_operations.py,sha256=mJkw9xLB4W2BsLfC5POr0r-PHWL9SIfNj_l_xu0B5ug,3410
  graphiti_core/utils/maintenance/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  graphiti_core/utils/ontology_utils/entity_types_utils.py,sha256=QJX5cG0GSSNF_Mm_yrldr69wjVAbN_MxLhOSznz85Hk,1279
- graphiti_core-0.13.0.dist-info/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
- graphiti_core-0.13.0.dist-info/METADATA,sha256=tuU6XGD8_1ReBZw_SXoCfy9ejUP3E82jpXMMz-QvwHY,15641
- graphiti_core-0.13.0.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
- graphiti_core-0.13.0.dist-info/RECORD,,
+ graphiti_core-0.13.2.dist-info/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
+ graphiti_core-0.13.2.dist-info/METADATA,sha256=nC4MmJ04hSsPebERWE2oaerz5j-0F2hFBE8sAYV9Wzo,17229
+ graphiti_core-0.13.2.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+ graphiti_core-0.13.2.dist-info/RECORD,,