graphiti-core 0.14.0__py3-none-any.whl → 0.15.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


graphiti_core/cross_encoder/__init__.py CHANGED
@@ -15,6 +15,7 @@ limitations under the License.
  """
 
  from .client import CrossEncoderClient
+ from .gemini_reranker_client import GeminiRerankerClient
  from .openai_reranker_client import OpenAIRerankerClient
 
- __all__ = ['CrossEncoderClient', 'OpenAIRerankerClient']
+ __all__ = ['CrossEncoderClient', 'GeminiRerankerClient', 'OpenAIRerankerClient']
graphiti_core/cross_encoder/gemini_reranker_client.py ADDED
@@ -0,0 +1,146 @@
+ """
+ Copyright 2024, Zep Software, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """
+
+ import logging
+ import re
+
+ from google import genai  # type: ignore
+ from google.genai import types  # type: ignore
+
+ from ..helpers import semaphore_gather
+ from ..llm_client import LLMConfig, RateLimitError
+ from .client import CrossEncoderClient
+
+ logger = logging.getLogger(__name__)
+
+ DEFAULT_MODEL = 'gemini-2.5-flash-lite-preview-06-17'
+
+
+ class GeminiRerankerClient(CrossEncoderClient):
+     def __init__(
+         self,
+         config: LLMConfig | None = None,
+         client: genai.Client | None = None,
+     ):
+         """
+         Initialize the GeminiRerankerClient with the provided configuration and client.
+
+         The Gemini Developer API does not yet support logprobs. Unlike the OpenAI reranker,
+         this reranker uses the Gemini API to perform direct relevance scoring of passages.
+         Each passage is scored individually on a 0-100 scale.
+
+         Args:
+             config (LLMConfig | None): The configuration for the LLM client, including API key, model, base URL, temperature, and max tokens.
+             client (genai.Client | None): An optional async client instance to use. If not provided, a new genai.Client is created.
+         """
+         if config is None:
+             config = LLMConfig()
+
+         self.config = config
+         if client is None:
+             self.client = genai.Client(api_key=config.api_key)
+         else:
+             self.client = client
+
+     async def rank(self, query: str, passages: list[str]) -> list[tuple[str, float]]:
+         """
+         Rank passages based on their relevance to the query using direct scoring.
+
+         Each passage is scored individually on a 0-100 scale, then normalized to [0, 1].
+         """
+         if len(passages) <= 1:
+             return [(passage, 1.0) for passage in passages]
+
+         # Generate scoring prompts for each passage
+         scoring_prompts = []
+         for passage in passages:
+             prompt = f"""Rate how well this passage answers or relates to the query. Use a scale from 0 to 100.
+
+ Query: {query}
+
+ Passage: {passage}
+
+ Provide only a number between 0 and 100 (no explanation, just the number):"""
+
+             scoring_prompts.append(
+                 [
+                     types.Content(
+                         role='user',
+                         parts=[types.Part.from_text(text=prompt)],
+                     ),
+                 ]
+             )
+
+         try:
+             # Execute all scoring requests concurrently - O(n) API calls
+             responses = await semaphore_gather(
+                 *[
+                     self.client.aio.models.generate_content(
+                         model=self.config.model or DEFAULT_MODEL,
+                         contents=prompt_messages,  # type: ignore
+                         config=types.GenerateContentConfig(
+                             system_instruction='You are an expert at rating passage relevance. Respond with only a number from 0-100.',
+                             temperature=0.0,
+                             max_output_tokens=3,
+                         ),
+                     )
+                     for prompt_messages in scoring_prompts
+                 ]
+             )
+
+             # Extract scores and create results
+             results = []
+             for passage, response in zip(passages, responses, strict=True):
+                 try:
+                     if hasattr(response, 'text') and response.text:
+                         # Extract numeric score from response
+                         score_text = response.text.strip()
+                         # Handle cases where model might return non-numeric text
+                         score_match = re.search(r'\b(\d{1,3})\b', score_text)
+                         if score_match:
+                             score = float(score_match.group(1))
+                             # Normalize to [0, 1] range and clamp to valid range
+                             normalized_score = max(0.0, min(1.0, score / 100.0))
+                             results.append((passage, normalized_score))
+                         else:
+                             logger.warning(
+                                 f'Could not extract numeric score from response: {score_text}'
+                             )
+                             results.append((passage, 0.0))
+                     else:
+                         logger.warning('Empty response from Gemini for passage scoring')
+                         results.append((passage, 0.0))
+                 except (ValueError, AttributeError) as e:
+                     logger.warning(f'Error parsing score from Gemini response: {e}')
+                     results.append((passage, 0.0))
+
+             # Sort by score in descending order (highest relevance first)
+             results.sort(reverse=True, key=lambda x: x[1])
+             return results
+
+         except Exception as e:
+             # Check if it's a rate limit error based on Gemini API error codes
+             error_message = str(e).lower()
+             if (
+                 'rate limit' in error_message
+                 or 'quota' in error_message
+                 or 'resource_exhausted' in error_message
+                 or '429' in str(e)
+             ):
+                 raise RateLimitError from e
+
+             logger.error(f'Error in generating LLM response: {e}')
+             raise
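To make the new reranker concrete, here is a minimal usage sketch; it assumes only the constructor and `rank` signature shown above, and the query and passages are illustrative placeholders:

```python
import asyncio

from graphiti_core.cross_encoder.gemini_reranker_client import GeminiRerankerClient
from graphiti_core.llm_client import LLMConfig


async def main() -> None:
    # Model falls back to DEFAULT_MODEL when config.model is unset
    reranker = GeminiRerankerClient(config=LLMConfig(api_key='<your-google-api-key>'))
    ranked = await reranker.rank(
        query='How does Graphiti store episodes?',
        passages=[
            'Graphiti builds temporally-aware knowledge graphs from episodes.',
            'FalkorDB listens on port 6379 by default.',
        ],
    )
    for passage, score in ranked:  # sorted by normalized score, highest first
        print(f'{score:.2f}  {passage}')


asyncio.run(main())
```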
graphiti_core/driver/__init__.py CHANGED
@@ -14,4 +14,7 @@ See the License for the specific language governing permissions and
  limitations under the License.
  """
 
- __all__ = ['GraphDriver', 'Neo4jDriver', 'FalkorDriver']
+ from falkordb import FalkorDB
+ from neo4j import Neo4jDriver
+
+ __all__ = ['Neo4jDriver', 'FalkorDB']
graphiti_core/driver/falkordb_driver.py CHANGED
@@ -15,7 +15,6 @@ limitations under the License.
  """
 
  import logging
- from collections.abc import Coroutine
  from datetime import datetime
  from typing import Any
 
@@ -52,11 +51,11 @@ class FalkorDriverSession(GraphDriverSession):
          if isinstance(query, list):
              for cypher, params in query:
                  params = convert_datetimes_to_strings(params)
-                 await self.graph.query(str(cypher), params)
+                 await self.graph.query(str(cypher), params)  # type: ignore[reportUnknownArgumentType]
          else:
              params = dict(kwargs)
              params = convert_datetimes_to_strings(params)
-             await self.graph.query(str(query), params)
+             await self.graph.query(str(query), params)  # type: ignore[reportUnknownArgumentType]
          # Assuming `graph.query` is async (ideal); otherwise, wrap in executor
          return None
 
@@ -66,22 +65,30 @@ class FalkorDriver(GraphDriver):
 
      def __init__(
          self,
-         uri: str,
-         user: str,
-         password: str,
+         host: str = 'localhost',
+         port: int = 6379,
+         username: str | None = None,
+         password: str | None = None,
+         falkor_db: FalkorDB | None = None,
      ):
-         super().__init__()
-         uri_parts = uri.split('://', 1)
-         uri = f'{uri_parts[0]}://{user}:{password}@{uri_parts[1]}'
+         """
+         Initialize the FalkorDB driver.
 
-         self.client = FalkorDB(
-             host='your-db.falkor.cloud', port=6380, password='your_password', ssl=True
-         )
+         FalkorDB is a multi-tenant graph database.
+         To connect, provide the host and port.
+         The default parameters assume a local (on-premises) FalkorDB instance.
+         """
+         super().__init__()
+         if falkor_db is not None:
+             # If a FalkorDB instance is provided, use it directly
+             self.client = falkor_db
+         else:
+             self.client = FalkorDB(host=host, port=port, username=username, password=password)
 
      def _get_graph(self, graph_name: str | None) -> FalkorGraph:
-         # FalkorDB requires a non-None database name for multi-tenant graphs; the default is "DEFAULT_DATABASE"
+         # FalkorDB requires a non-None database name for multi-tenant graphs; the default is DEFAULT_DATABASE
          if graph_name is None:
-             graph_name = 'DEFAULT_DATABASE'
+             graph_name = DEFAULT_DATABASE
          return self.client.select_graph(graph_name)
 
      async def execute_query(self, cypher_query_, **kwargs: Any):
@@ -92,7 +99,7 @@ class FalkorDriver(GraphDriver):
          params = convert_datetimes_to_strings(dict(kwargs))
 
          try:
-             result = await graph.query(cypher_query_, params)
+             result = await graph.query(cypher_query_, params)  # type: ignore[reportUnknownArgumentType]
          except Exception as e:
              if 'already indexed' in str(e):
                  # check if index already exists
@@ -102,17 +109,36 @@ class FalkorDriver(GraphDriver):
              raise
 
          # Convert the result header to a list of strings
-         header = [h[1].decode('utf-8') for h in result.header]
-         return result.result_set, header, None
+         header = [h[1] for h in result.header]
+
+         # Convert FalkorDB's result format (list of lists) to the format expected by Graphiti (list of dicts)
+         records = []
+         for row in result.result_set:
+             record = {}
+             for i, field_name in enumerate(header):
+                 if i < len(row):
+                     record[field_name] = row[i]
+                 else:
+                     # If there are more fields in header than values in row, set to None
+                     record[field_name] = None
+             records.append(record)
+
+         return records, header, None
 
      def session(self, database: str | None) -> GraphDriverSession:
          return FalkorDriverSession(self._get_graph(database))
 
      async def close(self) -> None:
-         await self.client.connection.close()
-
-     async def delete_all_indexes(self, database_: str = DEFAULT_DATABASE) -> Coroutine:
-         return self.execute_query(
+         """Close the driver connection."""
+         if hasattr(self.client, 'aclose'):
+             await self.client.aclose()  # type: ignore[reportUnknownMemberType]
+         elif hasattr(self.client.connection, 'aclose'):
+             await self.client.connection.aclose()
+         elif hasattr(self.client.connection, 'close'):
+             await self.client.connection.close()
+
+     async def delete_all_indexes(self, database_: str = DEFAULT_DATABASE) -> None:
+         await self.execute_query(
              'CALL db.indexes() YIELD name DROP INDEX name',
              database_=database_,
          )
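A short construction sketch for the new keyword-based `FalkorDriver` signature; the cloud host and password are placeholders, and the `ssl` keyword mirrors the removed example above:

```python
from falkordb import FalkorDB

from graphiti_core.driver.falkordb_driver import FalkorDriver

# Local (on-premises) instance: the defaults match the README's docker run example
driver = FalkorDriver(host='localhost', port=6379)

# Alternatively, hand over a preconfigured client, e.g. for FalkorDB Cloud with TLS
falkor_db = FalkorDB(host='your-db.falkor.cloud', port=6380, password='<your-password>', ssl=True)
cloud_driver = FalkorDriver(falkor_db=falkor_db)
```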
graphiti_core/driver/neo4j_driver.py CHANGED
@@ -18,7 +18,7 @@ import logging
  from collections.abc import Coroutine
  from typing import Any
 
- from neo4j import AsyncGraphDatabase
+ from neo4j import AsyncGraphDatabase, EagerResult
  from typing_extensions import LiteralString
 
  from graphiti_core.driver.driver import GraphDriver, GraphDriverSession
@@ -42,7 +42,7 @@ class Neo4jDriver(GraphDriver):
              auth=(user or '', password or ''),
          )
 
-     async def execute_query(self, cypher_query_: LiteralString, **kwargs: Any) -> Coroutine:
+     async def execute_query(self, cypher_query_: LiteralString, **kwargs: Any) -> EagerResult:
          params = kwargs.pop('params', None)
          result = await self.client.execute_query(cypher_query_, parameters_=params, **kwargs)
 
@@ -54,7 +54,9 @@ class Neo4jDriver(GraphDriver):
      async def close(self) -> None:
          return await self.client.close()
 
-     def delete_all_indexes(self, database_: str = DEFAULT_DATABASE) -> Coroutine:
+     def delete_all_indexes(
+         self, database_: str = DEFAULT_DATABASE
+     ) -> Coroutine[Any, Any, EagerResult]:
          return self.client.execute_query(
              'CALL db.indexes() YIELD name DROP INDEX name',
              database_=database_,
graphiti_core/embedder/voyage.py CHANGED
@@ -38,7 +38,7 @@ class VoyageAIEmbedder(EmbedderClient):
          if config is None:
              config = VoyageAIEmbedderConfig()
          self.config = config
-         self.client = voyageai.AsyncClient(api_key=config.api_key)
+         self.client = voyageai.AsyncClient(api_key=config.api_key)  # type: ignore[reportUnknownMemberType]
 
      async def create(
          self, input_data: str | list[str] | Iterable[int] | Iterable[Iterable[int]]
graphiti_core/graphiti.py CHANGED
@@ -101,7 +101,7 @@ class AddEpisodeResults(BaseModel):
  class Graphiti:
      def __init__(
          self,
-         uri: str,
+         uri: str | None = None,
          user: str | None = None,
          password: str | None = None,
          llm_client: LLMClient | None = None,
@@ -162,7 +162,12 @@ class Graphiti:
          Graphiti if you're using the default OpenAIClient.
          """
 
-         self.driver = graph_driver if graph_driver else Neo4jDriver(uri, user, password)
+         if graph_driver:
+             self.driver = graph_driver
+         else:
+             if uri is None:
+                 raise ValueError("uri must be provided when graph_driver is None")
+             self.driver = Neo4jDriver(uri, user, password)
 
          self.database = DEFAULT_DATABASE
          self.store_raw_episode_content = store_raw_episode_content
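With `uri` now optional, a driver instance can be passed directly; a minimal sketch of the two code paths above (the connection values are illustrative):

```python
from graphiti_core import Graphiti
from graphiti_core.driver.falkordb_driver import FalkorDriver

# Explicit driver: uri may be omitted entirely
graphiti = Graphiti(graph_driver=FalkorDriver(host='localhost', port=6379))

# Default Neo4j path: uri is still required and is now validated up front
graphiti_neo4j = Graphiti(uri='bolt://localhost:7687', user='neo4j', password='password')
```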
graphiti_core/helpers.py CHANGED
@@ -19,6 +19,7 @@ import os
  import re
  from collections.abc import Coroutine
  from datetime import datetime
+ from typing import Any
 
  import numpy as np
  from dotenv import load_dotenv
@@ -31,7 +32,7 @@ from graphiti_core.errors import GroupIdValidationError
 
  load_dotenv()
 
- DEFAULT_DATABASE = os.getenv('DEFAULT_DATABASE', 'neo4j')
+ DEFAULT_DATABASE = os.getenv('DEFAULT_DATABASE', 'default_db')
  USE_PARALLEL_RUNTIME = bool(os.getenv('USE_PARALLEL_RUNTIME', False))
  SEMAPHORE_LIMIT = int(os.getenv('SEMAPHORE_LIMIT', 20))
  MAX_REFLEXION_ITERATIONS = int(os.getenv('MAX_REFLEXION_ITERATIONS', 0))
@@ -99,7 +100,7 @@ def normalize_l2(embedding: list[float]) -> NDArray:
  async def semaphore_gather(
      *coroutines: Coroutine,
      max_coroutines: int | None = None,
- ):
+ ) -> list[Any]:
      semaphore = asyncio.Semaphore(max_coroutines or SEMAPHORE_LIMIT)
 
      async def _wrap_coroutine(coroutine):
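The new `list[Any]` return annotation makes gathered results usable without casts. A minimal usage sketch, assuming only the signature shown above (the `fetch` coroutine is hypothetical):

```python
import asyncio

from graphiti_core.helpers import semaphore_gather


async def fetch(i: int) -> int:
    # Hypothetical stand-in for a real API call
    await asyncio.sleep(0.01)
    return i * i


async def main() -> None:
    # At most max_coroutines (default SEMAPHORE_LIMIT) coroutines run at once
    results = await semaphore_gather(*(fetch(i) for i in range(10)), max_coroutines=5)
    print(results)  # [0, 1, 4, 9, ...]


asyncio.run(main())
```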
graphiti_core/llm_client/gemini_client.py CHANGED
@@ -17,19 +17,21 @@ limitations under the License.
  import json
  import logging
  import typing
+ from typing import ClassVar
 
  from google import genai  # type: ignore
  from google.genai import types  # type: ignore
  from pydantic import BaseModel
 
  from ..prompts.models import Message
- from .client import LLMClient
+ from .client import MULTILINGUAL_EXTRACTION_RESPONSES, LLMClient
  from .config import DEFAULT_MAX_TOKENS, LLMConfig, ModelSize
  from .errors import RateLimitError
 
  logger = logging.getLogger(__name__)
 
- DEFAULT_MODEL = 'gemini-2.0-flash'
+ DEFAULT_MODEL = 'gemini-2.5-flash'
+ DEFAULT_SMALL_MODEL = 'models/gemini-2.5-flash-lite-preview-06-17'
 
 
  class GeminiClient(LLMClient):
@@ -43,27 +45,34 @@ class GeminiClient(LLMClient):
          model (str): The model name to use for generating responses.
          temperature (float): The temperature to use for generating responses.
          max_tokens (int): The maximum number of tokens to generate in a response.
-
+         thinking_config (types.ThinkingConfig | None): Optional thinking configuration for models that support it.
      Methods:
-         __init__(config: LLMConfig | None = None, cache: bool = False):
-             Initializes the GeminiClient with the provided configuration and cache setting.
+         __init__(config: LLMConfig | None = None, cache: bool = False, thinking_config: types.ThinkingConfig | None = None):
+             Initializes the GeminiClient with the provided configuration, cache setting, and optional thinking config.
 
          _generate_response(messages: list[Message]) -> dict[str, typing.Any]:
              Generates a response from the language model based on the provided messages.
      """
 
+     # Class-level constants
+     MAX_RETRIES: ClassVar[int] = 2
+
      def __init__(
          self,
          config: LLMConfig | None = None,
          cache: bool = False,
          max_tokens: int = DEFAULT_MAX_TOKENS,
+         thinking_config: types.ThinkingConfig | None = None,
      ):
          """
-         Initialize the GeminiClient with the provided configuration and cache setting.
+         Initialize the GeminiClient with the provided configuration, cache setting, and optional thinking config.
 
          Args:
              config (LLMConfig | None): The configuration for the LLM client, including API key, model, temperature, and max tokens.
              cache (bool): Whether to use caching for responses. Defaults to False.
+             thinking_config (types.ThinkingConfig | None): Optional thinking configuration for models that support it.
+                 Only use with models that support thinking (gemini-2.5+). Defaults to None.
+
          """
          if config is None:
@@ -76,6 +85,50 @@ class GeminiClient(LLMClient):
              api_key=config.api_key,
          )
          self.max_tokens = max_tokens
+         self.thinking_config = thinking_config
+
+     def _check_safety_blocks(self, response) -> None:
+         """Check if response was blocked for safety reasons and raise appropriate exceptions."""
+         # Check if the response was blocked for safety reasons
+         if not (hasattr(response, 'candidates') and response.candidates):
+             return
+
+         candidate = response.candidates[0]
+         if not (hasattr(candidate, 'finish_reason') and candidate.finish_reason == 'SAFETY'):
+             return
+
+         # Content was blocked for safety reasons - collect safety details
+         safety_info = []
+         safety_ratings = getattr(candidate, 'safety_ratings', None)
+
+         if safety_ratings:
+             for rating in safety_ratings:
+                 if getattr(rating, 'blocked', False):
+                     category = getattr(rating, 'category', 'Unknown')
+                     probability = getattr(rating, 'probability', 'Unknown')
+                     safety_info.append(f'{category}: {probability}')
+
+         safety_details = (
+             ', '.join(safety_info) if safety_info else 'Content blocked for safety reasons'
+         )
+         raise Exception(f'Response blocked by Gemini safety filters: {safety_details}')
+
+     def _check_prompt_blocks(self, response) -> None:
+         """Check if prompt was blocked and raise appropriate exceptions."""
+         prompt_feedback = getattr(response, 'prompt_feedback', None)
+         if not prompt_feedback:
+             return
+
+         block_reason = getattr(prompt_feedback, 'block_reason', None)
+         if block_reason:
+             raise Exception(f'Prompt blocked by Gemini: {block_reason}')
+
+     def _get_model_for_size(self, model_size: ModelSize) -> str:
+         """Get the appropriate model name based on the requested size."""
+         if model_size == ModelSize.small:
+             return self.small_model or DEFAULT_SMALL_MODEL
+         else:
+             return self.model or DEFAULT_MODEL
 
      async def _generate_response(
          self,
@@ -91,17 +144,17 @@ class GeminiClient(LLMClient):
              messages (list[Message]): A list of messages to send to the language model.
              response_model (type[BaseModel] | None): An optional Pydantic model to parse the response into.
              max_tokens (int): The maximum number of tokens to generate in the response.
+             model_size (ModelSize): The size of the model to use (small or medium).
 
          Returns:
              dict[str, typing.Any]: The response from the language model.
 
          Raises:
              RateLimitError: If the API rate limit is exceeded.
-             RefusalError: If the content is blocked by the model.
-             Exception: If there is an error generating the response.
+             Exception: If there is an error generating the response or content is blocked.
          """
          try:
-             gemini_messages: list[types.Content] = []
+             gemini_messages: typing.Any = []
              # If a response model is provided, add schema for structured output
              system_prompt = ''
              if response_model is not None:
@@ -127,6 +180,9 @@ class GeminiClient(LLMClient):
                      types.Content(role=m.role, parts=[types.Part.from_text(text=m.content)])
                  )
 
+             # Get the appropriate model for the requested size
+             model = self._get_model_for_size(model_size)
+
              # Create generation config
              generation_config = types.GenerateContentConfig(
                  temperature=self.temperature,
@@ -134,15 +190,20 @@ class GeminiClient(LLMClient):
                  response_mime_type='application/json' if response_model else None,
                  response_schema=response_model if response_model else None,
                  system_instruction=system_prompt,
+                 thinking_config=self.thinking_config,
              )
 
              # Generate content using the simple string approach
              response = await self.client.aio.models.generate_content(
-                 model=self.model or DEFAULT_MODEL,
-                 contents=gemini_messages,  # type: ignore[arg-type]  # mypy fails on broad union type
+                 model=model,
+                 contents=gemini_messages,
                  config=generation_config,
              )
 
+             # Check for safety and prompt blocks
+             self._check_safety_blocks(response)
+             self._check_prompt_blocks(response)
+
              # If this was a structured output request, parse the response into the Pydantic model
              if response_model is not None:
                  try:
@@ -160,9 +221,16 @@ class GeminiClient(LLMClient):
              return {'content': response.text}
 
          except Exception as e:
-             # Check if it's a rate limit error
-             if 'rate limit' in str(e).lower() or 'quota' in str(e).lower():
+             # Check if it's a rate limit error based on Gemini API error codes
+             error_message = str(e).lower()
+             if (
+                 'rate limit' in error_message
+                 or 'quota' in error_message
+                 or 'resource_exhausted' in error_message
+                 or '429' in str(e)
+             ):
                  raise RateLimitError from e
+
              logger.error(f'Error in generating LLM response: {e}')
              raise
 
@@ -174,13 +242,14 @@ class GeminiClient(LLMClient):
          model_size: ModelSize = ModelSize.medium,
      ) -> dict[str, typing.Any]:
          """
-         Generate a response from the Gemini language model.
-         This method overrides the parent class method to provide a direct implementation.
+         Generate a response from the Gemini language model with retry logic and error handling.
+         This method overrides the parent class method to provide a direct implementation with advanced retry logic.
 
          Args:
              messages (list[Message]): A list of messages to send to the language model.
              response_model (type[BaseModel] | None): An optional Pydantic model to parse the response into.
-             max_tokens (int): The maximum number of tokens to generate in the response.
+             max_tokens (int | None): The maximum number of tokens to generate in the response.
+             model_size (ModelSize): The size of the model to use (small or medium).
 
          Returns:
              dict[str, typing.Any]: The response from the language model.
@@ -188,10 +257,53 @@ class GeminiClient(LLMClient):
          if max_tokens is None:
              max_tokens = self.max_tokens
 
-         # Call the internal _generate_response method
-         return await self._generate_response(
-             messages=messages,
-             response_model=response_model,
-             max_tokens=max_tokens,
-             model_size=model_size,
-         )
+         retry_count = 0
+         last_error = None
+
+         # Add multilingual extraction instructions
+         messages[0].content += MULTILINGUAL_EXTRACTION_RESPONSES
+
+         while retry_count <= self.MAX_RETRIES:
+             try:
+                 response = await self._generate_response(
+                     messages=messages,
+                     response_model=response_model,
+                     max_tokens=max_tokens,
+                     model_size=model_size,
+                 )
+                 return response
+             except RateLimitError:
+                 # Rate limit errors should not trigger retries (fail fast)
+                 raise
+             except Exception as e:
+                 last_error = e
+
+                 # Check if this is a safety block - these typically shouldn't be retried
+                 if 'safety' in str(e).lower() or 'blocked' in str(e).lower():
+                     logger.warning(f'Content blocked by safety filters: {e}')
+                     raise
+
+                 # Don't retry if we've hit the max retries
+                 if retry_count >= self.MAX_RETRIES:
+                     logger.error(f'Max retries ({self.MAX_RETRIES}) exceeded. Last error: {e}')
+                     raise
+
+                 retry_count += 1
+
+                 # Construct a detailed error message for the LLM
+                 error_context = (
+                     f'The previous response attempt was invalid. '
+                     f'Error type: {e.__class__.__name__}. '
+                     f'Error details: {str(e)}. '
+                     f'Please try again with a valid response, ensuring the output matches '
+                     f'the expected format and constraints.'
+                 )
+
+                 error_message = Message(role='user', content=error_context)
+                 messages.append(error_message)
+                 logger.warning(
+                     f'Retrying after application error (attempt {retry_count}/{self.MAX_RETRIES}): {e}'
+                 )
+
+         # If we somehow get here, raise the last error
+         raise last_error or Exception('Max retries exceeded with no specific error')
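A minimal construction sketch for the new `thinking_config` parameter; `types.ThinkingConfig` comes from the google-genai SDK imported at the top of this module, and the `thinking_budget` value is illustrative:

```python
from google.genai import types  # type: ignore

from graphiti_core.llm_client.gemini_client import GeminiClient, LLMConfig

# Only pass thinking_config for thinking-capable models (gemini-2.5+)
client = GeminiClient(
    config=LLMConfig(api_key='<your-google-api-key>', model='gemini-2.5-flash'),
    thinking_config=types.ThinkingConfig(thinking_budget=1024),  # budget is an assumption, tune as needed
)
```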
graphiti_core/nodes.py CHANGED
@@ -540,10 +540,18 @@ class CommunityNode(Node):
 
  # Node helpers
  def get_episodic_node_from_record(record: Any) -> EpisodicNode:
+     created_at = parse_db_date(record['created_at'])
+     valid_at = parse_db_date(record['valid_at'])
+
+     if created_at is None:
+         raise ValueError(f"created_at cannot be None for episode {record.get('uuid', 'unknown')}")
+     if valid_at is None:
+         raise ValueError(f"valid_at cannot be None for episode {record.get('uuid', 'unknown')}")
+
      return EpisodicNode(
          content=record['content'],
-         created_at=parse_db_date(record['created_at']),  # type: ignore
-         valid_at=parse_db_date(record['valid_at']),  # type: ignore
+         created_at=created_at,
+         valid_at=valid_at,
          uuid=record['uuid'],
          group_id=record['group_id'],
          source=EpisodeType.from_str(record['source']),
graphiti_core/search/search_filters.py CHANGED
@@ -19,7 +19,6 @@ from enum import Enum
  from typing import Any
 
  from pydantic import BaseModel, Field
- from typing_extensions import LiteralString
 
 
  class ComparisonOperator(Enum):
@@ -53,8 +52,8 @@ class SearchFilters(BaseModel):
 
  def node_search_filter_query_constructor(
      filters: SearchFilters,
- ) -> tuple[LiteralString, dict[str, Any]]:
-     filter_query: LiteralString = ''
+ ) -> tuple[str, dict[str, Any]]:
+     filter_query: str = ''
      filter_params: dict[str, Any] = {}
 
      if filters.node_labels is not None:
@@ -67,8 +66,8 @@ def node_search_filter_query_constructor(
 
  def edge_search_filter_query_constructor(
      filters: SearchFilters,
- ) -> tuple[LiteralString, dict[str, Any]]:
-     filter_query: LiteralString = ''
+ ) -> tuple[str, dict[str, Any]]:
+     filter_query: str = ''
      filter_params: dict[str, Any] = {}
 
      if filters.edge_types is not None:
graphiti_core/search/search_utils.py CHANGED
@@ -278,9 +278,6 @@ async def edge_similarity_search(
          routing_='r',
      )
 
-     if driver.provider == 'falkordb':
-         records = [dict(zip(header, row, strict=True)) for row in records]
-
      edges = [get_entity_edge_from_record(record) for record in records]
 
      return edges
@@ -377,8 +374,6 @@ async def node_fulltext_search(
          database_=DEFAULT_DATABASE,
          routing_='r',
      )
-     if driver.provider == 'falkordb':
-         records = [dict(zip(header, row, strict=True)) for row in records]
 
      nodes = [get_entity_node_from_record(record) for record in records]
 
@@ -433,8 +428,7 @@ async def node_similarity_search(
          database_=DEFAULT_DATABASE,
          routing_='r',
      )
-     if driver.provider == 'falkordb':
-         records = [dict(zip(header, row, strict=True)) for row in records]
+
      nodes = [get_entity_node_from_record(record) for record in records]
 
      return nodes
graphiti_core/utils/maintenance/community_operations.py CHANGED
@@ -40,7 +40,7 @@ async def get_community_clusters(
          database_=DEFAULT_DATABASE,
      )
 
-     group_ids = group_id_values[0]['group_ids']
+     group_ids = group_id_values[0]['group_ids'] if group_id_values else []
 
      for group_id in group_ids:
          projection: dict[str, list[Neighbor]] = {}
graphiti_core/utils/maintenance/edge_operations.py CHANGED
@@ -297,7 +297,7 @@ async def resolve_extracted_edges(
      embedder = clients.embedder
      await create_entity_edge_embeddings(embedder, extracted_edges)
 
-     search_results: tuple[list[list[EntityEdge]], list[list[EntityEdge]]] = await semaphore_gather(
+     search_results = await semaphore_gather(
          get_relevant_edges(driver, extracted_edges, SearchFilters()),
          get_edge_invalidation_candidates(driver, extracted_edges, SearchFilters(), 0.2),
      )
graphiti_core/utils/maintenance/graph_data_operations.py CHANGED
@@ -21,7 +21,7 @@ from typing_extensions import LiteralString
 
  from graphiti_core.driver.driver import GraphDriver
  from graphiti_core.graph_queries import get_fulltext_indices, get_range_indices
- from graphiti_core.helpers import DEFAULT_DATABASE, semaphore_gather
+ from graphiti_core.helpers import DEFAULT_DATABASE, parse_db_date, semaphore_gather
  from graphiti_core.nodes import EpisodeType, EpisodicNode
 
  EPISODE_WINDOW_LEN = 3
@@ -140,10 +140,8 @@ async def retrieve_episodes(
      episodes = [
          EpisodicNode(
              content=record['content'],
-             created_at=datetime.fromtimestamp(
-                 record['created_at'].to_native().timestamp(), timezone.utc
-             ),
-             valid_at=(record['valid_at'].to_native()),
+             created_at=parse_db_date(record['created_at']) or datetime.min.replace(tzinfo=timezone.utc),
+             valid_at=parse_db_date(record['valid_at']) or datetime.min.replace(tzinfo=timezone.utc),
              uuid=record['uuid'],
              group_id=record['group_id'],
              source=EpisodeType.from_str(record['source']),
graphiti_core-0.15.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: graphiti-core
- Version: 0.14.0
+ Version: 0.15.0
  Summary: A temporal graph building library
  Project-URL: Homepage, https://help.getzep.com/graphiti/graphiti/overview
  Project-URL: Repository, https://github.com/getzep/graphiti
@@ -29,7 +29,7 @@ Requires-Dist: langchain-anthropic>=0.2.4; extra == 'dev'
  Requires-Dist: langchain-openai>=0.2.6; extra == 'dev'
  Requires-Dist: langgraph>=0.2.15; extra == 'dev'
  Requires-Dist: langsmith>=0.1.108; extra == 'dev'
- Requires-Dist: mypy>=1.11.1; extra == 'dev'
+ Requires-Dist: pyright>=1.1.380; extra == 'dev'
  Requires-Dist: pytest-asyncio>=0.24.0; extra == 'dev'
  Requires-Dist: pytest-xdist>=3.6.1; extra == 'dev'
  Requires-Dist: pytest>=8.3.3; extra == 'dev'
@@ -153,7 +153,7 @@ Requirements:
 
  - Python 3.10 or higher
  - Neo4j 5.26 / FalkorDB 1.1.2 or higher (serves as the embeddings storage backend)
- - OpenAI API key (for LLM inference and embedding)
+ - OpenAI API key (Graphiti defaults to OpenAI for LLM inference and embedding)
 
  > [!IMPORTANT]
  > Graphiti works best with LLM services that support Structured Output (such as OpenAI and Gemini).
@@ -167,6 +167,12 @@ Optional:
  > [!TIP]
  > The simplest way to install Neo4j is via [Neo4j Desktop](https://neo4j.com/download/). It provides a user-friendly
  > interface to manage Neo4j instances and databases.
+ > Alternatively, you can use FalkorDB on-premises via Docker and instantly start with the quickstart example:
+
+ ```bash
+ docker run -p 6379:6379 -p 3000:3000 -it --rm falkordb/falkordb:latest
+ ```
 
  ```bash
  pip install graphiti-core
@@ -197,13 +203,13 @@ pip install graphiti-core[anthropic,groq,google-genai]
  ## Quick Start
 
  > [!IMPORTANT]
- > Graphiti uses OpenAI for LLM inference and embedding. Ensure that an `OPENAI_API_KEY` is set in your environment.
+ > Graphiti defaults to using OpenAI for LLM inference and embedding. Ensure that an `OPENAI_API_KEY` is set in your environment.
  > Support for Anthropic and Groq LLM inference is available, too. Other LLM providers may be supported via OpenAI
  > compatible APIs.
 
  For a complete working example, see the [Quickstart Example](./examples/quickstart/README.md) in the examples directory. The quickstart demonstrates:
 
- 1. Connecting to a Neo4j database
+ 1. Connecting to a Neo4j or FalkorDB database
  2. Initializing Graphiti indices and constraints
  3. Adding episodes to the graph (both text and structured JSON)
  4. Searching for relationships (edges) using hybrid search
247
253
 
248
254
  ## Using Graphiti with Azure OpenAI
249
255
 
250
- Graphiti supports Azure OpenAI for both LLM inference and embeddings. To use Azure OpenAI, you'll need to configure both the LLM client and embedder with your Azure OpenAI credentials.
256
+ Graphiti supports Azure OpenAI for both LLM inference and embeddings. Azure deployments often require different endpoints for LLM and embedding services, and separate deployments for default and small models.
251
257
 
252
258
  ```python
253
259
  from openai import AsyncAzureOpenAI
@@ -256,19 +262,26 @@ from graphiti_core.llm_client import LLMConfig, OpenAIClient
  from graphiti_core.embedder.openai import OpenAIEmbedder, OpenAIEmbedderConfig
  from graphiti_core.cross_encoder.openai_reranker_client import OpenAIRerankerClient
 
- # Azure OpenAI configuration
+ # Azure OpenAI configuration - use separate endpoints for different services
  api_key = "<your-api-key>"
  api_version = "<your-api-version>"
- azure_endpoint = "<your-azure-endpoint>"
+ llm_endpoint = "<your-llm-endpoint>"  # e.g., "https://your-llm-resource.openai.azure.com/"
+ embedding_endpoint = "<your-embedding-endpoint>"  # e.g., "https://your-embedding-resource.openai.azure.com/"
+
+ # Create separate Azure OpenAI clients for different services
+ llm_client_azure = AsyncAzureOpenAI(
+     api_key=api_key,
+     api_version=api_version,
+     azure_endpoint=llm_endpoint
+ )
 
- # Create Azure OpenAI client for LLM
- azure_openai_client = AsyncAzureOpenAI(
+ embedding_client_azure = AsyncAzureOpenAI(
      api_key=api_key,
      api_version=api_version,
-     azure_endpoint=azure_endpoint
+     azure_endpoint=embedding_endpoint
  )
 
- # Create LLM Config with your Azure deployed model names
+ # Create LLM Config with your Azure deployment names
  azure_llm_config = LLMConfig(
      small_model="gpt-4.1-nano",
      model="gpt-4.1-mini",
@@ -281,29 +294,30 @@ graphiti = Graphiti(
      "password",
      llm_client=OpenAIClient(
          llm_config=azure_llm_config,
-         client=azure_openai_client
+         client=llm_client_azure
      ),
      embedder=OpenAIEmbedder(
          config=OpenAIEmbedderConfig(
-             embedding_model="text-embedding-3-small"  # Use your Azure deployed embedding model name
+             embedding_model="text-embedding-3-small-deployment"  # Your Azure embedding deployment name
          ),
-         client=azure_openai_client
+         client=embedding_client_azure
      ),
-     # Optional: Configure the OpenAI cross encoder with Azure OpenAI
      cross_encoder=OpenAIRerankerClient(
-         llm_config=azure_llm_config,
-         client=azure_openai_client
+         llm_config=LLMConfig(
+             model=azure_llm_config.small_model  # Use small model for reranking
+         ),
+         client=llm_client_azure
      )
  )
 
  # Now you can use Graphiti with Azure OpenAI
  ```
 
- Make sure to replace the placeholder values with your actual Azure OpenAI credentials and specify the correct embedding model name that's deployed in your Azure OpenAI service.
+ Make sure to replace the placeholder values with your actual Azure OpenAI credentials and deployment names that match your Azure OpenAI service configuration.
 
  ## Using Graphiti with Google Gemini
 
- Graphiti supports Google's Gemini models for both LLM inference and embeddings. To use Gemini, you'll need to configure both the LLM client and embedder with your Google API key.
+ Graphiti supports Google's Gemini models for LLM inference, embeddings, and cross-encoding/reranking. To use Gemini, you'll need to configure the LLM client, embedder, and the cross-encoder with your Google API key.
 
  Install Graphiti:
 
@@ -319,6 +333,7 @@ pip install "graphiti-core[google-genai]"
  from graphiti_core import Graphiti
  from graphiti_core.llm_client.gemini_client import GeminiClient, LLMConfig
  from graphiti_core.embedder.gemini import GeminiEmbedder, GeminiEmbedderConfig
+ from graphiti_core.cross_encoder.gemini_reranker_client import GeminiRerankerClient
 
  # Google API key configuration
  api_key = "<your-google-api-key>"
@@ -339,12 +354,20 @@ graphiti = Graphiti(
              api_key=api_key,
              embedding_model="embedding-001"
          )
+     ),
+     cross_encoder=GeminiRerankerClient(
+         config=LLMConfig(
+             api_key=api_key,
+             model="gemini-2.5-flash-lite-preview-06-17"
+         )
      )
  )
 
- # Now you can use Graphiti with Google Gemini
+ # Now you can use Graphiti with Google Gemini for all components
  ```
 
+ The Gemini reranker uses the `gemini-2.5-flash-lite-preview-06-17` model by default, which is optimized for cost-effective and low-latency classification tasks. Because the Gemini API does not yet expose log probabilities, it scores each passage's relevance directly on a 0-100 scale rather than using the boolean log-probability approach of the OpenAI reranker.
+
  ## Using Graphiti with Ollama (Local LLM)
 
  Graphiti supports Ollama for running local LLMs and embedding models via Ollama's OpenAI-compatible API. This is ideal for privacy-focused applications or when you want to avoid API costs.
graphiti_core-0.15.0.dist-info/RECORD CHANGED
@@ -2,32 +2,33 @@ graphiti_core/__init__.py,sha256=e5SWFkRiaUwfprYIeIgVIh7JDedNiloZvd3roU-0aDY,55
  graphiti_core/edges.py,sha256=h67vyXYhZYqlwaOmaqjHiGns6nEjuBVSIAFBMveNVo8,16257
  graphiti_core/errors.py,sha256=cH_v9TPgEPeQE6GFOHIg5TvejpUCBddGarMY2Whxbwc,2707
  graphiti_core/graph_queries.py,sha256=KfWDp8xDnPa9bcHskw8NeMpeeHBtZWBCosVdu1Iwv34,7076
- graphiti_core/graphiti.py,sha256=FzSTwU5zK6aFOETXLzdEvGn8yuf5cEjrBfnwabYY-xw,32990
+ graphiti_core/graphiti.py,sha256=nEs8hQI4O_3eu8_RjT8nXAiVB1PdqeSglltxnzSdCgI,33163
  graphiti_core/graphiti_types.py,sha256=rL-9bvnLobunJfXU4hkD6mAj14pofKp_wq8QsFDZwDU,1035
- graphiti_core/helpers.py,sha256=xHSlDlu5cCLOw40EeJSzshUqdsbqsNqv9AGGIiI-7qI,4907
- graphiti_core/nodes.py,sha256=WG7czM-neIeUDjLc5JCS1k0xRDANMY1lT9rDBc7Ms8U,18724
+ graphiti_core/helpers.py,sha256=ixUOfWN_GJVRvdiK-RzgAYJD18nM1CLmLBDNmVrIboQ,4948
+ graphiti_core/nodes.py,sha256=34X5cyXLBFTq9o2MxG2xk419ZSFz0i_CyAV-dDu7Mbg,19002
  graphiti_core/py.typed,sha256=vlmmzQOt7bmeQl9L3XJP4W6Ry0iiELepnOrinKz5KQg,79
- graphiti_core/cross_encoder/__init__.py,sha256=hry59vz21x-AtGZ0MJ7ugw0HTwJkXiddpp_Yqnwsen0,723
+ graphiti_core/cross_encoder/__init__.py,sha256=_F2F1eEIogkrD6QBOnMZc_SIJ2d4JsLr136gpM59r-Y,804
  graphiti_core/cross_encoder/bge_reranker_client.py,sha256=sY7RKsCp90vTjYxv6vmIHT4p3oCsFCRYWH-H0Ia0vN0,1449
  graphiti_core/cross_encoder/client.py,sha256=KLsbfWKOEaAV3adFe3XZlAeb-gje9_sVKCVZTaJP3ac,1441
+ graphiti_core/cross_encoder/gemini_reranker_client.py,sha256=7ePrgEAF_bw4AFqmqJDTd6y_Yc0prm2zZ4hGFyayDgI,5833
  graphiti_core/cross_encoder/openai_reranker_client.py,sha256=_Hftiz250HbEkY_26z6A1oxg4pzM8Sbr8CwnbJEsggc,4522
- graphiti_core/driver/__init__.py,sha256=DumfxIEY3z_nkz5YGaYH1GM50HgeAdEowNK189jcdAg,626
+ graphiti_core/driver/__init__.py,sha256=VRr-znMYmo6Sxdh-5UrNwAxY0Af7k2JqnKlxHi8K8cg,668
  graphiti_core/driver/driver.py,sha256=-FHAA2gM8FA0re-q6udmjQ6pNFdFGRQrMRuAiqX_1A4,1829
- graphiti_core/driver/falkordb_driver.py,sha256=Iz3wnfoJIO7EslqZvG6mduyZ5C-DWxFDPM5Q4QJRCuo,4686
- graphiti_core/driver/neo4j_driver.py,sha256=D8CV5GbhKoHIQ78BA9ozlwdvXPLUbBmFSfT2lww8PJk,1910
+ graphiti_core/driver/falkordb_driver.py,sha256=9myO1CqJUl-fu-yw2hZcwM51-Rt2Lluv9f-cTzvcnv8,5958
+ graphiti_core/driver/neo4j_driver.py,sha256=f8cSkcaCDyQLyI85JBprw0rdrarpd5Tq1mlX-Mz3kls,1962
  graphiti_core/embedder/__init__.py,sha256=EL564ZuE-DZjcuKNUK_exMn_XHXm2LdO9fzdXePVKL4,179
  graphiti_core/embedder/azure_openai.py,sha256=OyomPwC1fIsddI-3n6g00kQFdQznZorBhHwkQKCLUok,2384
  graphiti_core/embedder/client.py,sha256=qEpSHceL_Gc4QQPJWIOnuNLemNuR_TYA4r28t2Vldbg,1115
  graphiti_core/embedder/gemini.py,sha256=7En-W46YxqC5qL3vYB5Ed-Xm0hqLxi7-LgZ95c4M7ME,3263
  graphiti_core/embedder/openai.py,sha256=bIThUoLMeGlHG2-3VikzK6JZfOHKn4PKvUMx5sHxJy8,2192
- graphiti_core/embedder/voyage.py,sha256=gQhdcz2IYPSyOcDn3w8aHToVS3KQhyZrUBm4vqr3WcE,2224
+ graphiti_core/embedder/voyage.py,sha256=IxWFGW_SxisF1WxBuBvGV82BPBZrKgesiF3g54jQwpQ,2265
  graphiti_core/llm_client/__init__.py,sha256=QgBWUiCeBp6YiA_xqyrDvJ9jIyy1hngH8g7FWahN3nw,776
  graphiti_core/llm_client/anthropic_client.py,sha256=392rtkH_I7yOJUlQvjoOnS8Lz14WBP8egQ3OfRH0nFs,12481
  graphiti_core/llm_client/azure_openai_client.py,sha256=ekERggAekbb7enes1RJqdRChf_mjaZTFXsnMbxO7azQ,2497
  graphiti_core/llm_client/client.py,sha256=v_w5TBbDJYYADCXSs2r287g5Ami2Urma-GGEbHSI_Jg,5826
  graphiti_core/llm_client/config.py,sha256=90IgSBxZE_3nWdaEONVLUznI8lytPA7ZyexQz-_c55U,2560
  graphiti_core/llm_client/errors.py,sha256=pn6brRiLW60DAUIXJYKBT6MInrS4ueuH1hNLbn_JbQo,1243
- graphiti_core/llm_client/gemini_client.py,sha256=OdRAB2bWlXAi3gRmE1xVljYJ0T7JTZC82VK71wHyZi8,7722
+ graphiti_core/llm_client/gemini_client.py,sha256=EhuqknKgPMAq1H-ILw5-bTvMsY1DHVdSeGYBrUxCzqE,12871
  graphiti_core/llm_client/groq_client.py,sha256=k7zbXHfOpb4jhvvKFsccVYTq4yGGpxmY7xzNA02N2zk,2559
  graphiti_core/llm_client/openai_base_client.py,sha256=gfMcKPyLrylz_ouRdoenDWXyitmgfFZ17Zthbkq3Qs4,8126
  graphiti_core/llm_client/openai_client.py,sha256=ykBK94gxzE7iXux5rvOzVNA8q0Sqzq-8njPB75XcRe8,3240
@@ -54,23 +55,23 @@ graphiti_core/search/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
  graphiti_core/search/search.py,sha256=bJCFaNApu5396pXTa-xciu8ORDdRFJqfE3j2ieRVd7Y,15162
  graphiti_core/search/search_config.py,sha256=VvKg6AB_RPhoe56DBBXHRBXHThAVJ_OLFCyq_yKof-A,3765
  graphiti_core/search/search_config_recipes.py,sha256=4GquRphHhJlpXQhAZOySYnCzBWYoTwxlJj44eTOavZQ,7443
- graphiti_core/search/search_filters.py,sha256=jG30nMWX03xoT9ohgyHNu_Xes8GwjIF2eTv6QaiWMqw,6466
+ graphiti_core/search/search_filters.py,sha256=H7Vgob2SvwsG56qiTDXDhI4R4MMY40TVpphY5KHPwYU,6382
  graphiti_core/search/search_helpers.py,sha256=G5Ceaq5Pfgx0Weelqgeylp_pUHwiBnINaUYsDbURJbE,2636
- graphiti_core/search/search_utils.py,sha256=74d3RDbx9MWkDei1U5g0K5l1EenzB1NPNYdSP9l8aEg,34958
+ graphiti_core/search/search_utils.py,sha256=MYIlA21f4G2hY_boWMuRK75E77mGdR7j_idjCjdy77Q,34619
  graphiti_core/telemetry/__init__.py,sha256=5kALLDlU9bb2v19CdN7qVANsJWyfnL9E60J6FFgzm3o,226
  graphiti_core/telemetry/telemetry.py,sha256=47LrzOVBCcZxsYPsnSxWFiztHoxYKKxPwyRX0hnbDGc,3230
  graphiti_core/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  graphiti_core/utils/bulk_utils.py,sha256=YnyXzmOFgqbLdIAIu9Y6aJjUZHhXj8nBnlegkXBTKi8,16344
  graphiti_core/utils/datetime_utils.py,sha256=Ti-2tnrDFRzBsbfblzsHybsM3jaDLP4-VT2t0VhpIzU,1357
  graphiti_core/utils/maintenance/__init__.py,sha256=vW4H1KyapTl-OOz578uZABYcpND4wPx3Vt6aAPaXh78,301
- graphiti_core/utils/maintenance/community_operations.py,sha256=2rhRqtL9gDbjXKO4-S0nGpaWvS4ck5rFiazZiogIJao,10088
- graphiti_core/utils/maintenance/edge_operations.py,sha256=Fwu2TLmQF_9EVcA-uUlt1ZiGC6RILIfKDr9W7R4gAno,21633
- graphiti_core/utils/maintenance/graph_data_operations.py,sha256=OHuiAyP1Z7dfR90dWVQ87TJQO83P0sQihJyr4WIhOhk,5362
+ graphiti_core/utils/maintenance/community_operations.py,sha256=AimQzT7wr4M3ofsUetHa1cPEmhsngqJoNWm3Q-3Hwww,10115
+ graphiti_core/utils/maintenance/edge_operations.py,sha256=sj4AJ9zPm8ACiC1wSj99bFUUmg4OgFVFnPOSXKfb3T8,21578
+ graphiti_core/utils/maintenance/graph_data_operations.py,sha256=4NR06gn11yfNOknVkk2JpF_zYMiaNizl3urL0LgnXrE,5391
  graphiti_core/utils/maintenance/node_operations.py,sha256=0WdH_VrkVXLV9YX3xPErXOFygOo2N9g3es9yIB2Yl8Q,15876
  graphiti_core/utils/maintenance/temporal_operations.py,sha256=mJkw9xLB4W2BsLfC5POr0r-PHWL9SIfNj_l_xu0B5ug,3410
  graphiti_core/utils/maintenance/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  graphiti_core/utils/ontology_utils/entity_types_utils.py,sha256=QJX5cG0GSSNF_Mm_yrldr69wjVAbN_MxLhOSznz85Hk,1279
- graphiti_core-0.14.0.dist-info/METADATA,sha256=ePJs8ax8EBgFysrMfz-D_uJ9RKo6O5T5DjvETb7ijqU,20591
- graphiti_core-0.14.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- graphiti_core-0.14.0.dist-info/licenses/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
- graphiti_core-0.14.0.dist-info/RECORD,,
+ graphiti_core-0.15.0.dist-info/METADATA,sha256=55es9UNNaAPAxb8onmNH-rz0rpqafsy2zgpBQqIKWl0,21839
+ graphiti_core-0.15.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ graphiti_core-0.15.0.dist-info/licenses/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
+ graphiti_core-0.15.0.dist-info/RECORD,,