graphiti-core 0.17.2__py3-none-any.whl → 0.17.3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.

Potentially problematic release.

graphiti_core/embedder/gemini.py

@@ -37,7 +37,7 @@ from .client import EmbedderClient, EmbedderConfig
 
 logger = logging.getLogger(__name__)
 
-DEFAULT_EMBEDDING_MODEL = 'text-embedding-001' # gemini-embedding-001 or text-embedding-005
+DEFAULT_EMBEDDING_MODEL = 'text-embedding-001'  # gemini-embedding-001 or text-embedding-005
 
 DEFAULT_BATCH_SIZE = 100
 
@@ -78,7 +78,7 @@ class GeminiEmbedder(EmbedderClient):
 
         if batch_size is None and self.config.embedding_model == 'gemini-embedding-001':
             # Gemini API has a limit on the number of instances per request
-            #https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api
+            # https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api
             self.batch_size = 1
         elif batch_size is None:
             self.batch_size = DEFAULT_BATCH_SIZE
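
The hunk above fixes the comment documenting why gemini-embedding-001 is pinned to single-instance requests. For context, the batch-size rule as it reads around this change, distilled into a minimal standalone sketch (resolve_batch_size is an illustrative helper, not part of the package):

    DEFAULT_BATCH_SIZE = 100

    def resolve_batch_size(batch_size: int | None, embedding_model: str) -> int:
        # gemini-embedding-001 accepts only one instance per request, so the
        # client pins the batch size to 1 and loops over inputs instead.
        if batch_size is None and embedding_model == 'gemini-embedding-001':
            return 1
        if batch_size is None:
            return DEFAULT_BATCH_SIZE
        return batch_size

    assert resolve_batch_size(None, 'gemini-embedding-001') == 1
    assert resolve_batch_size(None, 'text-embedding-005') == 100
    assert resolve_batch_size(25, 'gemini-embedding-001') == 25
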
@@ -113,32 +113,34 @@ class GeminiEmbedder(EmbedderClient):
     async def create_batch(self, input_data_list: list[str]) -> list[list[float]]:
         """
         Create embeddings for a batch of input data using Google's Gemini embedding model.
-
+
         This method handles batching to respect the Gemini API's limits on the number
         of instances that can be processed in a single request.
-
+
         Args:
             input_data_list: A list of strings to create embeddings for.
-
+
         Returns:
             A list of embedding vectors (each vector is a list of floats).
         """
         if not input_data_list:
             return []
-
+
         batch_size = self.batch_size
         all_embeddings = []
-
+
         # Process inputs in batches
         for i in range(0, len(input_data_list), batch_size):
-            batch = input_data_list[i:i + batch_size]
-
+            batch = input_data_list[i : i + batch_size]
+
             try:
                 # Generate embeddings for this batch
                 result = await self.client.aio.models.embed_content(
                     model=self.config.embedding_model or DEFAULT_EMBEDDING_MODEL,
                     contents=batch,  # type: ignore[arg-type]  # mypy fails on broad union type
-                    config=types.EmbedContentConfig(output_dimensionality=self.config.embedding_dim),
+                    config=types.EmbedContentConfig(
+                        output_dimensionality=self.config.embedding_dim
+                    ),
                 )
 
                 if not result.embeddings or len(result.embeddings) == 0:
@@ -149,29 +151,33 @@ class GeminiEmbedder(EmbedderClient):
                     if not embedding.values:
                         raise ValueError('Empty embedding values returned')
                     all_embeddings.append(embedding.values)
-
+
             except Exception as e:
                 # If batch processing fails, fall back to individual processing
-                logger.warning(f"Batch embedding failed for batch {i//batch_size + 1}, falling back to individual processing: {e}")
-
+                logger.warning(
+                    f'Batch embedding failed for batch {i // batch_size + 1}, falling back to individual processing: {e}'
+                )
+
                 for item in batch:
                     try:
                         # Process each item individually
                         result = await self.client.aio.models.embed_content(
                             model=self.config.embedding_model or DEFAULT_EMBEDDING_MODEL,
                             contents=[item],  # type: ignore[arg-type]  # mypy fails on broad union type
-                            config=types.EmbedContentConfig(output_dimensionality=self.config.embedding_dim),
+                            config=types.EmbedContentConfig(
+                                output_dimensionality=self.config.embedding_dim
+                            ),
                         )
-
+
                         if not result.embeddings or len(result.embeddings) == 0:
                             raise ValueError('No embeddings returned from Gemini API')
                         if not result.embeddings[0].values:
                             raise ValueError('Empty embedding values returned')
-
+
                         all_embeddings.append(result.embeddings[0].values)
-
+
                     except Exception as individual_error:
-                        logger.error(f"Failed to embed individual item: {individual_error}")
+                        logger.error(f'Failed to embed individual item: {individual_error}')
                         raise individual_error
-
+
         return all_embeddings
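
The reflow above touches the core logic of create_batch: embed a whole batch first, and only when that batch fails retry its items one at a time. A condensed, self-contained sketch of that control flow (embed_with_fallback and the embed coroutine are hypothetical stand-ins for the method and client.aio.models.embed_content):

    import logging

    logger = logging.getLogger(__name__)

    async def embed_with_fallback(items: list[str], batch_size: int, embed) -> list[list[float]]:
        # Mirrors create_batch: attempt each full batch, then fall back to
        # per-item requests only for the batch that raised.
        results: list[list[float]] = []
        for i in range(0, len(items), batch_size):
            batch = items[i : i + batch_size]
            try:
                results.extend(await embed(batch))
            except Exception as e:
                logger.warning(f'Batch {i // batch_size + 1} failed, retrying items individually: {e}')
                for item in batch:
                    results.extend(await embed([item]))
        return results
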
graphiti_core/llm_client/client.py

@@ -172,13 +172,13 @@ class LLMClient(ABC):
         """
         Log the full input messages, the raw output (if any), and the exception for debugging failed generations.
         """
-        log = ""
-        log += f"Input messages: {json.dumps([m.model_dump() for m in messages], indent=2)}\n"
+        log = ''
+        log += f'Input messages: {json.dumps([m.model_dump() for m in messages], indent=2)}\n'
         if output is not None:
             if len(output) > 4000:
-                log += f"Raw output: {output[:2000]}... (truncated) ...{output[-2000:]}\n"
+                log += f'Raw output: {output[:2000]}... (truncated) ...{output[-2000:]}\n'
             else:
-                log += f"Raw output: {output}\n"
+                log += f'Raw output: {output}\n'
         else:
-            log += "No raw output available"
+            log += 'No raw output available'
         return log
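
The quote-style changes above sit in the helper that assembles the failure log. Its truncation rule (outputs over 4,000 characters keep their first and last 2,000) is easy to verify in isolation; a minimal sketch with a hypothetical truncate_output helper:

    def truncate_output(output: str, limit: int = 4000, keep: int = 2000) -> str:
        # Mirror the logging helper: long outputs keep their head and tail.
        if len(output) > limit:
            return f'{output[:keep]}... (truncated) ...{output[-keep:]}'
        return output

    assert truncate_output('x' * 5000) == 'x' * 2000 + '... (truncated) ...' + 'x' * 2000
    assert truncate_output('short') == 'short'
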
graphiti_core/llm_client/gemini_client.py

@@ -219,14 +219,14 @@ class GeminiClient(LLMClient):
         array_match = re.search(r'\]\s*$', raw_output)
         if array_match:
             try:
-                return json.loads(raw_output[:array_match.end()])
+                return json.loads(raw_output[: array_match.end()])
             except Exception:
                 pass
         # Try to salvage a JSON object
         obj_match = re.search(r'\}\s*$', raw_output)
         if obj_match:
             try:
-                return json.loads(raw_output[:obj_match.end()])
+                return json.loads(raw_output[: obj_match.end()])
             except Exception:
                 pass
         return None
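
The slice-spacing fixes above are inside salvage_json, which attempts to re-parse model output that ends in a closing bracket after normal JSON parsing has failed. A self-contained sketch of the same idea, with the hunk's two bracket branches collapsed into a loop (the standalone function is illustrative, not the package's API):

    import json
    import re

    def salvage_json(raw_output: str):
        # Cut the string at the last closing bracket (array first, then
        # object) and try to re-parse what remains.
        for pattern in (r'\]\s*$', r'\}\s*$'):
            match = re.search(pattern, raw_output)
            if match:
                try:
                    return json.loads(raw_output[: match.end()])
                except Exception:
                    pass
        return None

    assert salvage_json('{"items": [1, 2]}\n\n') == {'items': [1, 2]}
    assert salvage_json('truncated {"a":') is None
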
@@ -323,12 +323,14 @@ class GeminiClient(LLMClient):
             return validated_model.model_dump()
         except Exception as e:
             if raw_output:
-                logger.error("🦀 LLM generation failed parsing as JSON, will try to salvage.")
+                logger.error(
+                    '🦀 LLM generation failed parsing as JSON, will try to salvage.'
+                )
                 logger.error(self._get_failed_generation_log(gemini_messages, raw_output))
                 # Try to salvage
                 salvaged = self.salvage_json(raw_output)
                 if salvaged is not None:
-                    logger.warning("Salvaged partial JSON from truncated/malformed output.")
+                    logger.warning('Salvaged partial JSON from truncated/malformed output.')
                     return salvaged
             raise Exception(f'Failed to parse structured response: {e}') from e
 
@@ -384,7 +386,11 @@ class GeminiClient(LLMClient):
                     max_tokens=max_tokens,
                     model_size=model_size,
                 )
-                last_output = response.get('content') if isinstance(response, dict) and 'content' in response else None
+                last_output = (
+                    response.get('content')
+                    if isinstance(response, dict) and 'content' in response
+                    else None
+                )
                 return response
             except RateLimitError as e:
                 # Rate limit errors should not trigger retries (fail fast)
@@ -416,7 +422,7 @@ class GeminiClient(LLMClient):
                 )
 
         # If we exit the loop without returning, all retries are exhausted
-        logger.error("🦀 LLM generation failed and retries are exhausted.")
+        logger.error('🦀 LLM generation failed and retries are exhausted.')
         logger.error(self._get_failed_generation_log(messages, last_output))
         logger.error(f'Max retries ({self.MAX_RETRIES}) exceeded. Last error: {last_error}')
-        raise last_error or Exception("Max retries exceeded")
+        raise last_error or Exception('Max retries exceeded')
graphiti_core/models/edges/edge_db_queries.py

@@ -31,9 +31,9 @@ EPISODIC_EDGE_SAVE_BULK = """
 """
 
 ENTITY_EDGE_SAVE = """
-    MATCH (source:Entity {uuid: $source_uuid})
-    MATCH (target:Entity {uuid: $target_uuid})
-    MERGE (source)-[r:RELATES_TO {uuid: $uuid}]->(target)
+    MATCH (source:Entity {uuid: $edge_data.source_uuid})
+    MATCH (target:Entity {uuid: $edge_data.target_uuid})
+    MERGE (source)-[r:RELATES_TO {uuid: $edge_data.uuid}]->(target)
     SET r = $edge_data
     WITH r CALL db.create.setRelationshipVectorProperty(r, "fact_embedding", $edge_data.fact_embedding)
     RETURN r.uuid AS uuid"""
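
This is the release's one behavioral change: ENTITY_EDGE_SAVE now reads source_uuid, target_uuid, and uuid out of the $edge_data map instead of taking them as separate top-level parameters, so the caller supplies a single dict. A hedged sketch of a caller using the neo4j Python driver (the driver setup and any edge_data fields beyond those named in the query are assumptions):

    # Assumes an async neo4j driver and the updated ENTITY_EDGE_SAVE query.
    edge_data = {
        'uuid': 'edge-uuid',
        'source_uuid': 'source-node-uuid',
        'target_uuid': 'target-node-uuid',
        'fact_embedding': [0.1, 0.2, 0.3],
        # ...any other relationship properties applied by SET r = $edge_data
    }

    async def save_entity_edge(driver, query: str) -> str:
        # One map parameter now carries the UUIDs used by MATCH and MERGE.
        records, _, _ = await driver.execute_query(query, edge_data=edge_data)
        return records[0]['uuid']
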
graphiti_core-0.17.2.dist-info/METADATA → graphiti_core-0.17.3.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: graphiti-core
-Version: 0.17.2
+Version: 0.17.3
 Summary: A temporal graph building library
 Project-URL: Homepage, https://help.getzep.com/graphiti/graphiti/overview
 Project-URL: Repository, https://github.com/getzep/graphiti
graphiti_core-0.17.2.dist-info/RECORD → graphiti_core-0.17.3.dist-info/RECORD

@@ -19,16 +19,16 @@ graphiti_core/driver/neo4j_driver.py,sha256=0MCAWAPay0LdcqrFSkY91GooUtrn1yX1CTKu
 graphiti_core/embedder/__init__.py,sha256=EL564ZuE-DZjcuKNUK_exMn_XHXm2LdO9fzdXePVKL4,179
 graphiti_core/embedder/azure_openai.py,sha256=OyomPwC1fIsddI-3n6g00kQFdQznZorBhHwkQKCLUok,2384
 graphiti_core/embedder/client.py,sha256=qEpSHceL_Gc4QQPJWIOnuNLemNuR_TYA4r28t2Vldbg,1115
-graphiti_core/embedder/gemini.py,sha256=GdpnmRKunruLB4ViJMo6K-WEv8RqZvuLfgyKXtRcEMI,7218
+graphiti_core/embedder/gemini.py,sha256=s3_2xjHdFTIuF-fJlBFwh64XK5BLPHHThuBymDpM5Jo,7194
 graphiti_core/embedder/openai.py,sha256=bIThUoLMeGlHG2-3VikzK6JZfOHKn4PKvUMx5sHxJy8,2192
 graphiti_core/embedder/voyage.py,sha256=oJHAZiNqjdEJOKgoKfGWcxK2-Ewqn5UB3vrBwIwP2u4,2546
 graphiti_core/llm_client/__init__.py,sha256=QgBWUiCeBp6YiA_xqyrDvJ9jIyy1hngH8g7FWahN3nw,776
 graphiti_core/llm_client/anthropic_client.py,sha256=xTFcrgMDK77BwnChBhYj51Jaa2mRNI850oJv2pKZI0A,12892
 graphiti_core/llm_client/azure_openai_client.py,sha256=ekERggAekbb7enes1RJqdRChf_mjaZTFXsnMbxO7azQ,2497
-graphiti_core/llm_client/client.py,sha256=fgNnJgmoZN7v7PNoJGtt4MMdKkDNsmT9F2XOLKZOU38,6473
+graphiti_core/llm_client/client.py,sha256=cUwwCZEhP9jJAI04AhHxsFPecggajSgCRCM3frrYJqA,6473
 graphiti_core/llm_client/config.py,sha256=90IgSBxZE_3nWdaEONVLUznI8lytPA7ZyexQz-_c55U,2560
 graphiti_core/llm_client/errors.py,sha256=pn6brRiLW60DAUIXJYKBT6MInrS4ueuH1hNLbn_JbQo,1243
-graphiti_core/llm_client/gemini_client.py,sha256=LKB6nktFMIn2fuRNRoGeBOmxlE3WuhB5sWI7yUwGGaA,17583
+graphiti_core/llm_client/gemini_client.py,sha256=m0-6SFUs8qqoR5rGTrASAcMtTbJKfZqO4-MaDr4CYCQ,17719
 graphiti_core/llm_client/groq_client.py,sha256=bYLE_cg1QEhugsJOXh4b1vPbxagKeMWqk48240GCzMs,2922
 graphiti_core/llm_client/openai_base_client.py,sha256=gfMcKPyLrylz_ouRdoenDWXyitmgfFZ17Zthbkq3Qs4,8126
 graphiti_core/llm_client/openai_client.py,sha256=ykBK94gxzE7iXux5rvOzVNA8q0Sqzq-8njPB75XcRe8,3240
@@ -36,7 +36,7 @@ graphiti_core/llm_client/openai_generic_client.py,sha256=WElMnPqdb1CxzYH4p2-m_9r
 graphiti_core/llm_client/utils.py,sha256=zKpxXEbKa369m4W7RDEf-m56kH46V1Mx3RowcWZEWWs,1000
 graphiti_core/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 graphiti_core/models/edges/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-graphiti_core/models/edges/edge_db_queries.py,sha256=4vSWdmE5MKoDrlHJmmr2xNhVSQ-buE1O7mCX_H0Wtfk,2294
+graphiti_core/models/edges/edge_db_queries.py,sha256=YyGc0UT4eeOHQrYuXfAGIwxiX4xCc4YHrz9c2bVT7Lw,2324
 graphiti_core/models/nodes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 graphiti_core/models/nodes/node_db_queries.py,sha256=AQgRGVO-GgFWfLq1G6k8s86WItwpXruy3Mj4DBli-vM,2145
 graphiti_core/prompts/__init__.py,sha256=EA-x9xUki9l8wnu2l8ek_oNf75-do5tq5hVq7Zbv8Kw,101
@@ -71,7 +71,7 @@ graphiti_core/utils/maintenance/node_operations.py,sha256=4jMlmbB3zwK9KzIm2QXRxz
 graphiti_core/utils/maintenance/temporal_operations.py,sha256=mJkw9xLB4W2BsLfC5POr0r-PHWL9SIfNj_l_xu0B5ug,3410
 graphiti_core/utils/maintenance/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 graphiti_core/utils/ontology_utils/entity_types_utils.py,sha256=QJX5cG0GSSNF_Mm_yrldr69wjVAbN_MxLhOSznz85Hk,1279
-graphiti_core-0.17.2.dist-info/METADATA,sha256=bqv7KcvfhQ5qp4F2BYrWK5ziw2uJzAA2rPxi-KlJTp8,23791
-graphiti_core-0.17.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-graphiti_core-0.17.2.dist-info/licenses/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
-graphiti_core-0.17.2.dist-info/RECORD,,
+graphiti_core-0.17.3.dist-info/METADATA,sha256=lKv8hXBNV6eA3NXeWdCJRfm5QaJT5Eth936n9ZxuMtI,23791
+graphiti_core-0.17.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+graphiti_core-0.17.3.dist-info/licenses/LICENSE,sha256=KCUwCyDXuVEgmDWkozHyniRyWjnWUWjkuDHfU6o3JlA,11325
+graphiti_core-0.17.3.dist-info/RECORD,,