graphiti-core 0.5.1.tar.gz → 0.5.2.tar.gz

This diff shows the content of publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between those versions as published.

Potentially problematic release: this version of graphiti-core might be problematic.

Files changed (60)
  1. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/PKG-INFO +1 -1
  2. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/helpers.py +2 -2
  3. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/anthropic_client.py +5 -2
  4. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/client.py +15 -6
  5. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/config.py +1 -1
  6. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/groq_client.py +5 -2
  7. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/openai_client.py +16 -6
  8. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/openai_generic_client.py +12 -4
  9. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/maintenance/edge_operations.py +5 -1
  10. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/pyproject.toml +1 -1
  11. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/LICENSE +0 -0
  12. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/README.md +0 -0
  13. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/__init__.py +0 -0
  14. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/cross_encoder/__init__.py +0 -0
  15. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/cross_encoder/bge_reranker_client.py +0 -0
  16. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/cross_encoder/client.py +0 -0
  17. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/cross_encoder/openai_reranker_client.py +0 -0
  18. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/edges.py +0 -0
  19. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/embedder/__init__.py +0 -0
  20. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/embedder/client.py +0 -0
  21. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/embedder/openai.py +0 -0
  22. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/embedder/voyage.py +0 -0
  23. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/errors.py +0 -0
  24. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/graphiti.py +0 -0
  25. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/__init__.py +0 -0
  26. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/errors.py +0 -0
  27. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/utils.py +0 -0
  28. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/models/__init__.py +0 -0
  29. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/models/edges/__init__.py +0 -0
  30. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/models/edges/edge_db_queries.py +0 -0
  31. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/models/nodes/__init__.py +0 -0
  32. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/models/nodes/node_db_queries.py +0 -0
  33. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/nodes.py +0 -0
  34. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/__init__.py +0 -0
  35. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/dedupe_edges.py +0 -0
  36. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/dedupe_nodes.py +0 -0
  37. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/eval.py +0 -0
  38. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/extract_edge_dates.py +0 -0
  39. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/extract_edges.py +0 -0
  40. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/extract_nodes.py +0 -0
  41. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/invalidate_edges.py +0 -0
  42. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/lib.py +0 -0
  43. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/models.py +0 -0
  44. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/prompt_helpers.py +0 -0
  45. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/prompts/summarize_nodes.py +0 -0
  46. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/py.typed +0 -0
  47. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/search/__init__.py +0 -0
  48. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/search/search.py +0 -0
  49. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/search/search_config.py +0 -0
  50. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/search/search_config_recipes.py +0 -0
  51. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/search/search_utils.py +0 -0
  52. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/__init__.py +0 -0
  53. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/bulk_utils.py +0 -0
  54. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/datetime_utils.py +0 -0
  55. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/maintenance/__init__.py +0 -0
  56. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/maintenance/community_operations.py +0 -0
  57. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/maintenance/graph_data_operations.py +0 -0
  58. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/maintenance/node_operations.py +0 -0
  59. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/maintenance/temporal_operations.py +0 -0
  60. {graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/maintenance/utils.py +0 -0
{graphiti_core-0.5.1 → graphiti_core-0.5.2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: graphiti-core
-Version: 0.5.1
+Version: 0.5.2
 Summary: A temporal graph building library
 License: Apache-2.0
 Author: Paul Paliychuk

{graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/helpers.py
@@ -73,12 +73,12 @@ def lucene_sanitize(query: str) -> str:
     return sanitized
 
 
-def normalize_l2(embedding: list[float]) -> list[float]:
+def normalize_l2(embedding: list[float]):
     embedding_array = np.array(embedding)
     if embedding_array.ndim == 1:
         norm = np.linalg.norm(embedding_array)
         if norm == 0:
-            return embedding_array.tolist()
+            return [0.0] * len(embedding)
         return (embedding_array / norm).tolist()
     else:
         norm = np.linalg.norm(embedding_array, 2, axis=1, keepdims=True)

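The only behavioral change above is in the zero-norm branch: an all-zero embedding now comes back as an explicit list of float zeros rather than the unnormalized input. A small illustrative snippet; the expected values are our reading of the hunk, not taken from the package's tests:

from graphiti_core.helpers import normalize_l2

print(normalize_l2([3.0, 4.0]))  # [0.6, 0.8] (unchanged between 0.5.1 and 0.5.2)
print(normalize_l2([0, 0, 0]))   # 0.5.2: [0.0, 0.0, 0.0]; 0.5.1 echoed the zero vector back via .tolist()
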
{graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/anthropic_client.py
@@ -48,7 +48,10 @@ class AnthropicClient(LLMClient):
         )
 
     async def _generate_response(
-        self, messages: list[Message], response_model: type[BaseModel] | None = None
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
     ) -> dict[str, typing.Any]:
         system_message = messages[0]
         user_messages = [{'role': m.role, 'content': m.content} for m in messages[1:]] + [
@@ -59,7 +62,7 @@ class AnthropicClient(LLMClient):
         result = await self.client.messages.create(
             system='Only include JSON in the response. Do not include any additional text or explanation of the content.\n'
             + system_message.content,
-            max_tokens=self.max_tokens,
+            max_tokens=max_tokens or self.max_tokens,
             temperature=self.temperature,
             messages=user_messages,  # type: ignore
             model=self.model or DEFAULT_MODEL,

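Each provider client now resolves the per-call budget with "max_tokens or self.max_tokens": a truthy argument wins, while a falsy one (0 or None) falls back to the client-level setting. A standalone sketch of that resolution rule; resolve_max_tokens and the 8192 client default are illustrative, not part of graphiti-core:

DEFAULT_MAX_TOKENS = 1024  # 0.5.2 value from graphiti_core/llm_client/config.py (see that hunk below)


def resolve_max_tokens(per_call: int | None = DEFAULT_MAX_TOKENS, client_default: int = 8192) -> int:
    # Mirrors the "max_tokens or self.max_tokens" expression used in the clients.
    return per_call or client_default


assert resolve_max_tokens(16384) == 16384  # explicit per-call budget wins
assert resolve_max_tokens(0) == 8192       # falsy value defers to the client's own max_tokens
assert resolve_max_tokens() == 1024        # otherwise the config default applies
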
{graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/client.py
@@ -26,7 +26,7 @@ from pydantic import BaseModel
 from tenacity import retry, retry_if_exception, stop_after_attempt, wait_random_exponential
 
 from ..prompts.models import Message
-from .config import LLMConfig
+from .config import DEFAULT_MAX_TOKENS, LLMConfig
 from .errors import RateLimitError
 
 DEFAULT_TEMPERATURE = 0
@@ -90,16 +90,22 @@ class LLMClient(ABC):
         reraise=True,
     )
     async def _generate_response_with_retry(
-        self, messages: list[Message], response_model: type[BaseModel] | None = None
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
     ) -> dict[str, typing.Any]:
         try:
-            return await self._generate_response(messages, response_model)
+            return await self._generate_response(messages, response_model, max_tokens)
         except (httpx.HTTPStatusError, RateLimitError) as e:
             raise e
 
     @abstractmethod
     async def _generate_response(
-        self, messages: list[Message], response_model: type[BaseModel] | None = None
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
     ) -> dict[str, typing.Any]:
         pass
 
@@ -110,7 +116,10 @@ class LLMClient(ABC):
         return hashlib.md5(key_str.encode()).hexdigest()
 
     async def generate_response(
-        self, messages: list[Message], response_model: type[BaseModel] | None = None
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
     ) -> dict[str, typing.Any]:
         if response_model is not None:
             serialized_model = json.dumps(response_model.model_json_schema())
@@ -131,7 +140,7 @@ class LLMClient(ABC):
         for message in messages:
             message.content = self._clean_input(message.content)
 
-        response = await self._generate_response_with_retry(messages, response_model)
+        response = await self._generate_response_with_retry(messages, response_model, max_tokens)
 
         if self.cache_enabled:
             self.cache_dir.set(cache_key, response)

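The abstract base now threads max_tokens from generate_response through the retry wrapper into _generate_response, so custom clients must accept the extra parameter. A minimal sketch of a conforming subclass, assuming _generate_response is the only abstract method; EchoLLMClient is a toy stand-in, not a real provider client:

import typing

from pydantic import BaseModel

from graphiti_core.llm_client.client import LLMClient
from graphiti_core.llm_client.config import DEFAULT_MAX_TOKENS
from graphiti_core.prompts.models import Message


class EchoLLMClient(LLMClient):
    """Toy client matching the 0.5.2 signature; it performs no real LLM call."""

    async def _generate_response(
        self,
        messages: list[Message],
        response_model: type[BaseModel] | None = None,
        max_tokens: int = DEFAULT_MAX_TOKENS,
    ) -> dict[str, typing.Any]:
        # A real implementation would forward max_tokens to its provider,
        # typically as "max_tokens or self.max_tokens".
        return {'echo': messages[-1].content, 'budget': max_tokens}
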
{graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/config.py
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """
 
-DEFAULT_MAX_TOKENS = 16384
+DEFAULT_MAX_TOKENS = 1024
 DEFAULT_TEMPERATURE = 0
 
 
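The library-wide default request budget drops from 16384 to 1024 tokens, and call sites that need longer completions now ask for them per request. A quick check of the new constant plus the override pattern that replaces the old blanket default (the generate_response call in the comment is illustrative):

from graphiti_core.llm_client.config import DEFAULT_MAX_TOKENS

assert DEFAULT_MAX_TOKENS == 1024  # was 16384 in 0.5.1

# Callers that still need long outputs pass the budget per call, e.g.
#     await llm_client.generate_response(messages, max_tokens=16384)
# which is what extract_edges does in the edge_operations.py hunk further down.
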
{graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/groq_client.py
@@ -45,7 +45,10 @@ class GroqClient(LLMClient):
         self.client = AsyncGroq(api_key=config.api_key)
 
     async def _generate_response(
-        self, messages: list[Message], response_model: type[BaseModel] | None = None
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
     ) -> dict[str, typing.Any]:
         msgs: list[ChatCompletionMessageParam] = []
         for m in messages:
@@ -58,7 +61,7 @@ class GroqClient(LLMClient):
             model=self.model or DEFAULT_MODEL,
             messages=msgs,
             temperature=self.temperature,
-            max_tokens=self.max_tokens,
+            max_tokens=max_tokens or self.max_tokens,
             response_format={'type': 'json_object'},
         )
         result = response.choices[0].message.content or ''

{graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/openai_client.py
@@ -25,7 +25,7 @@ from pydantic import BaseModel
 
 from ..prompts.models import Message
 from .client import LLMClient
-from .config import LLMConfig
+from .config import DEFAULT_MAX_TOKENS, LLMConfig
 from .errors import RateLimitError, RefusalError
 
 logger = logging.getLogger(__name__)
@@ -58,7 +58,11 @@ class OpenAIClient(LLMClient):
     MAX_RETRIES: ClassVar[int] = 2
 
     def __init__(
-        self, config: LLMConfig | None = None, cache: bool = False, client: typing.Any = None
+        self,
+        config: LLMConfig | None = None,
+        cache: bool = False,
+        client: typing.Any = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
     ):
         """
         Initialize the OpenAIClient with the provided configuration, cache setting, and client.
@@ -84,7 +88,10 @@ class OpenAIClient(LLMClient):
             self.client = client
 
     async def _generate_response(
-        self, messages: list[Message], response_model: type[BaseModel] | None = None
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
     ) -> dict[str, typing.Any]:
         openai_messages: list[ChatCompletionMessageParam] = []
         for m in messages:
@@ -98,7 +105,7 @@ class OpenAIClient(LLMClient):
                 model=self.model or DEFAULT_MODEL,
                 messages=openai_messages,
                 temperature=self.temperature,
-                max_tokens=self.max_tokens,
+                max_tokens=max_tokens or self.max_tokens,
                 response_format=response_model,  # type: ignore
             )
 
@@ -119,14 +126,17 @@ class OpenAIClient(LLMClient):
             raise
 
     async def generate_response(
-        self, messages: list[Message], response_model: type[BaseModel] | None = None
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
     ) -> dict[str, typing.Any]:
         retry_count = 0
         last_error = None
 
         while retry_count <= self.MAX_RETRIES:
             try:
-                response = await self._generate_response(messages, response_model)
+                response = await self._generate_response(messages, response_model, max_tokens)
                 return response
             except (RateLimitError, RefusalError):
                 # These errors should not trigger retries

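End to end, a caller can now set the budget per request while keeping the structured-output path. A hedged usage sketch; the message roles, the Colors model, and relying on OPENAI_API_KEY being set in the environment are our assumptions, only the constructor and generate_response signatures come from the hunks above:

import asyncio

from pydantic import BaseModel

from graphiti_core.llm_client.openai_client import OpenAIClient
from graphiti_core.prompts.models import Message


class Colors(BaseModel):
    colors: list[str]


async def main() -> None:
    client = OpenAIClient()  # assumes OPENAI_API_KEY is available to the underlying OpenAI SDK

    response = await client.generate_response(
        messages=[
            Message(role='system', content='You return structured JSON.'),
            Message(role='user', content='List two colors.'),
        ],
        response_model=Colors,
        max_tokens=2048,  # per-call budget; omit to fall back to DEFAULT_MAX_TOKENS (1024 in 0.5.2)
    )
    print(response)


asyncio.run(main())
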
{graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/llm_client/openai_generic_client.py
@@ -26,7 +26,7 @@ from pydantic import BaseModel
 
 from ..prompts.models import Message
 from .client import LLMClient
-from .config import LLMConfig
+from .config import DEFAULT_MAX_TOKENS, LLMConfig
 from .errors import RateLimitError, RefusalError
 
 logger = logging.getLogger(__name__)
@@ -85,7 +85,10 @@ class OpenAIGenericClient(LLMClient):
             self.client = client
 
     async def _generate_response(
-        self, messages: list[Message], response_model: type[BaseModel] | None = None
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
     ) -> dict[str, typing.Any]:
         openai_messages: list[ChatCompletionMessageParam] = []
         for m in messages:
@@ -111,7 +114,10 @@ class OpenAIGenericClient(LLMClient):
             raise
 
     async def generate_response(
-        self, messages: list[Message], response_model: type[BaseModel] | None = None
+        self,
+        messages: list[Message],
+        response_model: type[BaseModel] | None = None,
+        max_tokens: int = DEFAULT_MAX_TOKENS,
     ) -> dict[str, typing.Any]:
         retry_count = 0
         last_error = None
@@ -126,7 +132,9 @@ class OpenAIGenericClient(LLMClient):
 
         while retry_count <= self.MAX_RETRIES:
             try:
-                response = await self._generate_response(messages, response_model)
+                response = await self._generate_response(
+                    messages, response_model, max_tokens=max_tokens
+                )
                 return response
             except (RateLimitError, RefusalError):
                 # These errors should not trigger retries

{graphiti_core-0.5.1 → graphiti_core-0.5.2}/graphiti_core/utils/maintenance/edge_operations.py
@@ -79,6 +79,8 @@ async def extract_edges(
 ) -> list[EntityEdge]:
     start = time()
 
+    EXTRACT_EDGES_MAX_TOKENS = 16384
+
     node_uuids_by_name_map = {node.name: node.uuid for node in nodes}
 
     # Prepare context for LLM
@@ -93,7 +95,9 @@ async def extract_edges(
     reflexion_iterations = 0
     while facts_missed and reflexion_iterations < MAX_REFLEXION_ITERATIONS:
         llm_response = await llm_client.generate_response(
-            prompt_library.extract_edges.edge(context), response_model=ExtractedEdges
+            prompt_library.extract_edges.edge(context),
+            response_model=ExtractedEdges,
+            max_tokens=EXTRACT_EDGES_MAX_TOKENS,
         )
         edges_data = llm_response.get('edges', [])
 
{graphiti_core-0.5.1 → graphiti_core-0.5.2}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "graphiti-core"
-version = "0.5.1"
+version = "0.5.2"
 description = "A temporal graph building library"
 authors = [
     "Paul Paliychuk <paul@getzep.com>",