graphiti-core 0.1.0 (graphiti_core-0.1.0-py3-none-any.whl)

This diff shows the content of publicly released package versions as they appear in their public registries. It is provided for informational purposes only and reflects the changes between those versions.

This version of graphiti-core has been flagged as a potentially problematic release.

Files changed (37)
  1. graphiti_core/__init__.py +3 -0
  2. graphiti_core/edges.py +232 -0
  3. graphiti_core/graphiti.py +618 -0
  4. graphiti_core/helpers.py +7 -0
  5. graphiti_core/llm_client/__init__.py +5 -0
  6. graphiti_core/llm_client/anthropic_client.py +63 -0
  7. graphiti_core/llm_client/client.py +96 -0
  8. graphiti_core/llm_client/config.py +58 -0
  9. graphiti_core/llm_client/groq_client.py +64 -0
  10. graphiti_core/llm_client/openai_client.py +65 -0
  11. graphiti_core/llm_client/utils.py +22 -0
  12. graphiti_core/nodes.py +250 -0
  13. graphiti_core/prompts/__init__.py +4 -0
  14. graphiti_core/prompts/dedupe_edges.py +154 -0
  15. graphiti_core/prompts/dedupe_nodes.py +151 -0
  16. graphiti_core/prompts/extract_edge_dates.py +60 -0
  17. graphiti_core/prompts/extract_edges.py +138 -0
  18. graphiti_core/prompts/extract_nodes.py +145 -0
  19. graphiti_core/prompts/invalidate_edges.py +74 -0
  20. graphiti_core/prompts/lib.py +122 -0
  21. graphiti_core/prompts/models.py +31 -0
  22. graphiti_core/search/__init__.py +0 -0
  23. graphiti_core/search/search.py +142 -0
  24. graphiti_core/search/search_utils.py +454 -0
  25. graphiti_core/utils/__init__.py +15 -0
  26. graphiti_core/utils/bulk_utils.py +227 -0
  27. graphiti_core/utils/maintenance/__init__.py +16 -0
  28. graphiti_core/utils/maintenance/edge_operations.py +170 -0
  29. graphiti_core/utils/maintenance/graph_data_operations.py +133 -0
  30. graphiti_core/utils/maintenance/node_operations.py +199 -0
  31. graphiti_core/utils/maintenance/temporal_operations.py +184 -0
  32. graphiti_core/utils/maintenance/utils.py +0 -0
  33. graphiti_core/utils/utils.py +39 -0
  34. graphiti_core-0.1.0.dist-info/LICENSE +201 -0
  35. graphiti_core-0.1.0.dist-info/METADATA +199 -0
  36. graphiti_core-0.1.0.dist-info/RECORD +37 -0
  37. graphiti_core-0.1.0.dist-info/WHEEL +4 -0
graphiti_core/llm_client/client.py ADDED
@@ -0,0 +1,96 @@
+ """
+ Copyright 2024, Zep Software, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """
+
+ import hashlib
+ import json
+ import logging
+ import typing
+ from abc import ABC, abstractmethod
+
+ import httpx
+ from diskcache import Cache
+ from tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential
+
+ from ..prompts.models import Message
+ from .config import LLMConfig
+
+ DEFAULT_TEMPERATURE = 0
+ DEFAULT_CACHE_DIR = './llm_cache'
+
+ logger = logging.getLogger(__name__)
+
+
+ def is_server_error(exception):
+     return (
+         isinstance(exception, httpx.HTTPStatusError) and 500 <= exception.response.status_code < 600
+     )
+
+
+ class LLMClient(ABC):
+     def __init__(self, config: LLMConfig | None, cache: bool = False):
+         if config is None:
+             config = LLMConfig()
+
+         self.config = config
+         self.model = config.model
+         self.temperature = config.temperature
+         self.max_tokens = config.max_tokens
+         self.cache_enabled = cache
+         self.cache_dir = Cache(DEFAULT_CACHE_DIR)  # Create a cache directory
+
+     @abstractmethod
+     def get_embedder(self) -> typing.Any:
+         pass
+
+     @retry(
+         stop=stop_after_attempt(3),
+         wait=wait_exponential(multiplier=1, min=4, max=10),
+         retry=retry_if_exception(is_server_error),
+     )
+     async def _generate_response_with_retry(self, messages: list[Message]) -> dict[str, typing.Any]:
+         try:
+             return await self._generate_response(messages)
+         except httpx.HTTPStatusError as e:
+             if not is_server_error(e):
+                 raise Exception(f'LLM request error: {e}') from e
+             else:
+                 raise
+
+     @abstractmethod
+     async def _generate_response(self, messages: list[Message]) -> dict[str, typing.Any]:
+         pass
+
+     def _get_cache_key(self, messages: list[Message]) -> str:
+         # Create a unique cache key based on the messages and model
+         message_str = json.dumps([m.model_dump() for m in messages], sort_keys=True)
+         key_str = f'{self.model}:{message_str}'
+         return hashlib.md5(key_str.encode()).hexdigest()
+
+     async def generate_response(self, messages: list[Message]) -> dict[str, typing.Any]:
+         if self.cache_enabled:
+             cache_key = self._get_cache_key(messages)
+
+             cached_response = self.cache_dir.get(cache_key)
+             if cached_response is not None:
+                 logger.debug(f'Cache hit for {cache_key}')
+                 return cached_response
+
+         response = await self._generate_response_with_retry(messages)
+
+         if self.cache_enabled:
+             self.cache_dir.set(cache_key, response)
+
+         return response
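To make the flow above concrete, here is a minimal sketch of a custom subclass: only get_embedder and _generate_response need to be implemented, and the public generate_response then layers diskcache-based caching and tenacity retries on top. EchoClient is a hypothetical stand-in, not part of the package, and the Message constructor is assumed to accept role and content, since the bundled clients rely on those fields.

import asyncio
import typing

from graphiti_core.llm_client.client import LLMClient
from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.prompts.models import Message


class EchoClient(LLMClient):
    # Hypothetical client used purely for illustration.
    def get_embedder(self) -> typing.Any:
        return None  # this sketch does not provide an embedder

    async def _generate_response(self, messages: list[Message]) -> dict[str, typing.Any]:
        # The base class wraps this call with caching and retries on 5xx errors.
        return {'echo': messages[-1].content}


async def main():
    client = EchoClient(config=LLMConfig(), cache=True)
    print(await client.generate_response([Message(role='user', content='hello')]))


asyncio.run(main())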
graphiti_core/llm_client/config.py ADDED
@@ -0,0 +1,58 @@
+ """
+ Copyright 2024, Zep Software, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """
+
+ EMBEDDING_DIM = 1024
+ DEFAULT_MAX_TOKENS = 4096
+ DEFAULT_TEMPERATURE = 0
+
+
+ class LLMConfig:
+     """
+     Configuration class for the Language Learning Model (LLM).
+
+     This class encapsulates the necessary parameters to interact with an LLM API,
+     such as OpenAI's GPT models. It stores the API key, model name, and base URL
+     for making requests to the LLM service.
+     """
+
+     def __init__(
+         self,
+         api_key: str | None = None,
+         model: str | None = None,
+         base_url: str | None = None,
+         temperature: float = DEFAULT_TEMPERATURE,
+         max_tokens: int = DEFAULT_MAX_TOKENS,
+     ):
+         """
+         Initialize the LLMConfig with the provided parameters.
+
+         Args:
+             api_key (str): The authentication key for accessing the LLM API.
+                 This is required for making authorized requests.
+
+             model (str, optional): The specific LLM model to use for generating responses.
+                 Defaults to "gpt-4o-mini", which appears to be a custom model name.
+                 Common values might include "gpt-3.5-turbo" or "gpt-4".
+
+             base_url (str, optional): The base URL of the LLM API service.
+                 Defaults to "https://api.openai.com", which is OpenAI's standard API endpoint.
+                 This can be changed if using a different provider or a custom endpoint.
+         """
+         self.base_url = base_url
+         self.api_key = api_key
+         self.model = model
+         self.temperature = temperature
+         self.max_tokens = max_tokens
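For reference, constructing a config for one of the clients below might look like the following sketch. The key and URL are placeholders rather than values taken from the package, and the model name simply mirrors OpenAIClient's default.

from graphiti_core.llm_client.config import LLMConfig

config = LLMConfig(
    api_key='sk-...',                      # placeholder key
    model='gpt-4o-2024-08-06',             # same as OpenAIClient's DEFAULT_MODEL
    base_url='https://api.openai.com/v1',  # or any OpenAI-compatible endpoint
    temperature=0,
    max_tokens=4096,
)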
graphiti_core/llm_client/groq_client.py ADDED
@@ -0,0 +1,64 @@
+ """
+ Copyright 2024, Zep Software, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """
+
+ import json
+ import logging
+ import typing
+
+ from groq import AsyncGroq
+ from groq.types.chat import ChatCompletionMessageParam
+ from openai import AsyncOpenAI
+
+ from ..prompts.models import Message
+ from .client import LLMClient
+ from .config import LLMConfig
+
+ logger = logging.getLogger(__name__)
+
+ DEFAULT_MODEL = 'llama-3.1-70b-versatile'
+
+
+ class GroqClient(LLMClient):
+     def __init__(self, config: LLMConfig | None = None, cache: bool = False):
+         if config is None:
+             config = LLMConfig()
+         super().__init__(config, cache)
+         self.client = AsyncGroq(api_key=config.api_key)
+
+     def get_embedder(self) -> typing.Any:
+         openai_client = AsyncOpenAI()
+         return openai_client.embeddings
+
+     async def _generate_response(self, messages: list[Message]) -> dict[str, typing.Any]:
+         msgs: list[ChatCompletionMessageParam] = []
+         for m in messages:
+             if m.role == 'user':
+                 msgs.append({'role': 'user', 'content': m.content})
+             elif m.role == 'system':
+                 msgs.append({'role': 'system', 'content': m.content})
+         try:
+             response = await self.client.chat.completions.create(
+                 model=self.model or DEFAULT_MODEL,
+                 messages=msgs,
+                 temperature=self.temperature,
+                 max_tokens=self.max_tokens,
+                 response_format={'type': 'json_object'},
+             )
+             result = response.choices[0].message.content or ''
+             return json.loads(result)
+         except Exception as e:
+             logger.error(f'Error in generating LLM response: {e}')
+             raise
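Because _generate_response requests response_format={'type': 'json_object'} and parses the reply with json.loads, the prompt has to ask the model for JSON. A minimal usage sketch, with a placeholder API key:

import asyncio

from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.llm_client.groq_client import GroqClient
from graphiti_core.prompts.models import Message


async def main():
    # Placeholder key; the model falls back to DEFAULT_MODEL when config.model is unset.
    client = GroqClient(LLMConfig(api_key='gsk_...'))
    messages = [
        Message(role='system', content='Answer with a JSON object like {"answer": "..."}.'),
        Message(role='user', content='Name the capital of France.'),
    ]
    print(await client.generate_response(messages))  # parsed dict, e.g. {'answer': 'Paris'}


asyncio.run(main())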
graphiti_core/llm_client/openai_client.py ADDED
@@ -0,0 +1,65 @@
+ """
+ Copyright 2024, Zep Software, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """
+
+ import json
+ import logging
+ import typing
+
+ from openai import AsyncOpenAI
+ from openai.types.chat import ChatCompletionMessageParam
+
+ from ..prompts.models import Message
+ from .client import LLMClient
+ from .config import LLMConfig
+
+ logger = logging.getLogger(__name__)
+
+ DEFAULT_MODEL = 'gpt-4o-2024-08-06'
+
+
+ class OpenAIClient(LLMClient):
+     def __init__(self, config: LLMConfig | None = None, cache: bool = False):
+         if config is None:
+             config = LLMConfig()
+
+         super().__init__(config, cache)
+
+         self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
+
+     def get_embedder(self) -> typing.Any:
+         return self.client.embeddings
+
+     async def _generate_response(self, messages: list[Message]) -> dict[str, typing.Any]:
+         openai_messages: list[ChatCompletionMessageParam] = []
+         for m in messages:
+             if m.role == 'user':
+                 openai_messages.append({'role': 'user', 'content': m.content})
+             elif m.role == 'system':
+                 openai_messages.append({'role': 'system', 'content': m.content})
+         try:
+             response = await self.client.chat.completions.create(
+                 model=self.model or DEFAULT_MODEL,
+                 messages=openai_messages,
+                 temperature=self.temperature,
+                 max_tokens=self.max_tokens,
+                 response_format={'type': 'json_object'},
+             )
+             result = response.choices[0].message.content or ''
+             return json.loads(result)
+         except Exception as e:
+             print(openai_messages)
+             logger.error(f'Error in generating LLM response: {e}')
+             raise
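OpenAIClient mirrors GroqClient, but it is the one client that forwards base_url, so it can be pointed at any OpenAI-compatible endpoint. A hedged sketch with placeholder values:

from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.llm_client.openai_client import OpenAIClient

# Placeholder key and URL; generate_response is then used exactly as in the GroqClient sketch above.
client = OpenAIClient(LLMConfig(api_key='sk-...', base_url='http://localhost:8000/v1'))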
graphiti_core/llm_client/utils.py ADDED
@@ -0,0 +1,22 @@
+ import logging
+ import typing
+ from time import time
+
+ from graphiti_core.llm_client.config import EMBEDDING_DIM
+
+ logger = logging.getLogger(__name__)
+
+
+ async def generate_embedding(
+     embedder: typing.Any, text: str, model: str = 'text-embedding-3-small'
+ ):
+     start = time()
+
+     text = text.replace('\n', ' ')
+     embedding = (await embedder.create(input=[text], model=model)).data[0].embedding
+     embedding = embedding[:EMBEDDING_DIM]
+
+     end = time()
+     logger.debug(f'embedded text of length {len(text)} in {end-start} ms')
+
+     return embedding
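generate_embedding accepts any object exposing the OpenAI embeddings interface (an async create(input=..., model=...) call) and truncates the result to EMBEDDING_DIM. A minimal sketch that reuses OpenAIClient's embedder, assuming OPENAI_API_KEY is set in the environment:

import asyncio

from graphiti_core.llm_client.openai_client import OpenAIClient
from graphiti_core.llm_client.utils import generate_embedding


async def main():
    embedder = OpenAIClient().get_embedder()  # AsyncOpenAI().embeddings under the hood
    vector = await generate_embedding(embedder, 'Paris is the capital of France.')
    print(len(vector))  # truncated to at most EMBEDDING_DIM (1024)


asyncio.run(main())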
graphiti_core/nodes.py ADDED
@@ -0,0 +1,250 @@
+ """
+ Copyright 2024, Zep Software, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """
+
+ import logging
+ from abc import ABC, abstractmethod
+ from datetime import datetime
+ from enum import Enum
+ from time import time
+ from uuid import uuid4
+
+ from neo4j import AsyncDriver
+ from openai import OpenAI
+ from pydantic import BaseModel, Field
+
+ from graphiti_core.llm_client.config import EMBEDDING_DIM
+
+ logger = logging.getLogger(__name__)
+
+
+ class EpisodeType(Enum):
+     """
+     Enumeration of different types of episodes that can be processed.
+
+     This enum defines the various sources or formats of episodes that the system
+     can handle. It's used to categorize and potentially handle different types
+     of input data differently.
+
+     Attributes:
+     -----------
+     message : str
+         Represents a standard message-type episode. The content for this type
+         should be formatted as "actor: content". For example, "user: Hello, how are you?"
+         or "assistant: I'm doing well, thank you for asking."
+     json : str
+         Represents an episode containing a JSON string object with structured data.
+     text : str
+         Represents a plain text episode.
+     """
+
+     message = 'message'
+     json = 'json'
+     text = 'text'
+
+     @staticmethod
+     def from_str(episode_type: str):
+         if episode_type == 'message':
+             return EpisodeType.message
+         if episode_type == 'json':
+             return EpisodeType.json
+         if episode_type == 'text':
+             return EpisodeType.text
+         logger.error(f'Episode type: {episode_type} not implemented')
+         raise NotImplementedError
+
+
+ class Node(BaseModel, ABC):
+     uuid: str = Field(default_factory=lambda: uuid4().hex)
+     name: str = Field(description='name of the node')
+     labels: list[str] = Field(default_factory=list)
+     created_at: datetime = Field(default_factory=lambda: datetime.now())
+
+     @abstractmethod
+     async def save(self, driver: AsyncDriver): ...
+
+     @abstractmethod
+     async def delete(self, driver: AsyncDriver): ...
+
+     def __hash__(self):
+         return hash(self.uuid)
+
+     def __eq__(self, other):
+         if isinstance(other, Node):
+             return self.uuid == other.uuid
+         return False
+
+     @classmethod
+     async def get_by_uuid(cls, driver: AsyncDriver, uuid: str): ...
+
+
+ class EpisodicNode(Node):
+     source: EpisodeType = Field(description='source type')
+     source_description: str = Field(description='description of the data source')
+     content: str = Field(description='raw episode data')
+     valid_at: datetime = Field(
+         description='datetime of when the original document was created',
+     )
+     entity_edges: list[str] = Field(
+         description='list of entity edges referenced in this episode',
+         default_factory=list,
+     )
+
+     async def save(self, driver: AsyncDriver):
+         result = await driver.execute_query(
+             """
+         MERGE (n:Episodic {uuid: $uuid})
+         SET n = {uuid: $uuid, name: $name, source_description: $source_description, source: $source, content: $content,
+         entity_edges: $entity_edges, created_at: $created_at, valid_at: $valid_at}
+         RETURN n.uuid AS uuid""",
+             uuid=self.uuid,
+             name=self.name,
+             source_description=self.source_description,
+             content=self.content,
+             entity_edges=self.entity_edges,
+             created_at=self.created_at,
+             valid_at=self.valid_at,
+             source=self.source.value,
+         )
+
+         logger.info(f'Saved Node to neo4j: {self.uuid}')
+
+         return result
+
+     async def delete(self, driver: AsyncDriver):
+         result = await driver.execute_query(
+             """
+         MATCH (n:Episodic {uuid: $uuid})
+         DETACH DELETE n
+         """,
+             uuid=self.uuid,
+         )
+
+         logger.info(f'Deleted Node: {self.uuid}')
+
+         return result
+
+     @classmethod
+     async def get_by_uuid(cls, driver: AsyncDriver, uuid: str):
+         records, _, _ = await driver.execute_query(
+             """
+         MATCH (e:Episodic {uuid: $uuid})
+         RETURN e.content as content,
+             e.created_at as created_at,
+             e.valid_at as valid_at,
+             e.uuid as uuid,
+             e.name as name,
+             e.source_description as source_description,
+             e.source as source
+         """,
+             uuid=uuid,
+         )
+
+         episodes = [
+             EpisodicNode(
+                 content=record['content'],
+                 created_at=record['created_at'].to_native().timestamp(),
+                 valid_at=(record['valid_at'].to_native()),
+                 uuid=record['uuid'],
+                 source=EpisodeType.from_str(record['source']),
+                 name=record['name'],
+                 source_description=record['source_description'],
+             )
+             for record in records
+         ]
+
+         logger.info(f'Found Node: {uuid}')
+
+         return episodes[0]
+
+
+ class EntityNode(Node):
+     name_embedding: list[float] | None = Field(default=None, description='embedding of the name')
+     summary: str = Field(description='regional summary of surrounding edges', default_factory=str)
+
+     async def update_summary(self, driver: AsyncDriver): ...
+
+     async def refresh_summary(self, driver: AsyncDriver, llm_client: OpenAI): ...
+
+     async def generate_name_embedding(self, embedder, model='text-embedding-3-small'):
+         start = time()
+         text = self.name.replace('\n', ' ')
+         embedding = (await embedder.create(input=[text], model=model)).data[0].embedding
+         self.name_embedding = embedding[:EMBEDDING_DIM]
+         end = time()
+         logger.info(f'embedded {text} in {end - start} ms')
+
+         return embedding
+
+     async def save(self, driver: AsyncDriver):
+         result = await driver.execute_query(
+             """
+         MERGE (n:Entity {uuid: $uuid})
+         SET n = {uuid: $uuid, name: $name, name_embedding: $name_embedding, summary: $summary, created_at: $created_at}
+         RETURN n.uuid AS uuid""",
+             uuid=self.uuid,
+             name=self.name,
+             summary=self.summary,
+             name_embedding=self.name_embedding,
+             created_at=self.created_at,
+         )
+
+         logger.info(f'Saved Node to neo4j: {self.uuid}')
+
+         return result
+
+     async def delete(self, driver: AsyncDriver):
+         result = await driver.execute_query(
+             """
+         MATCH (n:Entity {uuid: $uuid})
+         DETACH DELETE n
+         """,
+             uuid=self.uuid,
+         )
+
+         logger.info(f'Deleted Node: {self.uuid}')
+
+         return result
+
+     @classmethod
+     async def get_by_uuid(cls, driver: AsyncDriver, uuid: str):
+         records, _, _ = await driver.execute_query(
+             """
+         MATCH (n:Entity {uuid: $uuid})
+         RETURN
+             n.uuid As uuid,
+             n.name AS name,
+             n.created_at AS created_at,
+             n.summary AS summary
+         """,
+             uuid=uuid,
+         )
+
+         nodes: list[EntityNode] = []
+
+         for record in records:
+             nodes.append(
+                 EntityNode(
+                     uuid=record['uuid'],
+                     name=record['name'],
+                     labels=['Entity'],
+                     created_at=record['created_at'].to_native(),
+                     summary=record['summary'],
+                 )
+             )
+
+         logger.info(f'Found Node: {uuid}')
+
+         return nodes[0]
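A hedged end-to-end sketch of the node models above: build an EpisodicNode and an EntityNode, embed the entity name, and persist both with the async neo4j driver. The Neo4j URI and credentials are placeholders, and the embedder again assumes OPENAI_API_KEY is set in the environment.

import asyncio
from datetime import datetime, timezone

from neo4j import AsyncGraphDatabase

from graphiti_core.llm_client.openai_client import OpenAIClient
from graphiti_core.nodes import EntityNode, EpisodeType, EpisodicNode


async def main():
    driver = AsyncGraphDatabase.driver('bolt://localhost:7687', auth=('neo4j', 'password'))  # placeholders

    episode = EpisodicNode(
        name='greeting',
        source=EpisodeType.message,
        source_description='chat transcript',
        content='user: Hello, how are you?',
        valid_at=datetime.now(timezone.utc),
    )
    entity = EntityNode(name='user', labels=['Entity'], summary='A participant in the conversation.')

    await entity.generate_name_embedding(OpenAIClient().get_embedder())
    await episode.save(driver)
    await entity.save(driver)
    await driver.close()


asyncio.run(main())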
graphiti_core/prompts/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .lib import prompt_library
+ from .models import Message
+
+ __all__ = ['prompt_library', 'Message']
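Message itself lives in graphiti_core/prompts/models.py (not shown in this view); judging from how the LLM clients consume it, it behaves like a small pydantic model with role and content fields. A hedged sketch of building input the clients accept:

from graphiti_core.prompts import Message, prompt_library  # re-exported by this __init__

messages = [
    Message(role='system', content='You extract entities and reply with a JSON object.'),
    Message(role='user', content='Alice met Bob in Paris.'),
]
# prompt_library (from prompts/lib.py) bundles the packaged prompt templates;
# its exact structure is not visible in this diff.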