graphiti-core 0.4.2__py3-none-any.whl → 0.5.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (34)
  1. graphiti_core/cross_encoder/bge_reranker_client.py +1 -2
  2. graphiti_core/cross_encoder/client.py +3 -4
  3. graphiti_core/edges.py +51 -5
  4. graphiti_core/embedder/client.py +3 -3
  5. graphiti_core/embedder/openai.py +2 -2
  6. graphiti_core/embedder/voyage.py +3 -3
  7. graphiti_core/graphiti.py +14 -10
  8. graphiti_core/helpers.py +1 -0
  9. graphiti_core/llm_client/anthropic_client.py +4 -1
  10. graphiti_core/llm_client/client.py +20 -5
  11. graphiti_core/llm_client/errors.py +8 -0
  12. graphiti_core/llm_client/groq_client.py +4 -1
  13. graphiti_core/llm_client/openai_client.py +29 -7
  14. graphiti_core/nodes.py +50 -4
  15. graphiti_core/prompts/dedupe_edges.py +20 -17
  16. graphiti_core/prompts/dedupe_nodes.py +15 -1
  17. graphiti_core/prompts/eval.py +17 -14
  18. graphiti_core/prompts/extract_edge_dates.py +15 -7
  19. graphiti_core/prompts/extract_edges.py +18 -19
  20. graphiti_core/prompts/extract_nodes.py +11 -21
  21. graphiti_core/prompts/invalidate_edges.py +13 -25
  22. graphiti_core/prompts/lib.py +5 -1
  23. graphiti_core/prompts/prompt_helpers.py +1 -0
  24. graphiti_core/prompts/summarize_nodes.py +12 -16
  25. graphiti_core/search/search_utils.py +1 -1
  26. graphiti_core/utils/maintenance/community_operations.py +4 -2
  27. graphiti_core/utils/maintenance/edge_operations.py +14 -11
  28. graphiti_core/utils/maintenance/node_operations.py +14 -7
  29. graphiti_core/utils/maintenance/temporal_operations.py +9 -4
  30. {graphiti_core-0.4.2.dist-info → graphiti_core-0.5.0rc1.dist-info}/METADATA +1 -1
  31. graphiti_core-0.5.0rc1.dist-info/RECORD +58 -0
  32. graphiti_core-0.4.2.dist-info/RECORD +0 -57
  33. {graphiti_core-0.4.2.dist-info → graphiti_core-0.5.0rc1.dist-info}/LICENSE +0 -0
  34. {graphiti_core-0.4.2.dist-info → graphiti_core-0.5.0rc1.dist-info}/WHEEL +0 -0

graphiti_core/prompts/eval.py
@@ -17,9 +17,26 @@ limitations under the License.
 import json
 from typing import Any, Protocol, TypedDict

+from pydantic import BaseModel, Field
+
 from .models import Message, PromptFunction, PromptVersion


+class QueryExpansion(BaseModel):
+    query: str = Field(..., description='query optimized for database search')
+
+
+class QAResponse(BaseModel):
+    ANSWER: str = Field(..., description='how Alice would answer the question')
+
+
+class EvalResponse(BaseModel):
+    is_correct: bool = Field(..., description='boolean if the answer is correct or incorrect')
+    reasoning: str = Field(
+        ..., description='why you determined the response was correct or incorrect'
+    )
+
+
 class Prompt(Protocol):
     qa_prompt: PromptVersion
     eval_prompt: PromptVersion
@@ -41,10 +58,6 @@ def query_expansion(context: dict[str, Any]) -> list[Message]:
 <QUESTION>
 {json.dumps(context['query'])}
 </QUESTION>
-respond with a JSON object in the following format:
-{{
-"query": "query optimized for database search"
-}}
 """
     return [
         Message(role='system', content=sys_prompt),
@@ -67,10 +80,6 @@ def qa_prompt(context: dict[str, Any]) -> list[Message]:
 <QUESTION>
 {context['query']}
 </QUESTION>
-respond with a JSON object in the following format:
-{{
-"ANSWER": "how Alice would answer the question"
-}}
 """
     return [
         Message(role='system', content=sys_prompt),
@@ -96,12 +105,6 @@ def eval_prompt(context: dict[str, Any]) -> list[Message]:
 <RESPONSE>
 {context['response']}
 </RESPONSE>
-
-respond with a JSON object in the following format:
-{{
-"is_correct": "boolean if the answer is correct or incorrect"
-"reasoning": "why you determined the response was correct or incorrect"
-}}
 """
     return [
         Message(role='system', content=sys_prompt),
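
Across the prompt modules in this release, the inline "respond with a JSON object..." instructions are replaced by Pydantic models (here QueryExpansion, QAResponse, and EvalResponse) that describe the expected structured output. Below is a minimal sketch of what such a model provides, assuming pydantic v2; the snippet is illustrative and not part of the package.

from pydantic import BaseModel, Field

class QueryExpansion(BaseModel):
    query: str = Field(..., description='query optimized for database search')

# The model's JSON schema can be handed to an LLM client that supports structured
# output, and the raw completion can be validated back into typed data.
schema = QueryExpansion.model_json_schema()
print(schema['properties']['query']['description'])  # 'query optimized for database search'

parsed = QueryExpansion.model_validate_json('{"query": "Alice allergy history"}')
print(parsed.query)  # 'Alice allergy history'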

graphiti_core/prompts/extract_edge_dates.py
@@ -14,11 +14,24 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """

-from typing import Any, Protocol, TypedDict
+from typing import Any, Optional, Protocol, TypedDict
+
+from pydantic import BaseModel, Field

 from .models import Message, PromptFunction, PromptVersion


+class EdgeDates(BaseModel):
+    valid_at: Optional[str] = Field(
+        None,
+        description='The date and time when the relationship described by the edge fact became true or was established. YYYY-MM-DDTHH:MM:SS.SSSSSSZ or null.',
+    )
+    invalid_at: Optional[str] = Field(
+        None,
+        description='The date and time when the relationship described by the edge fact stopped being true or ended. YYYY-MM-DDTHH:MM:SS.SSSSSSZ or null.',
+    )
+
+
 class Prompt(Protocol):
     v1: PromptVersion

@@ -60,7 +73,7 @@ def v1(context: dict[str, Any]) -> list[Message]:
 Analyze the conversation and determine if there are dates that are part of the edge fact. Only set dates if they explicitly relate to the formation or alteration of the relationship itself.

 Guidelines:
-1. Use ISO 8601 format (YYYY-MM-DDTHH:MM:SSZ) for datetimes.
+1. Use ISO 8601 format (YYYY-MM-DDTHH:MM:SS.SSSSSSZ) for datetimes.
 2. Use the reference timestamp as the current time when determining the valid_at and invalid_at dates.
 3. If the fact is written in the present tense, use the Reference Timestamp for the valid_at date
 4. If no temporal information is found that establishes or changes the relationship, leave the fields as null.
@@ -69,11 +82,6 @@ def v1(context: dict[str, Any]) -> list[Message]:
 7. If only a date is mentioned without a specific time, use 00:00:00 (midnight) for that date.
 8. If only year is mentioned, use January 1st of that year at 00:00:00.
 9. Always include the time zone offset (use Z for UTC if no specific time zone is mentioned).
-Respond with a JSON object:
-{{
-"valid_at": "YYYY-MM-DDTHH:MM:SS.SSSSSSZ or null",
-"invalid_at": "YYYY-MM-DDTHH:MM:SS.SSSSSSZ or null",
-}}
 """,
         ),
     ]
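
The date guideline now asks for microsecond precision (YYYY-MM-DDTHH:MM:SS.SSSSSSZ), and the new EdgeDates model keeps both fields optional so a null stays null. A hedged sketch of parsing such values on the consuming side; it is not package code, and it works around the fact that datetime.fromisoformat only accepts a trailing 'Z' on Python 3.11+.

from datetime import datetime

def parse_edge_date(value: str | None) -> datetime | None:
    # Missing or null dates stay None, matching the Optional[str] fields above.
    if value is None:
        return None
    # Rewrite the trailing 'Z' as an explicit UTC offset for pre-3.11 compatibility.
    return datetime.fromisoformat(value.replace('Z', '+00:00'))

print(parse_edge_date('2024-08-26T13:00:00.000000Z'))  # 2024-08-26 13:00:00+00:00
print(parse_edge_date(None))                           # None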

graphiti_core/prompts/extract_edges.py
@@ -17,9 +17,26 @@ limitations under the License.
 import json
 from typing import Any, Protocol, TypedDict

+from pydantic import BaseModel, Field
+
 from .models import Message, PromptFunction, PromptVersion


+class Edge(BaseModel):
+    relation_type: str = Field(..., description='RELATION_TYPE_IN_CAPS')
+    source_entity_name: str = Field(..., description='name of the source entity')
+    target_entity_name: str = Field(..., description='name of the target entity')
+    fact: str = Field(..., description='extracted factual information')
+
+
+class ExtractedEdges(BaseModel):
+    edges: list[Edge]
+
+
+class MissingFacts(BaseModel):
+    missing_facts: list[str] = Field(..., description="facts that weren't extracted")
+
+
 class Prompt(Protocol):
     edge: PromptVersion
     reflexion: PromptVersion
@@ -54,25 +71,12 @@ def edge(context: dict[str, Any]) -> list[Message]:

 Given the above MESSAGES and ENTITIES, extract all facts pertaining to the listed ENTITIES from the CURRENT MESSAGE.

-
 Guidelines:
 1. Extract facts only between the provided entities.
 2. Each fact should represent a clear relationship between two DISTINCT nodes.
 3. The relation_type should be a concise, all-caps description of the fact (e.g., LOVES, IS_FRIENDS_WITH, WORKS_FOR).
 4. Provide a more detailed fact containing all relevant information.
 5. Consider temporal aspects of relationships when relevant.
-
-Respond with a JSON object in the following format:
-{{
-"edges": [
-{{
-"relation_type": "RELATION_TYPE_IN_CAPS",
-"source_entity_name": "name of the source entity",
-"target_entity_name": "name of the target entity",
-"fact": "extracted factual information",
-}}
-]
-}}
 """,
         ),
     ]
@@ -98,12 +102,7 @@ def reflexion(context: dict[str, Any]) -> list[Message]:
 </EXTRACTED FACTS>

 Given the above MESSAGES, list of EXTRACTED ENTITIES entities, and list of EXTRACTED FACTS;
-determine if any facts haven't been extracted:
-
-Respond with a JSON object in the following format:
-{{
-"missing_facts": [ "facts that weren't extracted", ...]
-}}
+determine if any facts haven't been extracted.
 """
     return [
         Message(role='system', content=sys_prompt),

graphiti_core/prompts/extract_nodes.py
@@ -17,9 +17,19 @@ limitations under the License.
 import json
 from typing import Any, Protocol, TypedDict

+from pydantic import BaseModel, Field
+
 from .models import Message, PromptFunction, PromptVersion


+class ExtractedNodes(BaseModel):
+    extracted_node_names: list[str] = Field(..., description='Name of the extracted entity')
+
+
+class MissedEntities(BaseModel):
+    missed_entities: list[str] = Field(..., description="Names of entities that weren't extracted")
+
+
 class Prompt(Protocol):
     extract_message: PromptVersion
     extract_json: PromptVersion
@@ -56,11 +66,6 @@ Guidelines:
 4. DO NOT create nodes for temporal information like dates, times or years (these will be added to edges later).
 5. Be as explicit as possible in your node names, using full names.
 6. DO NOT extract entities mentioned only in PREVIOUS MESSAGES, those messages are only to provide context.
-
-Respond with a JSON object in the following format:
-{{
-"extracted_node_names": ["Name of the extracted entity", ...],
-}}
 """
     return [
         Message(role='system', content=sys_prompt),
@@ -87,11 +92,6 @@ Given the above source description and JSON, extract relevant entity nodes from
 Guidelines:
 1. Always try to extract an entities that the JSON represents. This will often be something like a "name" or "user field
 2. Do NOT extract any properties that contain dates
-
-Respond with a JSON object in the following format:
-{{
-"extracted_node_names": ["Name of the extracted entity", ...],
-}}
 """
     return [
         Message(role='system', content=sys_prompt),
@@ -116,11 +116,6 @@ Guidelines:
 2. Avoid creating nodes for relationships or actions.
 3. Avoid creating nodes for temporal information like dates, times or years (these will be added to edges later).
 4. Be as explicit as possible in your node names, using full names and avoiding abbreviations.
-
-Respond with a JSON object in the following format:
-{{
-"extracted_node_names": ["Name of the extracted entity", ...],
-}}
 """
     return [
         Message(role='system', content=sys_prompt),
@@ -144,12 +139,7 @@ def reflexion(context: dict[str, Any]) -> list[Message]:
 </EXTRACTED ENTITIES>

 Given the above previous messages, current message, and list of extracted entities; determine if any entities haven't been
-extracted:
-
-Respond with a JSON object in the following format:
-{{
-"missed_entities": [ "name of entity that wasn't extracted", ...]
-}}
+extracted.
 """
     return [
         Message(role='system', content=sys_prompt),

graphiti_core/prompts/invalidate_edges.py
@@ -16,9 +16,22 @@ limitations under the License.

 from typing import Any, Protocol, TypedDict

+from pydantic import BaseModel, Field
+
 from .models import Message, PromptFunction, PromptVersion


+class InvalidatedEdge(BaseModel):
+    uuid: str = Field(..., description='The UUID of the edge to be invalidated')
+    fact: str = Field(..., description='Updated fact of the edge')
+
+
+class InvalidatedEdges(BaseModel):
+    invalidated_edges: list[InvalidatedEdge] = Field(
+        ..., description='List of edges that should be invalidated'
+    )
+
+
 class Prompt(Protocol):
     v1: PromptVersion
     v2: PromptVersion
@@ -56,18 +69,6 @@ def v1(context: dict[str, Any]) -> list[Message]:
 {context['new_edges']}

 Each edge is formatted as: "UUID | SOURCE_NODE - EDGE_NAME - TARGET_NODE (fact: EDGE_FACT), START_DATE (END_DATE, optional))"
-
-For each existing edge that should be invalidated, respond with a JSON object in the following format:
-{{
-"invalidated_edges": [
-{{
-"edge_uuid": "The UUID of the edge to be invalidated (the part before the | character)",
-"fact": "Updated fact of the edge"
-}}
-]
-}}
-
-If no relationships need to be invalidated based on these strict criteria, return an empty list for "invalidated_edges".
 """,
         ),
     ]
@@ -89,19 +90,6 @@ def v2(context: dict[str, Any]) -> list[Message]:

 New Edge:
 {context['new_edge']}
-
-
-For each existing edge that should be invalidated, respond with a JSON object in the following format:
-{{
-"invalidated_edges": [
-{{
-"uuid": "The UUID of the edge to be invalidated",
-"fact": "Updated fact of the edge"
-}}
-]
-}}
-
-If no relationships need to be invalidated based on these strict criteria, return an empty list for "invalidated_edges".
 """,
         ),
     ]

graphiti_core/prompts/lib.py
@@ -74,6 +74,7 @@ from .invalidate_edges import (
     versions as invalidate_edges_versions,
 )
 from .models import Message, PromptFunction
+from .prompt_helpers import DO_NOT_ESCAPE_UNICODE
 from .summarize_nodes import Prompt as SummarizeNodesPrompt
 from .summarize_nodes import Versions as SummarizeNodesVersions
 from .summarize_nodes import versions as summarize_nodes_versions
@@ -106,7 +107,10 @@ class VersionWrapper:
         self.func = func

     def __call__(self, context: dict[str, Any]) -> list[Message]:
-        return self.func(context)
+        messages = self.func(context)
+        for message in messages:
+            message.content += DO_NOT_ESCAPE_UNICODE if message.role == 'system' else ''
+        return messages


 class PromptTypeWrapper:

graphiti_core/prompts/prompt_helpers.py (new file)
@@ -0,0 +1 @@
+DO_NOT_ESCAPE_UNICODE = '\nDo not escape unicode characters.\n'
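
The new prompt_helpers constant is consumed by the VersionWrapper change in lib.py above: every rendered prompt gets the suffix appended to its system message before the messages are returned. A small stand-alone reproduction of that behavior follows; the Message dataclass is a stand-in for graphiti_core.prompts.models.Message, not the package's own class.

from dataclasses import dataclass

DO_NOT_ESCAPE_UNICODE = '\nDo not escape unicode characters.\n'

@dataclass
class Message:  # stand-in for graphiti_core.prompts.models.Message
    role: str
    content: str

def wrap(messages: list[Message]) -> list[Message]:
    # Only system messages get the suffix; other roles pass through unchanged.
    for message in messages:
        message.content += DO_NOT_ESCAPE_UNICODE if message.role == 'system' else ''
    return messages

msgs = wrap([Message('system', 'You are a helpful assistant.'), Message('user', 'hi')])
print(msgs[0].content.endswith(DO_NOT_ESCAPE_UNICODE))  # True
print(msgs[1].content)                                  # hi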

graphiti_core/prompts/summarize_nodes.py
@@ -17,9 +17,21 @@ limitations under the License.
 import json
 from typing import Any, Protocol, TypedDict

+from pydantic import BaseModel, Field
+
 from .models import Message, PromptFunction, PromptVersion


+class Summary(BaseModel):
+    summary: str = Field(
+        ..., description='Summary containing the important information from both summaries'
+    )
+
+
+class SummaryDescription(BaseModel):
+    description: str = Field(..., description='One sentence description of the provided summary')
+
+
 class Prompt(Protocol):
     summarize_pair: PromptVersion
     summarize_context: PromptVersion
@@ -45,11 +57,6 @@ def summarize_pair(context: dict[str, Any]) -> list[Message]:

 Summaries:
 {json.dumps(context['node_summaries'], indent=2)}
-
-Respond with a JSON object in the following format:
-{{
-"summary": "Summary containing the important information from both summaries"
-}}
 """,
         ),
     ]
@@ -77,12 +84,6 @@ def summarize_context(context: dict[str, Any]) -> list[Message]:
 <ENTITY>
 {context['node_name']}
 </ENTITY>
-
-
-Respond with a JSON object in the following format:
-{{
-"summary": "Entity summary"
-}}
 """,
         ),
     ]
@@ -101,11 +102,6 @@ def summary_description(context: dict[str, Any]) -> list[Message]:

 Summary:
 {json.dumps(context['summary'], indent=2)}
-
-Respond with a JSON object in the following format:
-{{
-"description": "One sentence description of the provided summary"
-}}
 """,
         ),
     ]

graphiti_core/search/search_utils.py
@@ -40,7 +40,7 @@ from graphiti_core.nodes import (

 logger = logging.getLogger(__name__)

-RELEVANT_SCHEMA_LIMIT = 3
+RELEVANT_SCHEMA_LIMIT = 10
 DEFAULT_MIN_SCORE = 0.6
 DEFAULT_MMR_LAMBDA = 0.5
 MAX_SEARCH_DEPTH = 3

graphiti_core/utils/maintenance/community_operations.py
@@ -16,6 +16,7 @@ from graphiti_core.nodes import (
     get_community_node_from_record,
 )
 from graphiti_core.prompts import prompt_library
+from graphiti_core.prompts.summarize_nodes import Summary, SummaryDescription
 from graphiti_core.utils.maintenance.edge_operations import build_community_edges

 MAX_COMMUNITY_BUILD_CONCURRENCY = 10
@@ -131,7 +132,7 @@ async def summarize_pair(llm_client: LLMClient, summary_pair: tuple[str, str]) -
     context = {'node_summaries': [{'summary': summary} for summary in summary_pair]}

     llm_response = await llm_client.generate_response(
-        prompt_library.summarize_nodes.summarize_pair(context)
+        prompt_library.summarize_nodes.summarize_pair(context), response_model=Summary
     )

     pair_summary = llm_response.get('summary', '')
@@ -143,7 +144,8 @@ async def generate_summary_description(llm_client: LLMClient, summary: str) -> s
     context = {'summary': summary}

     llm_response = await llm_client.generate_response(
-        prompt_library.summarize_nodes.summary_description(context)
+        prompt_library.summarize_nodes.summary_description(context),
+        response_model=SummaryDescription,
     )

     description = llm_response.get('description', '')
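
The maintenance modules now pass the matching Pydantic model to generate_response through the new response_model parameter, while the result is still consumed as a plain dict. A condensed sketch of that calling convention, using only the call shapes visible in this diff; llm_client is assumed to be an already configured graphiti_core LLMClient.

from graphiti_core.prompts import prompt_library
from graphiti_core.prompts.summarize_nodes import Summary

async def summarize(llm_client, left: str, right: str) -> str:
    context = {'node_summaries': [{'summary': left}, {'summary': right}]}
    # response_model tells the client which structured shape to request;
    # the returned value is still read like a dict.
    llm_response = await llm_client.generate_response(
        prompt_library.summarize_nodes.summarize_pair(context), response_model=Summary
    )
    return llm_response.get('summary', '')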

graphiti_core/utils/maintenance/edge_operations.py
@@ -18,13 +18,14 @@ import asyncio
 import logging
 from datetime import datetime, timezone
 from time import time
-from typing import List

 from graphiti_core.edges import CommunityEdge, EntityEdge, EpisodicEdge
 from graphiti_core.helpers import MAX_REFLEXION_ITERATIONS
 from graphiti_core.llm_client import LLMClient
 from graphiti_core.nodes import CommunityNode, EntityNode, EpisodicNode
 from graphiti_core.prompts import prompt_library
+from graphiti_core.prompts.dedupe_edges import EdgeDuplicate, UniqueFacts
+from graphiti_core.prompts.extract_edges import ExtractedEdges, MissingFacts
 from graphiti_core.utils.maintenance.temporal_operations import (
     extract_edge_dates,
     get_edge_contradictions,
@@ -34,11 +35,11 @@ logger = logging.getLogger(__name__)


 def build_episodic_edges(
-    entity_nodes: List[EntityNode],
+    entity_nodes: list[EntityNode],
     episode: EpisodicNode,
     created_at: datetime,
-) -> List[EpisodicEdge]:
-    edges: List[EpisodicEdge] = [
+) -> list[EpisodicEdge]:
+    edges: list[EpisodicEdge] = [
         EpisodicEdge(
             source_node_uuid=episode.uuid,
             target_node_uuid=node.uuid,
@@ -52,11 +53,11 @@ def build_episodic_edges(


 def build_community_edges(
-    entity_nodes: List[EntityNode],
+    entity_nodes: list[EntityNode],
     community_node: CommunityNode,
     created_at: datetime,
-) -> List[CommunityEdge]:
-    edges: List[CommunityEdge] = [
+) -> list[CommunityEdge]:
+    edges: list[CommunityEdge] = [
         CommunityEdge(
             source_node_uuid=community_node.uuid,
             target_node_uuid=node.uuid,
@@ -92,7 +93,7 @@ async def extract_edges(
     reflexion_iterations = 0
     while facts_missed and reflexion_iterations < MAX_REFLEXION_ITERATIONS:
         llm_response = await llm_client.generate_response(
-            prompt_library.extract_edges.edge(context)
+            prompt_library.extract_edges.edge(context), response_model=ExtractedEdges
        )
         edges_data = llm_response.get('edges', [])

@@ -101,7 +102,7 @@ async def extract_edges(
         reflexion_iterations += 1
         if reflexion_iterations < MAX_REFLEXION_ITERATIONS:
             reflexion_response = await llm_client.generate_response(
-                prompt_library.extract_edges.reflexion(context)
+                prompt_library.extract_edges.reflexion(context), response_model=MissingFacts
             )

             missing_facts = reflexion_response.get('missing_facts', [])
@@ -318,7 +319,9 @@ async def dedupe_extracted_edge(
         'extracted_edges': extracted_edge_context,
     }

-    llm_response = await llm_client.generate_response(prompt_library.dedupe_edges.edge(context))
+    llm_response = await llm_client.generate_response(
+        prompt_library.dedupe_edges.edge(context), response_model=EdgeDuplicate
+    )

     is_duplicate: bool = llm_response.get('is_duplicate', False)
     uuid: str | None = llm_response.get('uuid', None)
@@ -353,7 +356,7 @@ async def dedupe_edge_list(
     context = {'edges': [{'uuid': edge.uuid, 'fact': edge.fact} for edge in edges]}

     llm_response = await llm_client.generate_response(
-        prompt_library.dedupe_edges.edge_list(context)
+        prompt_library.dedupe_edges.edge_list(context), response_model=UniqueFacts
     )
     unique_edges_data = llm_response.get('unique_facts', [])


graphiti_core/utils/maintenance/node_operations.py
@@ -23,6 +23,9 @@ from graphiti_core.helpers import MAX_REFLEXION_ITERATIONS
 from graphiti_core.llm_client import LLMClient
 from graphiti_core.nodes import EntityNode, EpisodeType, EpisodicNode
 from graphiti_core.prompts import prompt_library
+from graphiti_core.prompts.dedupe_nodes import NodeDuplicate
+from graphiti_core.prompts.extract_nodes import ExtractedNodes, MissedEntities
+from graphiti_core.prompts.summarize_nodes import Summary

 logger = logging.getLogger(__name__)

@@ -42,7 +45,7 @@ async def extract_message_nodes(
     }

     llm_response = await llm_client.generate_response(
-        prompt_library.extract_nodes.extract_message(context)
+        prompt_library.extract_nodes.extract_message(context), response_model=ExtractedNodes
     )
     extracted_node_names = llm_response.get('extracted_node_names', [])
     return extracted_node_names
@@ -63,7 +66,7 @@ async def extract_text_nodes(
     }

     llm_response = await llm_client.generate_response(
-        prompt_library.extract_nodes.extract_text(context)
+        prompt_library.extract_nodes.extract_text(context), ExtractedNodes
     )
     extracted_node_names = llm_response.get('extracted_node_names', [])
     return extracted_node_names
@@ -81,7 +84,7 @@ async def extract_json_nodes(
     }

     llm_response = await llm_client.generate_response(
-        prompt_library.extract_nodes.extract_json(context)
+        prompt_library.extract_nodes.extract_json(context), ExtractedNodes
     )
     extracted_node_names = llm_response.get('extracted_node_names', [])
     return extracted_node_names
@@ -101,7 +104,7 @@ async def extract_nodes_reflexion(
     }

     llm_response = await llm_client.generate_response(
-        prompt_library.extract_nodes.reflexion(context)
+        prompt_library.extract_nodes.reflexion(context), MissedEntities
     )
     missed_entities = llm_response.get('missed_entities', [])

@@ -273,9 +276,12 @@ async def resolve_extracted_node(
     }

     llm_response, node_summary_response = await asyncio.gather(
-        llm_client.generate_response(prompt_library.dedupe_nodes.node(context)),
         llm_client.generate_response(
-            prompt_library.summarize_nodes.summarize_context(summary_context)
+            prompt_library.dedupe_nodes.node(context), response_model=NodeDuplicate
+        ),
+        llm_client.generate_response(
+            prompt_library.summarize_nodes.summarize_context(summary_context),
+            response_model=Summary,
         ),
     )

@@ -294,7 +300,8 @@ async def resolve_extracted_node(
         summary_response = await llm_client.generate_response(
             prompt_library.summarize_nodes.summarize_pair(
                 {'node_summaries': [extracted_node.summary, existing_node.summary]}
-            )
+            ),
+            response_model=Summary,
         )
         node = existing_node
         node.name = name
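
Both edge and node extraction keep the bounded reflexion loop, now with structured responses: extraction runs, a reflexion prompt asks what was missed, and the loop repeats up to MAX_REFLEXION_ITERATIONS. The following is a sketch assembled from the extract_edges hunks above; how the missed facts are folded back into the prompt context is not shown in this diff, so that step is only noted in a comment.

from graphiti_core.helpers import MAX_REFLEXION_ITERATIONS
from graphiti_core.prompts import prompt_library
from graphiti_core.prompts.extract_edges import ExtractedEdges, MissingFacts

async def extract_edges_with_reflexion(llm_client, context: dict) -> list[dict]:
    edges_data: list[dict] = []
    facts_missed = True
    reflexion_iterations = 0
    while facts_missed and reflexion_iterations < MAX_REFLEXION_ITERATIONS:
        llm_response = await llm_client.generate_response(
            prompt_library.extract_edges.edge(context), response_model=ExtractedEdges
        )
        edges_data = llm_response.get('edges', [])

        reflexion_iterations += 1
        if reflexion_iterations < MAX_REFLEXION_ITERATIONS:
            reflexion_response = await llm_client.generate_response(
                prompt_library.extract_edges.reflexion(context), response_model=MissingFacts
            )
            missing_facts = reflexion_response.get('missing_facts', [])
            facts_missed = len(missing_facts) != 0
            # The real module feeds missing_facts back into the prompt context here
            # before the next pass; that detail is outside the hunks shown above.
    return edges_data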

graphiti_core/utils/maintenance/temporal_operations.py
@@ -17,12 +17,13 @@ limitations under the License.
 import logging
 from datetime import datetime
 from time import time
-from typing import List

 from graphiti_core.edges import EntityEdge
 from graphiti_core.llm_client import LLMClient
 from graphiti_core.nodes import EpisodicNode
 from graphiti_core.prompts import prompt_library
+from graphiti_core.prompts.extract_edge_dates import EdgeDates
+from graphiti_core.prompts.invalidate_edges import InvalidatedEdges

 logger = logging.getLogger(__name__)

@@ -31,7 +32,7 @@ async def extract_edge_dates(
     llm_client: LLMClient,
     edge: EntityEdge,
     current_episode: EpisodicNode,
-    previous_episodes: List[EpisodicNode],
+    previous_episodes: list[EpisodicNode],
 ) -> tuple[datetime | None, datetime | None]:
     context = {
         'edge_fact': edge.fact,
@@ -39,7 +40,9 @@ async def extract_edge_dates(
         'previous_episodes': [ep.content for ep in previous_episodes],
         'reference_timestamp': current_episode.valid_at.isoformat(),
     }
-    llm_response = await llm_client.generate_response(prompt_library.extract_edge_dates.v1(context))
+    llm_response = await llm_client.generate_response(
+        prompt_library.extract_edge_dates.v1(context), response_model=EdgeDates
+    )

     valid_at = llm_response.get('valid_at')
     invalid_at = llm_response.get('invalid_at')
@@ -76,7 +79,9 @@ async def get_edge_contradictions(

     context = {'new_edge': new_edge_context, 'existing_edges': existing_edge_context}

-    llm_response = await llm_client.generate_response(prompt_library.invalidate_edges.v2(context))
+    llm_response = await llm_client.generate_response(
+        prompt_library.invalidate_edges.v2(context), response_model=InvalidatedEdges
+    )

     contradicted_edge_data = llm_response.get('invalidated_edges', [])


{graphiti_core-0.4.2.dist-info → graphiti_core-0.5.0rc1.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: graphiti-core
-Version: 0.4.2
+Version: 0.5.0rc1
 Summary: A temporal graph building library
 License: Apache-2.0
 Author: Paul Paliychuk