noesium 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- noesium/core/__init__.py +4 -0
- noesium/core/agent/__init__.py +14 -0
- noesium/core/agent/base.py +227 -0
- noesium/core/consts.py +6 -0
- noesium/core/goalith/conflict/conflict.py +104 -0
- noesium/core/goalith/conflict/detector.py +53 -0
- noesium/core/goalith/decomposer/__init__.py +6 -0
- noesium/core/goalith/decomposer/base.py +46 -0
- noesium/core/goalith/decomposer/callable_decomposer.py +65 -0
- noesium/core/goalith/decomposer/llm_decomposer.py +326 -0
- noesium/core/goalith/decomposer/prompts.py +140 -0
- noesium/core/goalith/decomposer/simple_decomposer.py +61 -0
- noesium/core/goalith/errors.py +22 -0
- noesium/core/goalith/goalgraph/graph.py +526 -0
- noesium/core/goalith/goalgraph/node.py +179 -0
- noesium/core/goalith/replanner/base.py +31 -0
- noesium/core/goalith/replanner/replanner.py +36 -0
- noesium/core/goalith/service.py +26 -0
- noesium/core/llm/__init__.py +154 -0
- noesium/core/llm/base.py +152 -0
- noesium/core/llm/litellm.py +528 -0
- noesium/core/llm/llamacpp.py +487 -0
- noesium/core/llm/message.py +184 -0
- noesium/core/llm/ollama.py +459 -0
- noesium/core/llm/openai.py +520 -0
- noesium/core/llm/openrouter.py +89 -0
- noesium/core/llm/prompt.py +551 -0
- noesium/core/memory/__init__.py +11 -0
- noesium/core/memory/base.py +464 -0
- noesium/core/memory/memu/__init__.py +24 -0
- noesium/core/memory/memu/config/__init__.py +26 -0
- noesium/core/memory/memu/config/activity/config.py +46 -0
- noesium/core/memory/memu/config/event/config.py +46 -0
- noesium/core/memory/memu/config/markdown_config.py +241 -0
- noesium/core/memory/memu/config/profile/config.py +48 -0
- noesium/core/memory/memu/llm_adapter.py +129 -0
- noesium/core/memory/memu/memory/__init__.py +31 -0
- noesium/core/memory/memu/memory/actions/__init__.py +40 -0
- noesium/core/memory/memu/memory/actions/add_activity_memory.py +299 -0
- noesium/core/memory/memu/memory/actions/base_action.py +342 -0
- noesium/core/memory/memu/memory/actions/cluster_memories.py +262 -0
- noesium/core/memory/memu/memory/actions/generate_suggestions.py +198 -0
- noesium/core/memory/memu/memory/actions/get_available_categories.py +66 -0
- noesium/core/memory/memu/memory/actions/link_related_memories.py +515 -0
- noesium/core/memory/memu/memory/actions/run_theory_of_mind.py +254 -0
- noesium/core/memory/memu/memory/actions/update_memory_with_suggestions.py +514 -0
- noesium/core/memory/memu/memory/embeddings.py +130 -0
- noesium/core/memory/memu/memory/file_manager.py +306 -0
- noesium/core/memory/memu/memory/memory_agent.py +578 -0
- noesium/core/memory/memu/memory/recall_agent.py +376 -0
- noesium/core/memory/memu/memory_store.py +628 -0
- noesium/core/memory/models.py +149 -0
- noesium/core/msgbus/__init__.py +12 -0
- noesium/core/msgbus/base.py +395 -0
- noesium/core/orchestrix/__init__.py +0 -0
- noesium/core/py.typed +0 -0
- noesium/core/routing/__init__.py +20 -0
- noesium/core/routing/base.py +66 -0
- noesium/core/routing/router.py +241 -0
- noesium/core/routing/strategies/__init__.py +9 -0
- noesium/core/routing/strategies/dynamic_complexity.py +361 -0
- noesium/core/routing/strategies/self_assessment.py +147 -0
- noesium/core/routing/types.py +38 -0
- noesium/core/toolify/__init__.py +39 -0
- noesium/core/toolify/base.py +360 -0
- noesium/core/toolify/config.py +138 -0
- noesium/core/toolify/mcp_integration.py +275 -0
- noesium/core/toolify/registry.py +214 -0
- noesium/core/toolify/toolkits/__init__.py +1 -0
- noesium/core/tracing/__init__.py +37 -0
- noesium/core/tracing/langgraph_hooks.py +308 -0
- noesium/core/tracing/opik_tracing.py +144 -0
- noesium/core/tracing/token_tracker.py +166 -0
- noesium/core/utils/__init__.py +10 -0
- noesium/core/utils/logging.py +172 -0
- noesium/core/utils/statistics.py +12 -0
- noesium/core/utils/typing.py +17 -0
- noesium/core/vector_store/__init__.py +79 -0
- noesium/core/vector_store/base.py +94 -0
- noesium/core/vector_store/pgvector.py +304 -0
- noesium/core/vector_store/weaviate.py +383 -0
- noesium-0.1.0.dist-info/METADATA +525 -0
- noesium-0.1.0.dist-info/RECORD +86 -0
- noesium-0.1.0.dist-info/WHEEL +5 -0
- noesium-0.1.0.dist-info/licenses/LICENSE +21 -0
- noesium-0.1.0.dist-info/top_level.txt +1 -0
noesium/core/memory/memu/memory/actions/run_theory_of_mind.py
@@ -0,0 +1,254 @@
+import logging
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+
+from .base_action import BaseAction
+
+logger = logging.getLogger(__name__)
+
+
+class RunTheoryOfMindAction(BaseAction):
+    """
+    Run theory of mind on the conversation to infer subtle, obscure, and hidden information behind the conversation.
+    This is a very important step to understand the characters and the conversation.
+    The output should follow the same format as memory items.
+    """
+
+    @property
+    def action_name(self) -> str:
+        return "run_theory_of_mind"
+
+    def get_schema(self) -> Dict[str, Any]:
+        """Return OpenAI-compatible function schema"""
+        return {
+            "name": self.action_name,
+            "description": "Analyze the conversation and memory items to extract subtle, obscure, and hidden information behind the conversation.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "character_name": {
+                        "type": "string",
+                        "description": "Name of the character",
+                    },
+                    "conversation_text": {
+                        "type": "string",
+                        "description": "The full conversation text to analyze",
+                    },
+                    "activity_items": {
+                        "type": "array",
+                        "items": {
+                            "type": "object",
+                            "properties": {
+                                "memory_id": {"type": "string"},
+                                "content": {"type": "string"},
+                            },
+                            "required": ["memory_id", "content"],
+                        },
+                        "description": "List of new activity items from the conversation",
+                    },
+                    "session_date": {
+                        "type": "string",
+                        "description": "Date of the session (e.g., '2024-01-15')",
+                        "default": None,
+                    },
+                    "embeddings_enabled": {
+                        "type": "boolean",
+                        "description": "Whether to generate embeddings for the theory of mind items",
+                        "default": True,
+                    },
+                },
+                "required": ["character_name", "conversation_text", "activity_items"],
+            },
+        }
+
+    def execute(
+        self,
+        character_name: str,
+        conversation_text: str,
+        activity_items: List[Dict[str, str]],
+        session_date: Optional[str] = None,
+        embeddings_enabled: bool = True,
+    ) -> Dict[str, Any]:
+        """
+        Analyze the conversation and memory items to extract subtle, obscure, and hidden information behind the conversation.
+
+        Args:
+            character_name: Name of the character
+            conversation_text: The full conversation text to analyze
+            activity_items: List of new memory items from the conversation
+            session_date: Date of the session
+            embeddings_enabled: Whether to generate embeddings for the theory of mind items
+
+        Returns:
+            Dict containing memory items obtained through theory of mind
+        """
+        try:
+            if not conversation_text.strip():
+                return self._add_metadata({"success": False, "error": "Empty conversation text provided"})
+
+            if not activity_items:
+                return self._add_metadata({"success": False, "error": "No memory items provided"})
+
+            if not session_date:
+                session_date = datetime.now().strftime("%Y-%m-%d")
+
+            # Call LLM to run theory of mind
+            response = self._extract_theory_of_mind_with_llm(character_name, conversation_text, activity_items)
+
+            if not response.strip():
+                return self._add_metadata({"success": False, "error": "LLM returned empty response"})
+
+            # Parse text response
+            reasoning_process, theory_of_mind_items = self._parse_theory_of_mind_from_text(
+                character_name, response.strip(), session_date
+            )
+
+            if not theory_of_mind_items:
+                return self._add_metadata(
+                    {
+                        "success": False,
+                        "error": "No theory of mind items could be extracted from conversation",
+                    }
+                )
+
+            return self._add_metadata(
+                {
+                    "success": True,
+                    "character_name": character_name,
+                    "theory_of_mind_items_added": len(theory_of_mind_items),
+                    "theory_of_mind_items": theory_of_mind_items,
+                    "reasoning_process": reasoning_process,
+                    "message": f"Successfully extracted {len(theory_of_mind_items)} theory of mind items from conversation",
+                }
+            )
+
+        except Exception as e:
+            return self._handle_error(e)
+
+    def _extract_theory_of_mind_with_llm(
+        self,
+        character_name: str,
+        conversation_text: str,
+        activity_items: List[Dict[str, str]],
+    ) -> str:
+        """Extract theory of mind items from conversation and activity items with LLM"""
+
+        activity_items_text = "\n".join(
+            [
+                # f"Memory ID: {item['memory_id']}\nContent: {item['content']}"
+                f"- {item['content']}"
+                for item in activity_items
+            ]
+        )
+
+        user_name = character_name
+
+        theory_of_mind_prompt = f"""You are analyzing the following conversation and activity items for {user_name} to try to infer information that is not explicitly mentioned by {user_name} in the conversation, but that he or she might have meant to express or that the listener can reasonably deduce.
+
+Conversation:
+{conversation_text}
+
+Activity Items:
+{activity_items_text}
+
+**CRITICAL REQUIREMENT: Inference results must be SELF-CONTAINED MEMORY ITEMS**
+
+Your task is to leverage your reasoning skills to infer information that is not explicitly mentioned in the conversation but that the character might have meant to express or the listener can reasonably deduce.
+
+**SELF-CONTAINED MEMORY REQUIREMENTS:**
+- Plain text only, no markdown grammar
+- EVERY activity item must be complete and standalone
+- ALWAYS include the full subject (do not use "she/he/they/it")
+- NEVER use pronouns that depend on context (no "she", "he", "they", "it")
+- Include specific names, places, dates, and full context in each item
+- Each activity should be understandable without reading other items
+- You can use words like "perhaps" or "maybe" to indicate that the information is obtained through reasoning and is not 100% certain
+- NO need to include evidence or reasoning processes in the items
+
+**INFERENCE GUIDELINES:**
+- Leverage your reasoning skills to infer the information that is not explicitly mentioned
+- Use the activity items as a reference to assist your reasoning process and inferences
+- DO NOT repeat the information that is already included in the activity items
+- Use modal adverbs (perhaps, probably, likely, etc.) to indicate your confidence level of the inference
+
+**COMPLETE SENTENCE EXAMPLES:**
+GOOD: "{user_name} may have experience working abroad"
+BAD: "Have experience working abroad" (missing subject)
+BAD: "He may have experience working abroad" (pronouns as subject)
+GOOD: "{user_name} perhaps did not enjoy his trip to Europe this summer"
+BAD: "{user_name} perhaps did not enjoy his trip" (missing location and time)
+GOOD: "The Harry Potter series is probably important to {user_name}'s childhood"
+BAD: "The Harry Potter series is probably important to {user_name}'s childhood, because she mentioned it and recommended it to her friends many times" (no need to include evidence or reasoning processes)
+
+**OUTPUT FORMAT:**
+
+**REASONING PROCESS:**
+[Your reasoning process for what kind of implicit information can be hidden behind the conversation, what the evidence is, how you reached your conclusion, and how confident you are.]
+
+**INFERENCE ITEMS:**
+[One piece of inference per line, no markdown headers, no structure, no numbering, no bullet points, ends with a period]
+[After careful reasoning, if you determine that there is no implicit information that can be inferred from the conversation beyond the explicit information already mentioned in the activity items, you can leave this section empty. DO NOT output things like "No inference available".]
+
+"""
+
+        # Call LLM to run theory of mind
+        response = self.llm_client.simple_chat(theory_of_mind_prompt)
+        return response
+
+    def _parse_theory_of_mind_from_text(self, character_name: str, response_text: str, session_date: str) -> tuple:
+        """Parse theory of mind items from text format response"""
+
+        reasoning_process = ""
+        theory_of_mind_items = []
+
+        try:
+            lines = response_text.split("\n")
+
+            # Parse reasoning process
+            reasoning_section = False
+            inference_section = False
+
+            for line in lines:
+                line = line.strip()
+
+                if (
+                    line.upper().startswith("**REASONING PROCESS:")
+                    or line.startswith("**")
+                    and "REASONING PROCESS" in line.upper()
+                ):
+                    reasoning_section = True
+                    inference_section = False
+                    continue
+                elif (
+                    line.upper().startswith("**INFERENCE ITEMS:")
+                    or line.startswith("**")
+                    and "INFERENCE ITEMS" in line.upper()
+                ):
+                    reasoning_section = False
+                    inference_section = True
+                    continue
+
+                if reasoning_section and line and not line.startswith("**"):
+                    if not reasoning_process:
+                        reasoning_process = line.strip()
+                    else:
+                        reasoning_process += "\n" + line.strip()
+
+                # Parse memory items
+                elif inference_section:
+                    line = line.strip()
+                    if line:
+                        memory_id = self._generate_memory_id()
+                        theory_of_mind_items.append(
+                            {
+                                "memory_id": memory_id,
+                                "mentioned_at": session_date,
+                                "content": line,
+                                "links": "",
+                            }
+                        )
+
+        except Exception as e:
+            logger.error(f"Failed to parse theory of mind from text: {repr(e)}")
+
+        return reasoning_process, theory_of_mind_items
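
The sketch below illustrates how this action might be driven end to end; it is not part of the package contents above. It assumes that RunTheoryOfMindAction can be constructed with an llm_client keyword argument (the actual constructor lives in base_action.py, which is not shown in this hunk) and that the client only needs the simple_chat(prompt) -> str method used by this action. The canned response follows the **REASONING PROCESS:** / **INFERENCE ITEMS:** text format that _parse_theory_of_mind_from_text expects.

# Illustrative usage sketch -- not part of the package diff above.
from noesium.core.memory.memu.memory.actions.run_theory_of_mind import RunTheoryOfMindAction


class CannedLLMClient:
    """Stand-in LLM client returning a response in the expected
    **REASONING PROCESS:** / **INFERENCE ITEMS:** text format."""

    def simple_chat(self, prompt: str) -> str:
        return (
            "**REASONING PROCESS:**\n"
            "Alice keeps mentioning late-night debugging sessions, which suggests a software job.\n"
            "\n"
            "**INFERENCE ITEMS:**\n"
            "Alice probably works as a software engineer.\n"
        )


# Hypothetical wiring: assumes BaseAction accepts llm_client in its constructor.
action = RunTheoryOfMindAction(llm_client=CannedLLMClient())
result = action.execute(
    character_name="Alice",
    conversation_text="Alice: I was up until 3am chasing a race condition again.",
    activity_items=[{"memory_id": "a1", "content": "Alice stayed up late fixing a bug."}],
)
# On success, result["theory_of_mind_items"] holds self-contained inference items,
# each with memory_id, mentioned_at, content, and links fields.
print(result["theory_of_mind_items"])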