jl-ecms-client 0.2.8__py3-none-any.whl → 0.2.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of jl-ecms-client might be problematic.
- {jl_ecms_client-0.2.8.dist-info → jl_ecms_client-0.2.22.dist-info}/METADATA +6 -1
- jl_ecms_client-0.2.22.dist-info/RECORD +67 -0
- mirix/__init__.py +41 -0
- mirix/client/client.py +1 -1
- mirix/constants.py +251 -0
- mirix/errors.py +238 -0
- mirix/functions/__init__.py +0 -0
- mirix/functions/ast_parsers.py +113 -0
- mirix/functions/function_sets/__init__.py +1 -0
- mirix/functions/function_sets/base.py +330 -0
- mirix/functions/function_sets/extras.py +271 -0
- mirix/functions/function_sets/memory_tools.py +933 -0
- mirix/functions/functions.py +199 -0
- mirix/functions/helpers.py +311 -0
- mirix/functions/schema_generator.py +511 -0
- mirix/helpers/json_helpers.py +3 -3
- mirix/log.py +163 -0
- mirix/schemas/agent.py +1 -1
- mirix/schemas/block.py +1 -1
- mirix/schemas/embedding_config.py +0 -3
- mirix/schemas/enums.py +12 -0
- mirix/schemas/episodic_memory.py +1 -1
- mirix/schemas/knowledge_vault.py +1 -1
- mirix/schemas/memory.py +1 -1
- mirix/schemas/message.py +1 -1
- mirix/schemas/mirix_request.py +1 -1
- mirix/schemas/procedural_memory.py +1 -1
- mirix/schemas/providers.py +1 -1
- mirix/schemas/resource_memory.py +1 -1
- mirix/schemas/sandbox_config.py +1 -3
- mirix/schemas/semantic_memory.py +1 -1
- mirix/schemas/tool.py +241 -241
- mirix/schemas/user.py +3 -3
- mirix/settings.py +280 -0
- mirix/system.py +261 -0
- jl_ecms_client-0.2.8.dist-info/RECORD +0 -53
- mirix/client/constants.py +0 -60
- {jl_ecms_client-0.2.8.dist-info → jl_ecms_client-0.2.22.dist-info}/WHEEL +0 -0
- {jl_ecms_client-0.2.8.dist-info → jl_ecms_client-0.2.22.dist-info}/licenses/LICENSE +0 -0
- {jl_ecms_client-0.2.8.dist-info → jl_ecms_client-0.2.22.dist-info}/top_level.txt +0 -0

mirix/functions/ast_parsers.py

@@ -0,0 +1,113 @@
+import ast
+import json
+from typing import Dict
+
+# Registry of known types for annotation resolution
+BUILTIN_TYPES = {
+    "int": int,
+    "float": float,
+    "str": str,
+    "dict": dict,
+    "list": list,
+    "set": set,
+    "tuple": tuple,
+    "bool": bool,
+}
+
+
+def resolve_type(annotation: str):
+    """
+    Resolve a type annotation string into a Python type.
+
+    Args:
+        annotation (str): The annotation string (e.g., 'int', 'list', etc.).
+
+    Returns:
+        type: The corresponding Python type.
+
+    Raises:
+        ValueError: If the annotation is unsupported or invalid.
+    """
+    if annotation in BUILTIN_TYPES:
+        return BUILTIN_TYPES[annotation]
+
+    try:
+        parsed = ast.literal_eval(annotation)
+        if isinstance(parsed, type):
+            return parsed
+        raise ValueError(f"Annotation '{annotation}' is not a recognized type.")
+    except (ValueError, SyntaxError):
+        raise ValueError(f"Unsupported annotation: {annotation}")
+
+
+def get_function_annotations_from_source(
+    source_code: str, function_name: str
+) -> Dict[str, str]:
+    """
+    Parse the source code to extract annotations for a given function name.
+
+    Args:
+        source_code (str): The Python source code containing the function.
+        function_name (str): The name of the function to extract annotations for.
+
+    Returns:
+        Dict[str, str]: A dictionary of argument names to their annotation strings.
+
+    Raises:
+        ValueError: If the function is not found in the source code.
+    """
+    tree = ast.parse(source_code)
+    for node in ast.iter_child_nodes(tree):
+        if isinstance(node, ast.FunctionDef) and node.name == function_name:
+            annotations = {}
+            for arg in node.args.args:
+                if arg.annotation is not None:
+                    annotation_str = ast.unparse(arg.annotation)
+                    annotations[arg.arg] = annotation_str
+            return annotations
+    raise ValueError(
+        f"Function '{function_name}' not found in the provided source code."
+    )
+
+
+def coerce_dict_args_by_annotations(
+    function_args: dict, annotations: Dict[str, str]
+) -> dict:
+    """
+    Coerce arguments in a dictionary to their annotated types.
+
+    Args:
+        function_args (dict): The original function arguments.
+        annotations (Dict[str, str]): Argument annotations as strings.
+
+    Returns:
+        dict: The updated dictionary with coerced argument types.
+
+    Raises:
+        ValueError: If type coercion fails for an argument.
+    """
+    coerced_args = dict(function_args)  # Shallow copy for mutation safety
+
+    for arg_name, value in coerced_args.items():
+        if arg_name in annotations:
+            annotation_str = annotations[arg_name]
+            try:
+                # Resolve the type from the annotation
+                arg_type = resolve_type(annotation_str)
+
+                # Handle JSON-like inputs for dict and list types
+                if arg_type in {dict, list} and isinstance(value, str):
+                    try:
+                        # First, try JSON parsing
+                        value = json.loads(value)
+                    except json.JSONDecodeError:
+                        # Fall back to literal_eval for Python-specific literals
+                        value = ast.literal_eval(value)
+
+                # Coerce the value to the resolved type
+                coerced_args[arg_name] = arg_type(value)
+            except (TypeError, ValueError, json.JSONDecodeError, SyntaxError) as e:
+                raise ValueError(
+                    f"Failed to coerce argument '{arg_name}' to {annotation_str}: {e}"
+                )
+    return coerced_args
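
For context, a minimal usage sketch of the helpers added in mirix/functions/ast_parsers.py (not part of the diff; it assumes the module is importable from an installed wheel, and `add_tags` is a hypothetical sample function):

from mirix.functions.ast_parsers import (
    coerce_dict_args_by_annotations,
    get_function_annotations_from_source,
)

# Hypothetical tool source whose string arguments need coercion.
SAMPLE_SOURCE = '''
def add_tags(count: int, tags: list, metadata: dict):
    return count, tags, metadata
'''

# Extracts {'count': 'int', 'tags': 'list', 'metadata': 'dict'} from the source.
annotations = get_function_annotations_from_source(SAMPLE_SOURCE, "add_tags")

# Coerces JSON / Python-literal strings into the annotated types:
# {'count': 3, 'tags': ['a', 'b'], 'metadata': {'k': 1}}
coerced = coerce_dict_args_by_annotations(
    {"count": "3", "tags": '["a", "b"]', "metadata": "{'k': 1}"},
    annotations,
)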

mirix/functions/function_sets/__init__.py

@@ -0,0 +1 @@
+# Function sets package

mirix/functions/function_sets/base.py

@@ -0,0 +1,330 @@
+from typing import Optional
+
+from mirix.agent import Agent, AgentState
+from mirix.utils import convert_timezone_to_utc
+
+
+def send_message(
+    self: "Agent", agent_state: "AgentState", message: str
+) -> Optional[str]:
+    """
+    Sends a message to the human user. Meanwhile, whenever this function is called, the agent needs to include the `topic` of the current focus. It can be the same as before, it can also be updated when the agent is focusing on something different.
+
+    Args:
+        message (str): Message contents. All unicode (including emojis) are supported.
+        topic (str): The focus of the agent right now. It is used to track the most recent topic in the conversation and will be used to retrieve the relevant memories from each memory component.
+
+    Returns:
+        Optional[str]: None is always returned as this function does not produce a response.
+    """
+    # FIXME passing of msg_obj here is a hack, unclear if guaranteed to be the correct reference
+    self.interface.assistant_message(message)  # , msg_obj=self._messages[-1])
+    return None
+
+
+def send_intermediate_message(
+    self: "Agent", agent_state: "AgentState", message: str,
+    # topic: str = None
+) -> Optional[str]:
+    """
+    Sends an intermediate message to the human user. Meanwhile, whenever this function is called, the agent needs to include the `topic` of the current focus. It should NEVER be any questions or requests for the user but only the agent's current progress on the task.
+
+    Args:
+        message (str): Message contents. All unicode (including emojis) are supported.
+        topic (str): The focus of the agent right now. It is used to track the most recent topic in the conversation and will be used to retrieve the relevant memories from each memory component.
+
+    Returns:
+        Optional[str]: None is always returned as this function does not produce a response.
+    """
+    # FIXME passing of msg_obj here is a hack, unclear if guaranteed to be the correct reference
+    self.interface.assistant_message(message)  # , msg_obj=self._messages[-1])
+    # agent_state.topic = topic
+    return None
+
+
+def conversation_search(
+    self: "Agent", query: str, page: Optional[int] = 0
+) -> Optional[str]:
+    """
+    Search prior conversation history using case-insensitive string matching.
+
+    Args:
+        query (str): String to search for.
+        page (int): Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page).
+
+    Returns:
+        str: Query result string
+    """
+
+    import math
+
+    from mirix.constants import RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
+    from mirix.utils import json_dumps
+
+    if page is None or (isinstance(page, str) and page.lower().strip() == "none"):
+        page = 0
+    try:
+        page = int(page)
+    except (ValueError, TypeError):
+        raise ValueError("'page' argument must be an integer")
+    count = RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
+    # TODO: add paging by page number. currently cursor only works with strings.
+    # original: start=page * count
+    messages = self.message_manager.list_user_messages_for_agent(
+        agent_id=self.agent_state.id,
+        actor=self.user,
+        query_text=query,
+        limit=count,
+    )
+    total = len(messages)
+    num_pages = math.ceil(total / count) - 1  # 0 index
+    if len(messages) == 0:
+        results_str = "No results found."
+    else:
+        results_pref = (
+            f"Showing {len(messages)} of {total} results (page {page}/{num_pages}):"
+        )
+        results_formatted = [message.text for message in messages]
+        results_str = f"{results_pref} {json_dumps(results_formatted)}"
+    return results_str
+
+
+def search_in_memory(
+    self: "Agent",
+    memory_type: str,
+    query: str,
+    search_field: str,
+    search_method: str,
+    timezone_str: str,
+) -> Optional[str]:
+    """
+    Choose which memory to search. All memory types support multiple search methods with different performance characteristics. Most of the time, you should use search over 'details' for episodic memory and semantic memory, 'content' for resource memory (but for resource memory, `embedding` is not supported for content field so you have to use other search methods), 'description' for procedural memory. This is because these fields have the richest information and is more likely to contain the keywords/query. You can always start from a thorough search over the whole memory by setting memory_type as 'all' and search_field as 'null', and then narrow down to specific fields and specific memories.
+
+    Args:
+        memory_type: The type of memory to search in. It should be chosen from the following: "episodic", "resource", "procedural", "knowledge_vault", "semantic", "all". Here "all" means searching in all the memories.
+        query: The keywords/query used to search in the memory.
+        search_field: The field to search in the memory. It should be chosen from the attributes of the corresponding memory. For "episodic" memory, it can be 'summary', 'details'; for "resource" memory, it can be 'summary', 'content'; for "procedural" memory, it can be 'summary', 'steps'; for "knowledge_vault", it can be 'secret_value', 'caption'; for semantic memory, it can be 'name', 'summary', 'details'. For "all", it should also be "null" as the system will search all memories with default fields.
+        search_method: The method to search in the memory. Choose from:
+            - 'bm25': BM25 ranking-based full-text search (fast and effective for keyword-based searches)
+            - 'embedding': Vector similarity search using embeddings (most powerful, good for conceptual matches)
+
+    Returns:
+        str: Query result string
+    """
+
+    if (
+        memory_type == "resource"
+        and search_field == "content"
+        and search_method == "embedding"
+    ):
+        raise ValueError(
+            "embedding is not supported for resource memory's 'content' field."
+        )
+    if (
+        memory_type == "knowledge_vault"
+        and search_field == "secret_value"
+        and search_method == "embedding"
+    ):
+        raise ValueError(
+            "embedding is not supported for knowledge_vault memory's 'secret_value' field."
+        )
+
+    if memory_type == "all":
+        search_field = "null"
+
+    if memory_type == "core":
+        # It means the model is an idiot, but we still return the results:
+        return self.agent_state.memory.compile(), len(
+            self.agent_state.memory.list_block_labels()
+        )
+
+    if memory_type == "episodic" or memory_type == "all":
+        episodic_memory = self.episodic_memory_manager.list_episodic_memory(
+            actor=self.user,
+            agent_state=self.agent_state,
+            query=query,
+            search_field=search_field if search_field != "null" else "summary",
+            search_method=search_method,
+            limit=10,
+            timezone_str=timezone_str,
+        )
+        formatted_results_from_episodic = [
+            {
+                "memory_type": "episodic",
+                "id": x.id,
+                "timestamp": x.occurred_at,
+                "event_type": x.event_type,
+                "actor": x.actor,
+                "summary": x.summary,
+                "details": x.details,
+            }
+            for x in episodic_memory
+        ]
+        if memory_type == "episodic":
+            return formatted_results_from_episodic, len(formatted_results_from_episodic)
+
+    if memory_type == "resource" or memory_type == "all":
+        resource_memories = self.resource_memory_manager.list_resources(
+            actor=self.user,
+            agent_state=self.agent_state,
+            query=query,
+            search_field=search_field
+            if search_field != "null"
+            else ("summary" if search_method == "embedding" else "content"),
+            search_method=search_method,
+            limit=10,
+            timezone_str=timezone_str,
+        )
+        formatted_results_resource = [
+            {
+                "memory_type": "resource",
+                "id": x.id,
+                "resource_type": x.resource_type,
+                "summary": x.summary,
+                "content": x.content,
+            }
+            for x in resource_memories
+        ]
+        if memory_type == "resource":
+            return formatted_results_resource, len(formatted_results_resource)
+
+    if memory_type == "procedural" or memory_type == "all":
+        procedural_memories = self.procedural_memory_manager.list_procedures(
+            actor=self.user,
+            agent_state=self.agent_state,
+            query=query,
+            search_field=search_field if search_field != "null" else "summary",
+            search_method=search_method,
+            limit=10,
+            timezone_str=timezone_str,
+        )
+        formatted_results_procedural = [
+            {
+                "memory_type": "procedural",
+                "id": x.id,
+                "entry_type": x.entry_type,
+                "summary": x.summary,
+                "steps": x.steps,
+            }
+            for x in procedural_memories
+        ]
+        if memory_type == "procedural":
+            return formatted_results_procedural, len(formatted_results_procedural)
+
+    if memory_type == "knowledge_vault" or memory_type == "all":
+        knowledge_vault_memories = self.knowledge_vault_manager.list_knowledge(
+            actor=self.user,
+            agent_state=self.agent_state,
+            query=query,
+            search_field=search_field if search_field != "null" else "caption",
+            search_method=search_method,
+            limit=10,
+            timezone_str=timezone_str,
+        )
+        formatted_results_knowledge_vault = [
+            {
+                "memory_type": "knowledge_vault",
+                "id": x.id,
+                "entry_type": x.entry_type,
+                "source": x.source,
+                "sensitivity": x.sensitivity,
+                "secret_value": x.secret_value,
+                "caption": x.caption,
+            }
+            for x in knowledge_vault_memories
+        ]
+        if memory_type == "knowledge_vault":
+            return formatted_results_knowledge_vault, len(
+                formatted_results_knowledge_vault
+            )
+
+    if memory_type == "semantic" or memory_type == "all":
+        semantic_memories = self.semantic_memory_manager.list_semantic_items(
+            actor=self.user,
+            agent_state=self.agent_state,
+            query=query,
+            search_field=search_field if search_field != "null" else "summary",
+            search_method=search_method,
+            limit=10,
+            timezone_str=timezone_str,
+        )
+        # title, summary, details, source
+        formatted_results_semantic = [
+            {
+                "memory_type": "semantic",
+                "id": x.id,
+                "name": x.name,
+                "summary": x.summary,
+                "details": x.details,
+                "source": x.source,
+            }
+            for x in semantic_memories
+        ]
+        if memory_type == "semantic":
+            return formatted_results_semantic, len(formatted_results_semantic)
+
+    else:
+        raise ValueError(
+            f"Memory type '{memory_type}' is not supported. Please choose from 'episodic', 'resource', 'procedural', 'knowledge_vault', 'semantic'."
+        )
+    return (
+        formatted_results_from_episodic
+        + formatted_results_resource
+        + formatted_results_procedural
+        + formatted_results_knowledge_vault
+        + formatted_results_semantic,
+        len(formatted_results_from_episodic)
+        + len(formatted_results_resource)
+        + len(formatted_results_procedural)
+        + len(formatted_results_knowledge_vault)
+        + len(formatted_results_semantic),
+    )
+
+
+def list_memory_within_timerange(
+    self: "Agent", memory_type: str, start_time: str, end_time: str, timezone_str: str
+) -> Optional[str]:
+    """
+    List memories around a specific timestamp
+    Args:
+        memory_type (str): The type of memory to search in. It should be chosen from the following: "episodic", "resource", "procedural", "knowledge_vault", "semantic", "all". Here "all" means searching in all the memories.
+        start_time (str): The start time of the time range. It has to be in the form of "%Y-%m-%d %H:%M:%S"
+        end_time (str): The end time of the time range. It has to be in the form of "%Y-%m-%d %H:%M:%S"
+    """
+
+    start_time = convert_timezone_to_utc(start_time, timezone_str)
+    end_time = convert_timezone_to_utc(end_time, timezone_str)
+
+    if memory_type == "episodic" or memory_type == "all":
+        episodic_memory = (
+            self.episodic_memory_manager.list_episodic_memory_around_timestamp(
+                actor=self.user,
+                agent_state=self.agent_state,
+                start_time=start_time,
+                end_time=end_time,
+                timezone_str=timezone_str,
+            )
+        )
+        formatted_results_from_episodic = [
+            {
+                "memory_type": "episodic",
+                "id": x.id,
+                "timestamp": x.occurred_at,
+                "event_type": x.event_type,
+                "actor": x.actor,
+                "summary": x.summary,
+            }
+            for x in episodic_memory
+        ]
+        if memory_type == "episodic":
+            if len(formatted_results_from_episodic) == 0:
+                return "No results found."
+            elif len(formatted_results_from_episodic) > 50:
+                return "Too many results found. Please narrow down your search."
+            else:
+                return formatted_results_from_episodic, len(
+                    formatted_results_from_episodic
+                )
+
+    # currently only episodic memory is supported
+    return None
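
A compact restatement (not part of the diff) of the fallback search fields that search_in_memory uses when search_field is passed as "null", which is also what memory_type "all" forces; this is only a reading aid for the branches above:

# Defaults inferred from the branches in search_in_memory; illustrative only.
DEFAULT_SEARCH_FIELD_WHEN_NULL = {
    "episodic": "summary",
    "resource": "summary",  # when search_method == "embedding"; otherwise "content"
    "procedural": "summary",
    "knowledge_vault": "caption",
    "semantic": "summary",
}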