jl-ecms-client 0.2.8__py3-none-any.whl → 0.2.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of jl-ecms-client might be problematic.
- {jl_ecms_client-0.2.8.dist-info → jl_ecms_client-0.2.22.dist-info}/METADATA +6 -1
- jl_ecms_client-0.2.22.dist-info/RECORD +67 -0
- mirix/__init__.py +41 -0
- mirix/client/client.py +1 -1
- mirix/constants.py +251 -0
- mirix/errors.py +238 -0
- mirix/functions/__init__.py +0 -0
- mirix/functions/ast_parsers.py +113 -0
- mirix/functions/function_sets/__init__.py +1 -0
- mirix/functions/function_sets/base.py +330 -0
- mirix/functions/function_sets/extras.py +271 -0
- mirix/functions/function_sets/memory_tools.py +933 -0
- mirix/functions/functions.py +199 -0
- mirix/functions/helpers.py +311 -0
- mirix/functions/schema_generator.py +511 -0
- mirix/helpers/json_helpers.py +3 -3
- mirix/log.py +163 -0
- mirix/schemas/agent.py +1 -1
- mirix/schemas/block.py +1 -1
- mirix/schemas/embedding_config.py +0 -3
- mirix/schemas/enums.py +12 -0
- mirix/schemas/episodic_memory.py +1 -1
- mirix/schemas/knowledge_vault.py +1 -1
- mirix/schemas/memory.py +1 -1
- mirix/schemas/message.py +1 -1
- mirix/schemas/mirix_request.py +1 -1
- mirix/schemas/procedural_memory.py +1 -1
- mirix/schemas/providers.py +1 -1
- mirix/schemas/resource_memory.py +1 -1
- mirix/schemas/sandbox_config.py +1 -3
- mirix/schemas/semantic_memory.py +1 -1
- mirix/schemas/tool.py +241 -241
- mirix/schemas/user.py +3 -3
- mirix/settings.py +280 -0
- mirix/system.py +261 -0
- jl_ecms_client-0.2.8.dist-info/RECORD +0 -53
- mirix/client/constants.py +0 -60
- {jl_ecms_client-0.2.8.dist-info → jl_ecms_client-0.2.22.dist-info}/WHEEL +0 -0
- {jl_ecms_client-0.2.8.dist-info → jl_ecms_client-0.2.22.dist-info}/licenses/LICENSE +0 -0
- {jl_ecms_client-0.2.8.dist-info → jl_ecms_client-0.2.22.dist-info}/top_level.txt +0 -0

mirix/functions/functions.py
@@ -0,0 +1,199 @@
+import importlib
+import inspect
+import os
+import sys
+from textwrap import dedent  # remove indentation
+from types import ModuleType
+from typing import Dict, List, Optional
+
+from mirix.errors import MirixToolCreateError
+from mirix.functions.schema_generator import generate_schema
+
+
+def derive_openai_json_schema(source_code: str, name: Optional[str] = None) -> dict:
+    """Derives the OpenAI JSON schema for a given function source code.
+
+    First, attempts to execute the source code in a custom environment with only the necessary imports.
+    Then, it generates the schema from the function's docstring and signature.
+    """
+    try:
+        # Define a custom environment with necessary imports
+        env = {
+            "Optional": Optional,
+            "List": List,
+            "Dict": Dict,
+            # To support Pydantic models
+            # "BaseModel": BaseModel,
+            # "Field": Field,
+        }
+        env.update(globals())
+
+        # print("About to execute source code...")
+        exec(source_code, env)
+        # print("Source code executed successfully")
+
+        functions = [f for f in env if callable(env[f]) and not f.startswith("__")]
+        if not functions:
+            raise MirixToolCreateError("No callable functions found in source code")
+
+        # print(f"Found functions: {functions}")
+        func = env[functions[-1]]
+
+        if not hasattr(func, "__doc__") or not func.__doc__:
+            raise MirixToolCreateError(f"Function {func.__name__} missing docstring")
+
+        # print("About to generate schema...")
+        try:
+            schema = generate_schema(func, name=name)
+            # print("Schema generated successfully")
+            return schema
+        except TypeError as e:
+            raise MirixToolCreateError(f"Type error in schema generation: {str(e)}")
+        except ValueError as e:
+            raise MirixToolCreateError(f"Value error in schema generation: {str(e)}")
+        except Exception as e:
+            raise MirixToolCreateError(
+                f"Unexpected error in schema generation: {str(e)}"
+            )
+
+    except Exception as e:
+        import traceback
+
+        traceback.print_exc()
+        raise MirixToolCreateError(f"Schema generation failed: {str(e)}") from e
+
+
+def parse_source_code(func) -> str:
+    """Parse the source code of a function and remove indendation"""
+    source_code = dedent(inspect.getsource(func))
+    return source_code
+
+
+def get_function_from_module(module_name: str, function_name: str):
+    """
+    Dynamically imports a function from a specified module.
+
+    Args:
+        module_name (str): The name of the module to import (e.g., 'base').
+        function_name (str): The name of the function to retrieve.
+
+    Returns:
+        Callable: The imported function.
+
+    Raises:
+        ModuleNotFoundError: If the specified module cannot be found.
+        AttributeError: If the function is not found in the module.
+    """
+    try:
+        # Dynamically import the module
+        module = importlib.import_module(module_name)
+        # Retrieve the function
+        return getattr(module, function_name)
+    except ModuleNotFoundError:
+        raise ModuleNotFoundError(f"Module '{module_name}' not found.")
+    except AttributeError:
+        raise AttributeError(
+            f"Function '{function_name}' not found in module '{module_name}'."
+        )
+
+
+def get_json_schema_from_module(module_name: str, function_name: str) -> dict:
+    """
+    Dynamically loads a specific function from a module and generates its JSON schema.
+
+    Args:
+        module_name (str): The name of the module to import (e.g., 'base').
+        function_name (str): The name of the function to retrieve.
+
+    Returns:
+        dict: The JSON schema for the specified function.
+
+    Raises:
+        ModuleNotFoundError: If the specified module cannot be found.
+        AttributeError: If the function is not found in the module.
+        ValueError: If the attribute is not a user-defined function.
+    """
+    try:
+        # Dynamically import the module
+        module = importlib.import_module(module_name)
+
+        # Retrieve the function
+        attr = getattr(module, function_name, None)
+
+        # Check if it's a user-defined function
+        if not (inspect.isfunction(attr) and attr.__module__ == module.__name__):
+            raise ValueError(
+                f"'{function_name}' is not a user-defined function in module '{module_name}'"
+            )
+
+        # Generate schema (assuming a `generate_schema` function exists)
+        generated_schema = generate_schema(attr)
+
+        return generated_schema
+
+    except ModuleNotFoundError:
+        raise ModuleNotFoundError(f"Module '{module_name}' not found.")
+    except AttributeError:
+        raise AttributeError(
+            f"Function '{function_name}' not found in module '{module_name}'."
+        )
+
+
+def _get_module_source(module: ModuleType) -> str:
+    """Get the source code of a module, handling PyInstaller bundles"""
+
+    # Check if we're running in a PyInstaller bundle
+    if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
+        # We're in a PyInstaller bundle
+        # Map the module name to a file path
+        module_name = module.__name__
+
+        # For function_sets modules, extract the file name
+        if module_name.startswith("mirix.functions.function_sets."):
+            file_name = module_name.split(".")[-1] + ".py"
+            bundled_path = os.path.join(
+                sys._MEIPASS, "mirix", "functions", "function_sets", file_name
+            )
+
+            try:
+                with open(bundled_path, "r", encoding="utf-8") as f:
+                    return f.read()
+            except FileNotFoundError:
+                return f"# Module source file not found: {bundled_path}"
+        else:
+            return f"# Module source not available for {module_name}"
+    else:
+        # Normal execution - use inspect.getsource
+        try:
+            return inspect.getsource(module)
+        except OSError:
+            return f"# Module source not available for {module.__name__}"
+
+
+def load_function_set(module: ModuleType) -> dict:
+    """Load the functions and generate schema for them, given a module object"""
+    function_dict = {}
+
+    for attr_name in dir(module):
+        # Get the attribute
+        attr = getattr(module, attr_name)
+
+        # Check if it's a callable function and not a built-in or special method
+        if inspect.isfunction(attr) and attr.__module__ == module.__name__:
+            if attr_name in function_dict:
+                raise ValueError(f"Found a duplicate of function name '{attr_name}'")
+
+            try:
+                generated_schema = generate_schema(attr)
+            except Exception as e:
+                raise e
+
+            function_dict[attr_name] = {
+                "module": _get_module_source(module),
+                "python_function": attr,
+                "json_schema": generated_schema,
+            }
+
+    if len(function_dict) == 0:
+        raise ValueError(f"No functions found in module {module}")
+    return function_dict
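
For orientation, here is a minimal, hypothetical usage sketch of the schema helpers added above. It assumes the new module is importable as mirix.functions.functions and that generate_schema accepts a Google-style docstring with Args/Returns sections; the snippet is illustrative only and is not part of the packaged diff.

    from typing import Optional
    from mirix.functions.functions import derive_openai_json_schema, parse_source_code

    def roll_dice(sides: Optional[int] = 6) -> int:
        """
        Roll a die and report the result.

        Args:
            sides (int): Number of faces on the die.

        Returns:
            int: The rolled value.
        """
        import random
        return random.randint(1, sides)

    # parse_source_code() dedents the function source; derive_openai_json_schema()
    # exec()s that source in a scratch namespace and builds the OpenAI-style tool
    # schema from the signature and docstring of the last callable it defines.
    source = parse_source_code(roll_dice)
    schema = derive_openai_json_schema(source, name="roll_dice")
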
mirix/functions/helpers.py
@@ -0,0 +1,311 @@
+import asyncio
+import json
+from random import uniform
+from typing import TYPE_CHECKING, Any, Optional, Union
+
+import humps
+
+# from composio.constants import DEFAULT_ENTITY_ID
+from pydantic import BaseModel
+
+from mirix.constants import (
+    DEFAULT_MESSAGE_TOOL,
+    DEFAULT_MESSAGE_TOOL_KWARG,
+)
+from mirix.log import get_logger
+from mirix.schemas.enums import MessageRole
+from mirix.schemas.message import MessageCreate
+
+logger = get_logger(__name__)
+from mirix.schemas.mirix_message import (
+    AssistantMessage,
+    ReasoningMessage,
+    ToolCallMessage,
+)
+from mirix.schemas.mirix_response import MirixResponse
+
+if TYPE_CHECKING:
+    try:
+        from langchain_core.tools import BaseTool as LangChainBaseTool
+    except ImportError:
+        LangChainBaseTool = Any  # type: ignore
+
+    from mirix.agent.agent import Agent
+
+def generate_langchain_tool_wrapper(
+    tool: "LangChainBaseTool", additional_imports_module_attr_map: dict[str, str] = None
+) -> tuple[str, str]:
+    tool_name = tool.__class__.__name__
+    import_statement = f"from langchain_community.tools import {tool_name}"
+    extra_module_imports = generate_import_code(additional_imports_module_attr_map)
+
+    # Safety check that user has passed in all required imports:
+    assert_all_classes_are_imported(tool, additional_imports_module_attr_map)
+
+    tool_instantiation = f"tool = {generate_imported_tool_instantiation_call_str(tool)}"
+    run_call = "return tool._run(**kwargs)"
+    func_name = humps.decamelize(tool_name)
+
+    # Combine all parts into the wrapper function
+    wrapper_function_str = f"""
+def {func_name}(**kwargs):
+    import importlib
+    {import_statement}
+    {extra_module_imports}
+    {tool_instantiation}
+    {run_call}
+"""
+
+    # Compile safety check
+    assert_code_gen_compilable(wrapper_function_str)
+
+    return func_name, wrapper_function_str
+
+def assert_code_gen_compilable(code_str):
+    try:
+        compile(code_str, "<string>", "exec")
+    except SyntaxError as e:
+        logger.debug("Syntax error in code: %s", e)
+
+def assert_all_classes_are_imported(
+    tool: Union["LangChainBaseTool"], additional_imports_module_attr_map: dict[str, str]
+) -> None:
+    # Safety check that user has passed in all required imports:
+    tool_name = tool.__class__.__name__
+    current_class_imports = {tool_name}
+    if additional_imports_module_attr_map:
+        current_class_imports.update(set(additional_imports_module_attr_map.values()))
+    required_class_imports = set(find_required_class_names_for_import(tool))
+
+    if not current_class_imports.issuperset(required_class_imports):
+        err_msg = f"[ERROR] You are missing module_attr pairs in `additional_imports_module_attr_map`. Currently, you have imports for {current_class_imports}, but the required classes for import are {required_class_imports}"
+        logger.debug(err_msg)
+        raise RuntimeError(err_msg)
+
+def find_required_class_names_for_import(
+    obj: Union["LangChainBaseTool", BaseModel],
+) -> list[str]:
+    """
+    Finds all the class names for required imports when instantiating the `obj`.
+    NOTE: This does not return the full import path, only the class name.
+
+    We accomplish this by running BFS and deep searching all the BaseModel objects in the obj parameters.
+    """
+    class_names = {obj.__class__.__name__}
+    queue = [obj]
+
+    while queue:
+        # Get the current object we are inspecting
+        curr_obj = queue.pop()
+
+        # Collect all possible candidates for BaseModel objects
+        candidates = []
+        if is_base_model(curr_obj):
+            # If it is a base model, we get all the values of the object parameters
+            # i.e., if obj('b' = <class A>), we would want to inspect <class A>
+            fields = dict(curr_obj)
+            # Generate code for each field, skipping empty or None values
+            candidates = list(fields.values())
+        elif isinstance(curr_obj, dict):
+            # If it is a dictionary, we get all the values
+            # i.e., if obj = {'a': 3, 'b': <class A>}, we would want to inspect <class A>
+            candidates = list(curr_obj.values())
+        elif isinstance(curr_obj, list):
+            # If it is a list, we inspect all the items in the list
+            # i.e., if obj = ['a', 3, None, <class A>], we would want to inspect <class A>
+            candidates = curr_obj
+
+        # Filter out all candidates that are not BaseModels
+        # In the list example above, ['a', 3, None, <class A>], we want to filter out 'a', 3, and None
+        candidates = filter(lambda x: is_base_model(x), candidates)
+
+        # Classic BFS here
+        for c in candidates:
+            c_name = c.__class__.__name__
+            if c_name not in class_names:
+                class_names.add(c_name)
+                queue.append(c)
+
+    return list(class_names)
+
+def generate_imported_tool_instantiation_call_str(obj: Any) -> Optional[str]:
+    if isinstance(obj, (int, float, str, bool, type(None))):
+        # This is the base case
+        # If it is a basic Python type, we trivially return the string version of that value
+        # Handle basic types
+        return repr(obj)
+    elif is_base_model(obj):
+        # Otherwise, if it is a BaseModel
+        # We want to pull out all the parameters, and reformat them into strings
+        # e.g. {arg}={value}
+        # The reason why this is recursive, is because the value can be another BaseModel that we need to stringify
+        model_name = obj.__class__.__name__
+        fields = obj.dict()
+        # Generate code for each field, skipping empty or None values
+        field_assignments = []
+        for arg, value in fields.items():
+            python_string = generate_imported_tool_instantiation_call_str(value)
+            if python_string:
+                field_assignments.append(f"{arg}={python_string}")
+
+        assignments = ", ".join(field_assignments)
+        return f"{model_name}({assignments})"
+    elif isinstance(obj, dict):
+        # Inspect each of the items in the dict and stringify them
+        # This is important because the dictionary may contain other BaseModels
+        dict_items = []
+        for k, v in obj.items():
+            python_string = generate_imported_tool_instantiation_call_str(v)
+            if python_string:
+                dict_items.append(f"{repr(k)}: {python_string}")
+
+        joined_items = ", ".join(dict_items)
+        return f"{{{joined_items}}}"
+    elif isinstance(obj, list):
+        # Inspect each of the items in the list and stringify them
+        # This is important because the list may contain other BaseModels
+        list_items = [generate_imported_tool_instantiation_call_str(v) for v in obj]
+        filtered_list_items = list(filter(None, list_items))
+        list_items = ", ".join(filtered_list_items)
+        return f"[{list_items}]"
+    else:
+        # Otherwise, if it is none of the above, that usually means it is a custom Python class that is NOT a BaseModel
+        # Thus, we cannot get enough information about it to stringify it
+        # This may cause issues, but we are making the assumption that any of these custom Python types are handled correctly by the parent library, such as LangChain
+        # An example would be that WikipediaAPIWrapper has an argument that is a wikipedia (pip install wikipedia) object
+        # We cannot stringify this easily, but WikipediaAPIWrapper handles the setting of this parameter internally
+        # This assumption seems fair to me, since usually they are external imports, and LangChain should be bundling those as module-level imports within the tool
+        # We throw a warning here anyway and provide the class name
+        logger.debug(
+            f"[WARNING] Skipping parsing unknown class {obj.__class__.__name__} (does not inherit from the Pydantic BaseModel and is not a basic Python type)"
+        )
+        if obj.__class__.__name__ == "function":
+            import inspect
+
+            logger.debug(inspect.getsource(obj))
+
+        return None
+
+def is_base_model(obj: Any):
+    from langchain_core.pydantic_v1 import BaseModel as LangChainBaseModel
+
+    return isinstance(obj, BaseModel) or isinstance(obj, LangChainBaseModel)
+
+def generate_import_code(module_attr_map: Optional[dict]):
+    if not module_attr_map:
+        return ""
+
+    code_lines = []
+    for module, attr in module_attr_map.items():
+        module_name = module.split(".")[-1]
+        code_lines.append(
+            f"# Load the module\n    {module_name} = importlib.import_module('{module}')"
+        )
+        code_lines.append(f"    # Access the {attr} from the module")
+        code_lines.append(f"    {attr} = getattr({module_name}, '{attr}')")
+    return "\n".join(code_lines)
+
+def parse_mirix_response_for_assistant_message(
+    mirix_response: MirixResponse,
+    assistant_message_tool_name: str = DEFAULT_MESSAGE_TOOL,
+    assistant_message_tool_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
+) -> Optional[str]:
+    reasoning_message = ""
+    for m in mirix_response.messages:
+        if isinstance(m, AssistantMessage):
+            return m.assistant_message
+        elif (
+            isinstance(m, ToolCallMessage)
+            and m.tool_call.name == assistant_message_tool_name
+        ):
+            try:
+                return json.loads(m.tool_call.arguments)[assistant_message_tool_kwarg]
+            except Exception:  # TODO: Make this more specific
+                continue
+        elif isinstance(m, ReasoningMessage):
+            # This is not ideal, but we would like to return something rather than nothing
+            reasoning_message += f"{m.reasoning}\n"
+
+    return None
+
+async def async_send_message_with_retries(
+    server,
+    sender_agent: "Agent",
+    target_agent_id: str,
+    message_text: str,
+    max_retries: int,
+    timeout: int,
+    logging_prefix: Optional[str] = None,
+) -> str:
+    """
+    Shared helper coroutine to send a message to an agent with retries and a timeout.
+
+    Args:
+        server: The Mirix server instance (from get_mirix_server()).
+        sender_agent (Agent): The agent initiating the send action.
+        target_agent_id (str): The ID of the agent to send the message to.
+        message_text (str): The text to send as the user message.
+        max_retries (int): Maximum number of retries for the request.
+        timeout (int): Maximum time to wait for a response (in seconds).
+        logging_prefix (str): A prefix to append to logging
+    Returns:
+        str: The response or an error message.
+    """
+    logging_prefix = logging_prefix or "[async_send_message_with_retries]"
+    for attempt in range(1, max_retries + 1):
+        try:
+            messages = [
+                MessageCreate(
+                    role=MessageRole.user,
+                    text=message_text,
+                    name=sender_agent.agent_state.name,
+                )
+            ]
+            # Wrap in a timeout
+            response = await asyncio.wait_for(
+                server.send_message_to_agent(
+                    agent_id=target_agent_id,
+                    actor=sender_agent.user,
+                    messages=messages,
+                    stream_steps=False,
+                    stream_tokens=False,
+                    use_assistant_message=True,
+                    assistant_message_tool_name=DEFAULT_MESSAGE_TOOL,
+                    assistant_message_tool_kwarg=DEFAULT_MESSAGE_TOOL_KWARG,
+                ),
+                timeout=timeout,
+            )
+
+            # Extract assistant message
+            assistant_message = parse_mirix_response_for_assistant_message(
+                response,
+                assistant_message_tool_name=DEFAULT_MESSAGE_TOOL,
+                assistant_message_tool_kwarg=DEFAULT_MESSAGE_TOOL_KWARG,
+            )
+            if assistant_message:
+                msg = f"Agent {target_agent_id} said '{assistant_message}'"
+                logger.info("%s - %s", logging_prefix, msg)
+                return msg
+            else:
+                msg = f"(No response from agent {target_agent_id})"
+                logger.info("%s - %s", logging_prefix, msg)
+                return msg
+        except asyncio.TimeoutError:
+            error_msg = f"(Timeout on attempt {attempt}/{max_retries} for agent {target_agent_id})"
+            logger.warning("%s - %s", logging_prefix, error_msg)
+        except Exception as e:
+            error_msg = f"(Error on attempt {attempt}/{max_retries} for agent {target_agent_id}: {e})"
+            logger.warning("%s - %s", logging_prefix, error_msg)
+
+        # Exponential backoff before retrying
+        if attempt < max_retries:
+            backoff = uniform(0.5, 2) * (2**attempt)
+            sender_agent.logger.warning(
+                f"{logging_prefix} - Retrying the agent to agent send_message...sleeping for {backoff}"
+            )
+            await asyncio.sleep(backoff)
+        else:
+            sender_agent.logger.error(
+                f"{logging_prefix} - Fatal error during agent to agent send_message: {error_msg}"
+            )
+            return error_msg
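
A similarly hedged sketch of the LangChain wrapper path in helpers.py. The Wikipedia tool, its import paths, and the module/attr map are illustrative assumptions (they require the langchain_community and wikipedia packages to be installed) and are not pinned by this release.

    from langchain_community.tools import WikipediaQueryRun
    from langchain_community.utilities import WikipediaAPIWrapper

    from mirix.functions.helpers import generate_langchain_tool_wrapper

    # The generator emits source for a plain function that re-imports and
    # re-instantiates the tool at call time, so every class referenced in the
    # constructor call must appear in additional_imports_module_attr_map.
    tool = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())  # hypothetical example tool
    func_name, wrapper_source = generate_langchain_tool_wrapper(
        tool,
        additional_imports_module_attr_map={
            "langchain_community.utilities": "WikipediaAPIWrapper",
        },
    )
    # func_name is the decamelized tool name (e.g. "wikipedia_query_run");
    # wrapper_source is the generated function body that can then be registered
    # as a Mirix tool.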