aixtools 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aixtools might be problematic. Click here for more details.
- aixtools/__init__.py +5 -0
- aixtools/a2a/__init__.py +5 -0
- aixtools/a2a/app.py +126 -0
- aixtools/a2a/utils.py +115 -0
- aixtools/agents/__init__.py +12 -0
- aixtools/agents/agent.py +164 -0
- aixtools/agents/agent_batch.py +74 -0
- aixtools/app.py +143 -0
- aixtools/context.py +12 -0
- aixtools/db/__init__.py +17 -0
- aixtools/db/database.py +110 -0
- aixtools/db/vector_db.py +115 -0
- aixtools/log_view/__init__.py +17 -0
- aixtools/log_view/app.py +195 -0
- aixtools/log_view/display.py +285 -0
- aixtools/log_view/export.py +51 -0
- aixtools/log_view/filters.py +41 -0
- aixtools/log_view/log_utils.py +26 -0
- aixtools/log_view/node_summary.py +229 -0
- aixtools/logfilters/__init__.py +7 -0
- aixtools/logfilters/context_filter.py +67 -0
- aixtools/logging/__init__.py +30 -0
- aixtools/logging/log_objects.py +227 -0
- aixtools/logging/logging_config.py +116 -0
- aixtools/logging/mcp_log_models.py +102 -0
- aixtools/logging/mcp_logger.py +172 -0
- aixtools/logging/model_patch_logging.py +87 -0
- aixtools/logging/open_telemetry.py +36 -0
- aixtools/mcp/__init__.py +9 -0
- aixtools/mcp/example_client.py +30 -0
- aixtools/mcp/example_server.py +22 -0
- aixtools/mcp/fast_mcp_log.py +31 -0
- aixtools/mcp/faulty_mcp.py +320 -0
- aixtools/model_patch/model_patch.py +65 -0
- aixtools/server/__init__.py +23 -0
- aixtools/server/app_mounter.py +90 -0
- aixtools/server/path.py +72 -0
- aixtools/server/utils.py +70 -0
- aixtools/testing/__init__.py +9 -0
- aixtools/testing/aix_test_model.py +147 -0
- aixtools/testing/mock_tool.py +66 -0
- aixtools/testing/model_patch_cache.py +279 -0
- aixtools/tools/doctor/__init__.py +3 -0
- aixtools/tools/doctor/tool_doctor.py +61 -0
- aixtools/tools/doctor/tool_recommendation.py +44 -0
- aixtools/utils/__init__.py +35 -0
- aixtools/utils/chainlit/cl_agent_show.py +82 -0
- aixtools/utils/chainlit/cl_utils.py +168 -0
- aixtools/utils/config.py +118 -0
- aixtools/utils/config_util.py +69 -0
- aixtools/utils/enum_with_description.py +37 -0
- aixtools/utils/persisted_dict.py +99 -0
- aixtools/utils/utils.py +160 -0
- aixtools-0.1.0.dist-info/METADATA +355 -0
- aixtools-0.1.0.dist-info/RECORD +58 -0
- aixtools-0.1.0.dist-info/WHEEL +5 -0
- aixtools-0.1.0.dist-info/entry_points.txt +2 -0
- aixtools-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,285 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Functions for displaying nodes in the Streamlit interface.
|
|
3
|
+
Provides enhanced display capabilities for various object types,
|
|
4
|
+
including dataclasses, with proper handling of nested structures.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import inspect
|
|
8
|
+
import json
|
|
9
|
+
from dataclasses import fields as dataclass_fields
|
|
10
|
+
from dataclasses import is_dataclass
|
|
11
|
+
|
|
12
|
+
import pandas as pd
|
|
13
|
+
import streamlit as st
|
|
14
|
+
from rich.console import Console
|
|
15
|
+
|
|
16
|
+
from aixtools.utils.utils import prepend_all_lines
|
|
17
|
+
|
|
18
|
+
# Toggle for using markdown display instead of JSON
|
|
19
|
+
USE_MARKDOWN = True
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def filter_private_fields(data_dict: dict) -> dict:
    """Return a copy of *data_dict* without keys that start with an underscore."""
    public = {}
    for key, value in data_dict.items():
        if not key.startswith("_"):
            public[key] = value
    return public
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def filter_private_attributes(obj) -> dict:
    """
    Collect the public attributes of *obj*.

    Returns an empty dict when *obj* has no ``__dict__`` (e.g. ints or
    slotted instances); otherwise maps every attribute whose name does
    not start with an underscore to its value.
    """
    if not hasattr(obj, "__dict__"):
        return {}
    return {name: value for name, value in vars(obj).items() if not name.startswith("_")}
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def is_method(obj, attr_name: str) -> bool:
    """Return True when *attr_name* on *obj* resolves to a method or a plain function."""
    try:
        candidate = getattr(obj, attr_name)
    except (AttributeError, TypeError):
        # Missing attribute (or a property raising TypeError) counts as "not a method".
        return False
    return inspect.ismethod(candidate) or inspect.isfunction(candidate)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def get_object_type_str(obj) -> str:  # noqa: PLR0911, pylint: disable=too-many-return-statements
    """
    Return a short, human-readable type label for *obj*.

    Scalars map to plain type names ("bool", "int", ...), sized
    containers include their length (e.g. "list[3]"), dataclass
    instances are prefixed with "dataclass:", and everything else falls
    back to the class name.
    """
    if obj is None:
        return "null"
    # bool must be tested before int: bool is a subclass of int.
    if isinstance(obj, bool):
        return "bool"
    if isinstance(obj, int):
        return "int"
    if isinstance(obj, float):
        return "float"
    if isinstance(obj, str):
        return "str"
    # Sized containers include their element count in the label.
    for container_type in (list, tuple, dict, set):
        if isinstance(obj, container_type):
            return f"{container_type.__name__}[{len(obj)}]"
    if is_dataclass(obj):
        return f"dataclass:{type(obj).__name__}"
    # Fallback: class name. (The original had a redundant __dict__ branch
    # that returned exactly the same value; it has been removed.)
    return type(obj).__name__
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def object_to_json_with_types(obj, max_depth: int = 5, current_depth: int = 0):  # noqa: PLR0911, pylint: disable=too-many-return-statements
    """
    Convert *obj* into a JSON-serializable structure with type markers.

    JSON-native scalars pass through unchanged; lists and tuples become
    JSON lists; dicts keep only public (non-underscore) keys; sets,
    dataclasses, and arbitrary objects are wrapped in dicts carrying a
    "__type" key. Recursion stops at *max_depth*, beyond which values
    are recorded as their str() form.
    """
    # Depth guard: beyond max_depth, fall back to a string snapshot.
    if current_depth > max_depth:
        return {"__type": get_object_type_str(obj), "__value": str(obj)}

    if obj is None:
        return None

    # JSON-native scalars need no wrapping.
    if isinstance(obj, (bool, int, float, str)):
        return obj

    def recurse(value):
        # One level deeper with the same depth budget.
        return object_to_json_with_types(value, max_depth, current_depth + 1)

    # Lists and tuples both become JSON lists (tuple identity is not kept).
    if isinstance(obj, (list, tuple)):
        return [recurse(item) for item in obj]

    # Dicts: keep public keys only, recurse on values.
    if isinstance(obj, dict):
        return {key: recurse(value) for key, value in filter_private_fields(obj).items()}

    # Sets are not JSON-native; wrap them with an explicit marker.
    if isinstance(obj, set):
        return {"__type": "set", "__items": [recurse(item) for item in obj]}

    # Dataclasses: record the class name and public fields.
    if is_dataclass(obj):
        result = {"__type": f"dataclass:{type(obj).__name__}"}
        for field in dataclass_fields(obj):
            # Skip private fields and fields missing on the instance.
            if field.name.startswith("_") or not hasattr(obj, field.name):
                continue
            result[field.name] = recurse(getattr(obj, field.name))
        return result

    # Arbitrary objects: record the class name and public data attributes.
    if hasattr(obj, "__dict__"):
        result = {"__type": type(obj).__name__}
        for attr, value in filter_private_attributes(obj).items():
            if not is_method(obj, attr):  # methods are not data
                result[attr] = recurse(value)
        return result

    # Anything else (bytes, complex, ...): record type and string value.
    return {"__type": get_object_type_str(obj), "__value": str(obj)}
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def object_to_markdown(  # noqa: PLR0911, PLR0912, PLR0915, pylint: disable=too-many-locals,too-many-return-statements,too-many-branches,too-many-statements
    obj, max_depth: int = 5, current_depth: int = 0, indent: str = ""
) -> str:
    """
    Convert an object to a compact markdown representation.

    Scalars render inline; collections render as bullet lists, truncated
    after ``max_display_items`` entries; dataclasses and plain objects
    list their public fields. Recursion stops at *max_depth*, beyond
    which values are stringified.
    """
    max_display_items = 10  # Show only the first N items of large collections.

    # Prevent infinite recursion: beyond max_depth, stringify.
    if current_depth > max_depth:
        return f"`{get_object_type_str(obj)}`: {str(obj)}"

    if obj is None:
        return "`None`"

    if isinstance(obj, bool):
        # Render booleans lowercase, JSON-style.
        return f"`{str(obj).lower()}`"

    if isinstance(obj, (int, float)):
        return f"`{obj}`"

    if isinstance(obj, str):
        # Multi-line strings go into a fenced code block, indented to match.
        if len(obj.splitlines()) > 1:
            return f"\n{indent}```\n{prepend_all_lines(obj, prepend=indent)}\n{indent}```\n"
        return obj

    if isinstance(obj, (list, tuple)):
        if not obj:
            return f"`{get_object_type_str(obj)}`: empty"

        # Small nested collections of scalars render inline (never at top level).
        max_inline_length = 3
        if (
            len(obj) <= max_inline_length
            and current_depth > 0
            and all(isinstance(x, (bool, int, float, str, type(None))) for x in obj)
        ):
            items = [object_to_markdown(item, max_depth, current_depth + 1) for item in obj]
            return f"`{get_object_type_str(obj)}`: [{', '.join(items)}]"

        # Larger collections: bullet list, truncated past max_display_items
        # (only when at least 3 items would be hidden, to avoid a pointless
        # "... (1 more items)" line).
        result = [f"`{get_object_type_str(obj)}`:"]
        for i, item in enumerate(obj):
            if i >= max_display_items and len(obj) > max_display_items + 2:
                # Was hard-coded "- 10"; use the constant so they cannot drift.
                result.append(f"{indent}* ... ({len(obj) - max_display_items} more items)")
                break
            item_md = object_to_markdown(item, max_depth, current_depth + 1, indent + "    ")
            result.append(f"{indent}* {item_md}")
        return "\n".join(result)

    if isinstance(obj, dict):
        if not obj:
            return "`dict`: empty"

        # NOTE(review): the label counts all keys, but private ("_"-prefixed)
        # keys are skipped below, so the shown count can exceed the listed
        # items — confirm whether the label should use the filtered count.
        result = [f"`dict[{len(obj)}]`:"]
        for i, (key, value) in enumerate(filter_private_fields(obj).items()):
            if i >= max_display_items and len(obj) > max_display_items + 2:
                result.append(f"{indent}* ... ({len(obj) - max_display_items} more items)")
                break
            value_md = object_to_markdown(value, max_depth, current_depth + 1, indent + "    ")
            result.append(f"{indent}* **{key}**: {value_md}")
        return "\n".join(result)

    if isinstance(obj, set):
        if not obj:
            return "`set`: empty"

        result = [f"`set[{len(obj)}]`:"]
        for i, item in enumerate(obj):
            if i >= max_display_items and len(obj) > max_display_items + 2:
                result.append(f"{indent}* ... ({len(obj) - max_display_items} more items)")
                break
            item_md = object_to_markdown(item, max_depth, current_depth + 1, indent + "    ")
            result.append(f"{indent}* {item_md}")
        return "\n".join(result)

    if is_dataclass(obj):
        # Colon outside the backticks, consistent with every other branch
        # (original wrote `Name:` with the colon inside).
        result = [f"`{type(obj).__name__}`:"]
        for field in dataclass_fields(obj):
            # Skip private fields and fields missing on the instance.
            if field.name.startswith("_") or not hasattr(obj, field.name):
                continue
            value_md = object_to_markdown(getattr(obj, field.name), max_depth, current_depth + 1, indent + "    ")
            result.append(f"{indent}* **{field.name}**: {value_md}")
        return "\n".join(result)

    if hasattr(obj, "__dict__"):
        attrs = filter_private_attributes(obj)
        if not attrs:
            return f"`{type(obj).__name__}`: (no public attributes)"

        result = [f"`{type(obj).__name__}`:"]
        for attr, value in attrs.items():
            if not is_method(obj, attr):  # methods are not data
                value_md = object_to_markdown(value, max_depth, current_depth + 1, indent + "    ")
                result.append(f"{indent}* **{attr}**: {value_md}")
        return "\n".join(result)

    # Anything else (bytes, complex, ...): type label plus str() value.
    return f"`{get_object_type_str(obj)}`: {str(obj)}"
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
def format_json_for_display(json_obj) -> str:
    """Serialize *json_obj* as pretty-printed JSON, stringifying non-serializable values."""
    return json.dumps(json_obj, default=str, indent=2)
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def display_node(node, display_format: str) -> None:
    """
    Render *node* in the Streamlit UI using the requested *display_format*.

    DataFrames always render as an interactive table regardless of the
    format; other values render as Markdown, JSON, or rich-captured text.

    Raises:
        ValueError: if *display_format* is not one of "Markdown", "JSON", "Rich".
    """
    # DataFrames get Streamlit's native table widget regardless of format.
    if isinstance(node, pd.DataFrame):
        st.dataframe(node)
        return

    if display_format == "Markdown":
        st.markdown(object_to_markdown(node))
    elif display_format == "JSON":
        st.json(object_to_json_with_types(node))
    elif display_format == "Rich":
        st.write(rich_print(node))
    else:
        raise ValueError(f"Unsupported display format: {display_format}")
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
def rich_print(node):
    """Render *node* through rich's console and wrap the captured text in a code fence."""
    plain_console = Console(color_system=None)  # no ANSI escape codes in the capture
    with plain_console.capture() as captured:
        plain_console.print(node)
    return f"```\n{captured.get()}\n```"
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Functions for exporting nodes to various formats.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def export_nodes_to_json(nodes: list) -> str:
    """Serialize *nodes* into a pretty-printed JSON string for download."""

    def _safe(value):
        # Keep JSON-native values as-is; stringify everything else.
        if isinstance(value, (dict, list, int, float, bool, type(None))):
            return value
        return str(value)

    serializable = []
    for node in nodes:
        if hasattr(node, "__dict__"):
            # Objects: record class name plus public attributes.
            serializable.append(
                {
                    "type": type(node).__name__,
                    "attributes": {
                        name: _safe(value)
                        for name, value in vars(node).items()
                        if not name.startswith("_")
                    },
                }
            )
        elif isinstance(node, dict):
            # Dicts: stringify keys, keep JSON-native values.
            serializable.append(
                {"type": "dict", "content": {str(k): _safe(v) for k, v in node.items()}}
            )
        elif isinstance(node, (list, tuple)):
            kind = "list" if isinstance(node, list) else "tuple"
            serializable.append({"type": kind, "content": [_safe(item) for item in node]})
        else:
            # Primitives (and anything else): always stringified.
            serializable.append({"type": type(node).__name__, "value": str(node)})

    return json.dumps(serializable, indent=2)
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Functions for filtering nodes based on various criteria.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import re
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from aixtools.log_view.node_summary import get_node_type
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def filter_nodes(nodes: list, filters: dict[str, Any]) -> list:
    """
    Return the subset of *nodes* matching every active filter.

    Supported filter keys:
        "text": case-insensitive substring match against str(node).
        "types": node's type name must be in the given collection.
        "attribute": node must expose the named public attribute.
        "regex": case-insensitive pattern searched in str(node);
            invalid patterns are silently ignored.
    """
    if not filters:
        return nodes

    remaining = list(nodes)

    text = filters.get("text")
    if text:
        needle = text.lower()
        remaining = [n for n in remaining if needle in str(n).lower()]

    wanted_types = filters.get("types")
    if wanted_types:
        remaining = [n for n in remaining if get_node_type(n) in wanted_types]

    attribute = filters.get("attribute")
    if attribute:
        remaining = [n for n in remaining if hasattr(n, "__dict__") and attribute in vars(n)]

    regex = filters.get("regex")
    if regex:
        try:
            pattern = re.compile(regex, re.IGNORECASE)
        except re.error:
            pass  # Invalid regex pattern: skip this filter.
        else:
            remaining = [n for n in remaining if pattern.search(str(n))]

    return remaining
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Utility functions for handling log files.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def get_log_files(log_dir: Path) -> list[Path]:
    """Return every ``agent_run.*.pkl`` file in *log_dir*, newest first (empty if the directory is missing)."""
    if not log_dir.exists():
        return []
    return sorted(log_dir.glob("agent_run.*.pkl"), key=lambda p: p.stat().st_mtime, reverse=True)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def format_timestamp_from_filename(filename: str) -> str:
    """
    Pull the timestamp out of an ``agent_run.YYYYMMDD_HHMMSS.pkl`` filename.

    Returns it reformatted as ``YYYY-MM-DD HH:MM:SS``, or "Unknown date"
    when the filename does not match the expected pattern.
    """
    try:
        raw = filename.split("agent_run.")[1].split(".pkl")[0]
        parsed = datetime.strptime(raw, "%Y%m%d_%H%M%S")
    except (IndexError, ValueError):
        return "Unknown date"
    return parsed.strftime("%Y-%m-%d %H:%M:%S")
|
|
@@ -0,0 +1,229 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Utility functions for working with node objects.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import traceback
|
|
7
|
+
|
|
8
|
+
import rich
|
|
9
|
+
from mcp.types import CallToolResult, EmbeddedResource, ImageContent, TextContent
|
|
10
|
+
from pydantic_ai import CallToolsNode, ModelRequestNode, UserPromptNode
|
|
11
|
+
from pydantic_ai.messages import (
|
|
12
|
+
ModelRequest,
|
|
13
|
+
ModelResponse,
|
|
14
|
+
RetryPromptPart,
|
|
15
|
+
SystemPromptPart,
|
|
16
|
+
TextPart,
|
|
17
|
+
ToolCallPart,
|
|
18
|
+
ToolReturnPart,
|
|
19
|
+
UserPromptPart,
|
|
20
|
+
)
|
|
21
|
+
from pydantic_ai.models import ModelRequestParameters
|
|
22
|
+
from pydantic_ai.result import FinalResult
|
|
23
|
+
from pydantic_ai.usage import Usage
|
|
24
|
+
from pydantic_graph import End
|
|
25
|
+
|
|
26
|
+
from aixtools.logging.logging_config import get_logger
|
|
27
|
+
from aixtools.logging.model_patch_logging import ModelRawRequest, ModelRawRequestResult
|
|
28
|
+
from aixtools.utils.utils import escape_newline
|
|
29
|
+
|
|
30
|
+
logger = get_logger(__name__)
|
|
31
|
+
|
|
32
|
+
MAX_STR_LEN = 200
|
|
33
|
+
DEBUG = False
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def has_multiple_lines(s: str) -> bool:
    """
    Check whether *s* contains at least two newline characters.

    Note: a string with exactly one newline (i.e. two lines) is NOT
    treated as multi-line by this check.
    """
    return s.count("\n") >= 2
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def get_node_type(node):
    """Return the class name of *node*."""
    # __name__ is already a str; no extra conversion needed.
    return type(node).__name__
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def extract_node_types(nodes: list) -> set[str]:
    """Return the set of distinct type names across *nodes*."""
    # Set comprehension replaces the manual add-in-a-loop version.
    return {get_node_type(node) for node in nodes}
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def to_str(s, max_len=MAX_STR_LEN):
    """
    Coerce *s* to a compact display string.

    Multi-line text (two or more newlines) is collapsed via
    escape_newline; the result is truncated to *max_len* characters
    with a trailing ellipsis.
    """
    text = str(s)
    if has_multiple_lines(text):
        text = escape_newline(text)
    return text if len(text) <= max_len else text[:max_len] + "..."
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def try_json(s):
    """
    Attempt to parse *s* as JSON.

    Returns the parsed object on success, or *s* unchanged when it is
    not valid JSON (or not a string/bytes value at all).
    """
    try:
        return json.loads(s)
    except (ValueError, TypeError):
        # ValueError covers json.JSONDecodeError; TypeError covers
        # non-string input. Narrowed from the original broad Exception.
        return s
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
class NodeTitle:
|
|
77
|
+
"""Class to create a title for nodes in a human-readable format."""
|
|
78
|
+
|
|
79
|
+
def __init__(self):
|
|
80
|
+
pass
|
|
81
|
+
|
|
82
|
+
def summary(self, node): # noqa: PLR0911, PLR0912, pylint: disable=too-many-return-statements,too-many-branches
|
|
83
|
+
"""Generate a summary string for a node."""
|
|
84
|
+
if node is None:
|
|
85
|
+
return "None"
|
|
86
|
+
_type = str(type(node).__name__)
|
|
87
|
+
if DEBUG:
|
|
88
|
+
rich.print(node)
|
|
89
|
+
try:
|
|
90
|
+
match node:
|
|
91
|
+
case str() | bool() | float() | int():
|
|
92
|
+
return f"`{_type}`: {to_str(node)}"
|
|
93
|
+
case list():
|
|
94
|
+
return to_str(f"`list` ({len(node)}):\n[" + "\n, ".join([self.summary(n) for n in node]) + "]")
|
|
95
|
+
case dict():
|
|
96
|
+
return to_str(
|
|
97
|
+
f"`dict` ({len(node)}): "
|
|
98
|
+
+ "{"
|
|
99
|
+
+ "\n, ".join([f"{k}: {self.summary(v)}" for k, v in node.items()])
|
|
100
|
+
+ "}"
|
|
101
|
+
)
|
|
102
|
+
case tuple():
|
|
103
|
+
if len(node) == 0:
|
|
104
|
+
return "`tuple`: Empty"
|
|
105
|
+
items = [self.summary(n) for n in node]
|
|
106
|
+
items_str = "(" + ", ".join([str(item) for item in items]) + ")"
|
|
107
|
+
return f"`tuple` ({len(node)}): {to_str(items_str)}"
|
|
108
|
+
case CallToolsNode():
|
|
109
|
+
return f"`{_type}`: {to_str(self.summary(node.model_response))}"
|
|
110
|
+
case CallToolResult():
|
|
111
|
+
return f"`{_type}`: {to_str(self.summary_call_tool_result(node))}"
|
|
112
|
+
case End():
|
|
113
|
+
return f"`{_type}`: {to_str(self.summary(node.data))}"
|
|
114
|
+
case FinalResult():
|
|
115
|
+
if hasattr(node, "data"):
|
|
116
|
+
return f"`{_type}`: {to_str(self.summary(node.data))}"
|
|
117
|
+
if node.tool_name:
|
|
118
|
+
return f"`{_type}`: {to_str(node.tool_name)}"
|
|
119
|
+
return f"`{_type}`"
|
|
120
|
+
case ModelRawRequest():
|
|
121
|
+
return f"`{_type}`: {to_str(self.summary_model_raw_request(node))}"
|
|
122
|
+
case ModelRawRequestResult():
|
|
123
|
+
return f"`{_type}`: {to_str(self.summary(node.result))}"
|
|
124
|
+
case ModelRequest():
|
|
125
|
+
return f"`{_type}`: {to_str(self.summary_model_request(node))}"
|
|
126
|
+
case ModelRequestNode():
|
|
127
|
+
return f"`{_type}`: {to_str(self.summary(node.request))}"
|
|
128
|
+
case ModelRequestParameters():
|
|
129
|
+
return f"`{_type}`: {to_str(self.summary_model_request_parameters(node))}"
|
|
130
|
+
case ModelResponse():
|
|
131
|
+
return f"`{_type}`: {to_str(self.summary_model_response(node))}"
|
|
132
|
+
case TextPart() | SystemPromptPart() | UserPromptPart() | ToolReturnPart() | RetryPromptPart():
|
|
133
|
+
return self.summary(node.content)
|
|
134
|
+
case TextContent():
|
|
135
|
+
return self.summary(node.text)
|
|
136
|
+
case ImageContent():
|
|
137
|
+
return f"Image: {node.mimeType}"
|
|
138
|
+
case EmbeddedResource():
|
|
139
|
+
return f"Resource: {node.resource}"
|
|
140
|
+
case UserPromptNode():
|
|
141
|
+
return f"`{_type}`: {to_str(self.summary_user_prompt(node))}"
|
|
142
|
+
case ToolCallPart():
|
|
143
|
+
args = node.args
|
|
144
|
+
if isinstance(args, str):
|
|
145
|
+
args = try_json(args)
|
|
146
|
+
if isinstance(args, dict):
|
|
147
|
+
args = ", ".join([f"{k} = {self.summary(v)}" for k, v in args.items()])
|
|
148
|
+
return f"{node.tool_name}({to_str(args)})"
|
|
149
|
+
case Usage():
|
|
150
|
+
return f"`{_type}`: {to_str(self.summary_usage(node))}"
|
|
151
|
+
case _:
|
|
152
|
+
logger.debug("NodeSummary.summary(): Unknown node type %s", type(node))
|
|
153
|
+
return f"`{type(node)}`: {str(node)}"
|
|
154
|
+
except Exception as e: # pylint: disable=broad-exception-caught
|
|
155
|
+
print(f"Error while summarizing {_type}: {e}")
|
|
156
|
+
traceback.print_exc()
|
|
157
|
+
return f"`{_type}`: {to_str(to_str(node))}"
|
|
158
|
+
|
|
159
|
+
def summary_call_tool_result(self, node: CallToolResult):
|
|
160
|
+
"""Generate summary for CallToolResult node by joining content summaries."""
|
|
161
|
+
out = [self.summary(c) for c in node.content]
|
|
162
|
+
return "\n".join(out)
|
|
163
|
+
|
|
164
|
+
def summary_model_raw_request(self, node: ModelRawRequest):
|
|
165
|
+
"""Format ModelRawRequest node showing args and kwargs in readable format."""
|
|
166
|
+
args = [self.summary(p) for p in node.args]
|
|
167
|
+
kwargs = [f"{k}={self.summary(v)}" for k, v in node.kwargs.items()]
|
|
168
|
+
out = ""
|
|
169
|
+
if len(args) > 0:
|
|
170
|
+
out += ", ".join(args)
|
|
171
|
+
if len(kwargs) > 0:
|
|
172
|
+
if len(out) > 0:
|
|
173
|
+
out += ", "
|
|
174
|
+
out += ", ".join([f"{k} = {self.summary(v)}" for k, v in kwargs])
|
|
175
|
+
return out
|
|
176
|
+
|
|
177
|
+
def summary_model_request(self, node: ModelRequest):
|
|
178
|
+
"""Generate summary for ModelRequest by joining part summaries."""
|
|
179
|
+
out = [self.summary(p) for p in node.parts]
|
|
180
|
+
return "\n".join(out)
|
|
181
|
+
|
|
182
|
+
def summary_model_request_parameters(self, node: ModelRequestParameters):
|
|
183
|
+
"""Format model request parameters with tools and result tools."""
|
|
184
|
+
out = ""
|
|
185
|
+
|
|
186
|
+
if hasattr(node, "function_tools"):
|
|
187
|
+
tools = [self.tool_description(tool_definition) for tool_definition in node.function_tools]
|
|
188
|
+
if len(tools) > 0:
|
|
189
|
+
if len(tools) == 1:
|
|
190
|
+
out += f"Tool: {tools[0]}"
|
|
191
|
+
else:
|
|
192
|
+
out += "Tools:\n" + "\n".join(tools)
|
|
193
|
+
|
|
194
|
+
if hasattr(node, "output_tools"):
|
|
195
|
+
result_tools = [self.tool_description(tool_definition) for tool_definition in node.output_tools]
|
|
196
|
+
if len(result_tools) > 0:
|
|
197
|
+
if len(out) > 0:
|
|
198
|
+
out += "\n"
|
|
199
|
+
out += "Output Tools:\n" + "\n".join(result_tools)
|
|
200
|
+
|
|
201
|
+
return out if len(out) > 0 else ""
|
|
202
|
+
|
|
203
|
+
def summary_model_response(self, node: ModelResponse):
|
|
204
|
+
"""Generate summary for ModelResponse by joining part summaries."""
|
|
205
|
+
out = [self.summary(p) for p in node.parts]
|
|
206
|
+
return "\n".join(out)
|
|
207
|
+
|
|
208
|
+
def summary_usage(self, node: Usage):
|
|
209
|
+
"""Format token usage information showing request and response tokens."""
|
|
210
|
+
return f"tokens: ({node.request_tokens}, {node.response_tokens}"
|
|
211
|
+
|
|
212
|
+
def summary_user_prompt(self, node: UserPromptNode):
|
|
213
|
+
"""Generate summary for UserPromptNode handling both string and list formats."""
|
|
214
|
+
if isinstance(node.user_prompt, str):
|
|
215
|
+
return self.summary(node.user_prompt)
|
|
216
|
+
if node.user_prompt:
|
|
217
|
+
out = [self.summary(p) for p in node.user_prompt]
|
|
218
|
+
return "\n".join(out)
|
|
219
|
+
return "<empty>"
|
|
220
|
+
|
|
221
|
+
def tool_description(self, tool_definition):
|
|
222
|
+
"""Format tool definition with name, description and parameters if multi-line."""
|
|
223
|
+
descr = f"`{tool_definition.name}`: {self.summary(tool_definition.description)}"
|
|
224
|
+
if has_multiple_lines(descr):
|
|
225
|
+
args = ""
|
|
226
|
+
for k, v in tool_definition.parameters_json_schema.items():
|
|
227
|
+
args += f"- {k}: {v}\n"
|
|
228
|
+
return f"`{tool_definition.name}`: {self.summary(tool_definition.description)}\n{args}"
|
|
229
|
+
return descr
|