aixtools-0.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aixtools might be problematic.

Files changed (88)
  1. aixtools/.chainlit/config.toml +113 -0
  2. aixtools/.chainlit/translations/bn.json +214 -0
  3. aixtools/.chainlit/translations/en-US.json +214 -0
  4. aixtools/.chainlit/translations/gu.json +214 -0
  5. aixtools/.chainlit/translations/he-IL.json +214 -0
  6. aixtools/.chainlit/translations/hi.json +214 -0
  7. aixtools/.chainlit/translations/ja.json +214 -0
  8. aixtools/.chainlit/translations/kn.json +214 -0
  9. aixtools/.chainlit/translations/ml.json +214 -0
  10. aixtools/.chainlit/translations/mr.json +214 -0
  11. aixtools/.chainlit/translations/nl.json +214 -0
  12. aixtools/.chainlit/translations/ta.json +214 -0
  13. aixtools/.chainlit/translations/te.json +214 -0
  14. aixtools/.chainlit/translations/zh-CN.json +214 -0
  15. aixtools/__init__.py +11 -0
  16. aixtools/_version.py +34 -0
  17. aixtools/a2a/app.py +126 -0
  18. aixtools/a2a/google_sdk/__init__.py +0 -0
  19. aixtools/a2a/google_sdk/card.py +27 -0
  20. aixtools/a2a/google_sdk/pydantic_ai_adapter/agent_executor.py +199 -0
  21. aixtools/a2a/google_sdk/pydantic_ai_adapter/storage.py +26 -0
  22. aixtools/a2a/google_sdk/remote_agent_connection.py +88 -0
  23. aixtools/a2a/google_sdk/utils.py +59 -0
  24. aixtools/a2a/utils.py +115 -0
  25. aixtools/agents/__init__.py +12 -0
  26. aixtools/agents/agent.py +164 -0
  27. aixtools/agents/agent_batch.py +71 -0
  28. aixtools/agents/prompt.py +97 -0
  29. aixtools/app.py +143 -0
  30. aixtools/chainlit.md +14 -0
  31. aixtools/compliance/__init__.py +9 -0
  32. aixtools/compliance/private_data.py +138 -0
  33. aixtools/context.py +17 -0
  34. aixtools/db/__init__.py +17 -0
  35. aixtools/db/database.py +110 -0
  36. aixtools/db/vector_db.py +115 -0
  37. aixtools/google/client.py +25 -0
  38. aixtools/log_view/__init__.py +17 -0
  39. aixtools/log_view/app.py +195 -0
  40. aixtools/log_view/display.py +285 -0
  41. aixtools/log_view/export.py +51 -0
  42. aixtools/log_view/filters.py +41 -0
  43. aixtools/log_view/log_utils.py +26 -0
  44. aixtools/log_view/node_summary.py +229 -0
  45. aixtools/logfilters/__init__.py +7 -0
  46. aixtools/logfilters/context_filter.py +67 -0
  47. aixtools/logging/__init__.py +30 -0
  48. aixtools/logging/log_objects.py +227 -0
  49. aixtools/logging/logging_config.py +161 -0
  50. aixtools/logging/mcp_log_models.py +102 -0
  51. aixtools/logging/mcp_logger.py +172 -0
  52. aixtools/logging/model_patch_logging.py +87 -0
  53. aixtools/logging/open_telemetry.py +36 -0
  54. aixtools/mcp/__init__.py +9 -0
  55. aixtools/mcp/client.py +375 -0
  56. aixtools/mcp/example_client.py +30 -0
  57. aixtools/mcp/example_server.py +22 -0
  58. aixtools/mcp/fast_mcp_log.py +31 -0
  59. aixtools/mcp/faulty_mcp.py +319 -0
  60. aixtools/model_patch/model_patch.py +63 -0
  61. aixtools/server/__init__.py +29 -0
  62. aixtools/server/app_mounter.py +90 -0
  63. aixtools/server/path.py +72 -0
  64. aixtools/server/utils.py +70 -0
  65. aixtools/server/workspace_privacy.py +65 -0
  66. aixtools/testing/__init__.py +9 -0
  67. aixtools/testing/aix_test_model.py +149 -0
  68. aixtools/testing/mock_tool.py +66 -0
  69. aixtools/testing/model_patch_cache.py +279 -0
  70. aixtools/tools/doctor/__init__.py +3 -0
  71. aixtools/tools/doctor/tool_doctor.py +61 -0
  72. aixtools/tools/doctor/tool_recommendation.py +44 -0
  73. aixtools/utils/__init__.py +35 -0
  74. aixtools/utils/chainlit/cl_agent_show.py +82 -0
  75. aixtools/utils/chainlit/cl_utils.py +168 -0
  76. aixtools/utils/config.py +131 -0
  77. aixtools/utils/config_util.py +69 -0
  78. aixtools/utils/enum_with_description.py +37 -0
  79. aixtools/utils/files.py +17 -0
  80. aixtools/utils/persisted_dict.py +99 -0
  81. aixtools/utils/utils.py +167 -0
  82. aixtools/vault/__init__.py +7 -0
  83. aixtools/vault/vault.py +137 -0
  84. aixtools-0.0.0.dist-info/METADATA +669 -0
  85. aixtools-0.0.0.dist-info/RECORD +88 -0
  86. aixtools-0.0.0.dist-info/WHEEL +5 -0
  87. aixtools-0.0.0.dist-info/entry_points.txt +2 -0
  88. aixtools-0.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,41 @@
+ """
+ Functions for filtering nodes based on various criteria.
+ """
+
+ import re
+ from typing import Any
+
+ from aixtools.log_view.node_summary import get_node_type
+
+
+ def filter_nodes(nodes: list, filters: dict[str, Any]) -> list:
+     """Filter nodes based on multiple criteria."""
+     if not filters:
+         return nodes
+
+     filtered_nodes = nodes.copy()
+
+     # Apply text filter if provided
+     if "text" in filters and filters["text"]:
+         text_filter = filters["text"].lower()
+         filtered_nodes = [node for node in filtered_nodes if text_filter in str(node).lower()]
+
+     # Apply type filter if provided
+     if "types" in filters and filters["types"]:
+         filtered_nodes = [node for node in filtered_nodes if get_node_type(node) in filters["types"]]
+
+     # Apply attribute filter if provided
+     if "attribute" in filters and filters["attribute"]:
+         attr_filter = filters["attribute"]
+         filtered_nodes = [node for node in filtered_nodes if hasattr(node, "__dict__") and attr_filter in vars(node)]
+
+     # Apply regex filter if provided
+     if "regex" in filters and filters["regex"]:
+         try:
+             pattern = re.compile(filters["regex"], re.IGNORECASE)
+             filtered_nodes = [node for node in filtered_nodes if pattern.search(str(node))]
+         except re.error:
+             # Invalid regex pattern, ignore this filter
+             pass
+
+     return filtered_nodes
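
For context, a minimal sketch of how filter_nodes might be called from the log viewer. The node list and filter values below are hypothetical, not taken from this package; note that filters are applied in sequence, so a node must satisfy all of them.

from aixtools.log_view.filters import filter_nodes

# Hypothetical node list; in the package these come from a pickled agent run.
nodes = ["UserPromptNode: hello", "ToolCallPart: search(query='x')"]

# Combine a case-insensitive substring filter with a regex filter.
filters = {"text": "tool", "regex": r"search\(.*\)"}
print(filter_nodes(nodes, filters))  # keeps only nodes matching both criteria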
@@ -0,0 +1,26 @@
+ """
+ Utility functions for handling log files.
+ """
+
+ from datetime import datetime
+ from pathlib import Path
+
+
+ def get_log_files(log_dir: Path) -> list[Path]:
+     """Get all log files in the specified directory, sorted by modification time (newest first)."""
+     if not log_dir.exists():
+         return []
+     log_files = list(log_dir.glob("agent_run.*.pkl"))
+     log_files.sort(key=lambda x: x.stat().st_mtime, reverse=True)
+     return log_files
+
+
+ def format_timestamp_from_filename(filename: str) -> str:
+     """Extract and format the timestamp from a log filename."""
+     try:
+         # Extract timestamp from format "agent_run.YYYYMMDD_HHMMSS.pkl"
+         timestamp_str = filename.split("agent_run.")[1].split(".pkl")[0]
+         timestamp = datetime.strptime(timestamp_str, "%Y%m%d_%H%M%S")
+         return timestamp.strftime("%Y-%m-%d %H:%M:%S")
+     except (IndexError, ValueError):
+         return "Unknown date"
@@ -0,0 +1,229 @@
+ """
+ Utility functions for working with node objects.
+ """
+
+ import json
+ import traceback
+
+ import rich
+ from mcp.types import CallToolResult, EmbeddedResource, ImageContent, TextContent
+ from pydantic_ai import CallToolsNode, ModelRequestNode, UserPromptNode
+ from pydantic_ai.messages import (
+     ModelRequest,
+     ModelResponse,
+     RetryPromptPart,
+     SystemPromptPart,
+     TextPart,
+     ToolCallPart,
+     ToolReturnPart,
+     UserPromptPart,
+ )
+ from pydantic_ai.models import ModelRequestParameters
+ from pydantic_ai.result import FinalResult
+ from pydantic_ai.usage import Usage
+ from pydantic_graph import End
+
+ from aixtools.logging.logging_config import get_logger
+ from aixtools.logging.model_patch_logging import ModelRawRequest, ModelRawRequestResult
+ from aixtools.utils.utils import escape_newline
+
+ logger = get_logger(__name__)
+
+ MAX_STR_LEN = 200
+ DEBUG = False
+
+
+ def has_multiple_lines(s: str) -> bool:
+     """Check if a string has multiple lines."""
+     return s.count("\n") > 1
+
+
+ def get_node_type(node):
+     """Return the type name of a node as a string."""
+     return str(type(node).__name__)
+
+
+ def extract_node_types(nodes: list) -> set[str]:
+     """Extract all unique node types from a list of nodes."""
+     types = set()
+     for node in nodes:
+         node_type = get_node_type(node)
+         types.add(node_type)
+     return types
+
+
+ def to_str(s, max_len=MAX_STR_LEN):
+     """Format string content with appropriate quoting based on content structure."""
+     s = str(s)
+     if has_multiple_lines(s):
+         s = escape_newline(s)
+     if len(s) > max_len:
+         s = s[:max_len] + "..."
+     return s
+
+
+ def try_json(s):
+     """Attempt to parse string as JSON, returning parsed object or original string."""
+     # Can it be parsed as a JSON object?
+     try:
+         d = json.loads(s)
+         return d
+     except Exception:  # pylint: disable=broad-exception-caught
+         pass
+     return s
+
+
+ class NodeTitle:
+     """Class to create a title for nodes in a human-readable format."""
+
+     def __init__(self):
+         pass
+
+     def summary(self, node):  # noqa: PLR0911, PLR0912, pylint: disable=too-many-return-statements,too-many-branches
+         """Generate a summary string for a node."""
+         if node is None:
+             return "None"
+         _type = str(type(node).__name__)
+         if DEBUG:
+             rich.print(node)
+         try:
+             match node:
+                 case str() | bool() | float() | int():
+                     return f"`{_type}`: {to_str(node)}"
+                 case list():
+                     return to_str(f"`list` ({len(node)}):\n[" + "\n, ".join([self.summary(n) for n in node]) + "]")
+                 case dict():
+                     return to_str(
+                         f"`dict` ({len(node)}): "
+                         + "{"
+                         + "\n, ".join([f"{k}: {self.summary(v)}" for k, v in node.items()])
+                         + "}"
+                     )
+                 case tuple():
+                     if len(node) == 0:
+                         return "`tuple`: Empty"
+                     items = [self.summary(n) for n in node]
+                     items_str = "(" + ", ".join([str(item) for item in items]) + ")"
+                     return f"`tuple` ({len(node)}): {to_str(items_str)}"
+                 case CallToolsNode():
+                     return f"`{_type}`: {to_str(self.summary(node.model_response))}"
+                 case CallToolResult():
+                     return f"`{_type}`: {to_str(self.summary_call_tool_result(node))}"
+                 case End():
+                     return f"`{_type}`: {to_str(self.summary(node.data))}"
+                 case FinalResult():
+                     if hasattr(node, "data"):
+                         return f"`{_type}`: {to_str(self.summary(node.data))}"
+                     if node.tool_name:
+                         return f"`{_type}`: {to_str(node.tool_name)}"
+                     return f"`{_type}`"
+                 case ModelRawRequest():
+                     return f"`{_type}`: {to_str(self.summary_model_raw_request(node))}"
+                 case ModelRawRequestResult():
+                     return f"`{_type}`: {to_str(self.summary(node.result))}"
+                 case ModelRequest():
+                     return f"`{_type}`: {to_str(self.summary_model_request(node))}"
+                 case ModelRequestNode():
+                     return f"`{_type}`: {to_str(self.summary(node.request))}"
+                 case ModelRequestParameters():
+                     return f"`{_type}`: {to_str(self.summary_model_request_parameters(node))}"
+                 case ModelResponse():
+                     return f"`{_type}`: {to_str(self.summary_model_response(node))}"
+                 case TextPart() | SystemPromptPart() | UserPromptPart() | ToolReturnPart() | RetryPromptPart():
+                     return self.summary(node.content)
+                 case TextContent():
+                     return self.summary(node.text)
+                 case ImageContent():
+                     return f"Image: {node.mimeType}"
+                 case EmbeddedResource():
+                     return f"Resource: {node.resource}"
+                 case UserPromptNode():
+                     return f"`{_type}`: {to_str(self.summary_user_prompt(node))}"
+                 case ToolCallPart():
+                     args = node.args
+                     if isinstance(args, str):
+                         args = try_json(args)
+                     if isinstance(args, dict):
+                         args = ", ".join([f"{k} = {self.summary(v)}" for k, v in args.items()])
+                     return f"{node.tool_name}({to_str(args)})"
+                 case Usage():
+                     return f"`{_type}`: {to_str(self.summary_usage(node))}"
+                 case _:
+                     logger.debug("NodeSummary.summary(): Unknown node type %s", type(node))
+                     return f"`{type(node)}`: {str(node)}"
+         except Exception as e:  # pylint: disable=broad-exception-caught
+             print(f"Error while summarizing {_type}: {e}")
+             traceback.print_exc()
+             return f"`{_type}`: {to_str(node)}"
+
+     def summary_call_tool_result(self, node: CallToolResult):
+         """Generate summary for CallToolResult node by joining content summaries."""
+         out = [self.summary(c) for c in node.content]
+         return "\n".join(out)
+
+     def summary_model_raw_request(self, node: ModelRawRequest):
+         """Format ModelRawRequest node showing args and kwargs in readable format."""
+         args = [self.summary(p) for p in node.args]
+         kwargs = [f"{k}={self.summary(v)}" for k, v in node.kwargs.items()]
+         out = ""
+         if len(args) > 0:
+             out += ", ".join(args)
+         if len(kwargs) > 0:
+             if len(out) > 0:
+                 out += ", "
+             out += ", ".join(kwargs)  # entries are already formatted as "key=summary"
+         return out
+
+     def summary_model_request(self, node: ModelRequest):
+         """Generate summary for ModelRequest by joining part summaries."""
+         out = [self.summary(p) for p in node.parts]
+         return "\n".join(out)
+
+     def summary_model_request_parameters(self, node: ModelRequestParameters):
+         """Format model request parameters with tools and result tools."""
+         out = ""
+
+         if hasattr(node, "function_tools"):
+             tools = [self.tool_description(tool_definition) for tool_definition in node.function_tools]
+             if len(tools) > 0:
+                 if len(tools) == 1:
+                     out += f"Tool: {tools[0]}"
+                 else:
+                     out += "Tools:\n" + "\n".join(tools)
+
+         if hasattr(node, "output_tools"):
+             result_tools = [self.tool_description(tool_definition) for tool_definition in node.output_tools]
+             if len(result_tools) > 0:
+                 if len(out) > 0:
+                     out += "\n"
+                 out += "Output Tools:\n" + "\n".join(result_tools)
+
+         return out if len(out) > 0 else ""
+
+     def summary_model_response(self, node: ModelResponse):
+         """Generate summary for ModelResponse by joining part summaries."""
+         out = [self.summary(p) for p in node.parts]
+         return "\n".join(out)
+
+     def summary_usage(self, node: Usage):
+         """Format token usage information showing request and response tokens."""
+         return f"tokens: ({node.request_tokens}, {node.response_tokens})"
+
+     def summary_user_prompt(self, node: UserPromptNode):
+         """Generate summary for UserPromptNode handling both string and list formats."""
+         if isinstance(node.user_prompt, str):
+             return self.summary(node.user_prompt)
+         if node.user_prompt:
+             out = [self.summary(p) for p in node.user_prompt]
+             return "\n".join(out)
+         return "<empty>"
+
+     def tool_description(self, tool_definition):
+         """Format tool definition with name, description and parameters if multi-line."""
+         descr = f"`{tool_definition.name}`: {self.summary(tool_definition.description)}"
+         if has_multiple_lines(descr):
+             args = ""
+             for k, v in tool_definition.parameters_json_schema.items():
+                 args += f"- {k}: {v}\n"
+             return f"`{tool_definition.name}`: {self.summary(tool_definition.description)}\n{args}"
+         return descr
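
As a rough sketch of what NodeTitle produces for plain Python values (pydantic-ai nodes follow the same `` `Type`: summary `` pattern); the exact output depends on to_str's newline escaping and truncation, so the comments below are approximate.

from aixtools.log_view.node_summary import NodeTitle

title = NodeTitle()
print(title.summary("hello"))      # -> `str`: hello
print(title.summary([1, 2, 3]))    # -> `list` (3): [...] with per-item summaries
print(title.summary((True, 3.5)))  # -> `tuple` (2): (`bool`: True, `float`: 3.5)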
@@ -0,0 +1,7 @@
+ """
+ Logging filters for AixTools.
+ """
+
+ from aixtools.logfilters.context_filter import ContextFilter
+
+ __all__ = ["ContextFilter"]
@@ -0,0 +1,67 @@
+ """
+ A logging filter for injecting contextual information into log records.
+ """
+
+ import logging
+
+
+ class ContextFilter(logging.Filter):  # pylint: disable=too-few-public-methods
+     """
+     A logging filter that injects a formatted context string (user and session
+     IDs) into the log record. It sources the IDs from the active FastMCP
+     application context and ignores default values.
+     """
+
+     def _extract_from_mcp_context(self) -> tuple[str | None, str | None]:
+         """
+         Retrieve the session id (aka conversation id) and user id from the MCP context.
+         Useful in MCP servers.
+         """
+         try:
+             from aixtools.server.utils import (  # noqa: PLC0415 # pylint: disable=import-outside-toplevel
+                 get_session_id_tuple,
+             )
+
+             return get_session_id_tuple()
+         except (ImportError, RuntimeError, ValueError):
+             # Context is not available
+             return None, None
+
+     def filter(self, record: logging.LogRecord) -> bool:
+         """
+         Adds a `context` string to the log record.
+
+         The filter first attempts to extract the user and session (conversation)
+         IDs from context variables. If that fails, it falls back to extracting
+         the IDs from the FastMCP context.
+
+         If valid IDs are found, the `context` attribute is formatted as
+         `[session_id][user_id]`. Otherwise, it is an empty string.
+         """
+         user_id = None
+         session_id = None
+
+         try:
+             # First, try to get context from the global context variables
+             from aixtools.context import (  # noqa: PLC0415 # pylint: disable=import-outside-toplevel
+                 session_id_var,
+                 user_id_var,
+             )
+
+             user_id = user_id_var.get()
+             session_id = session_id_var.get()
+         except ImportError:
+             pass
+
+         if not user_id and not session_id:
+             user_id, session_id = self._extract_from_mcp_context()
+
+         context = ""
+         if session_id and not str(session_id).startswith("default"):
+             context += f"[{session_id}]"
+         if user_id and not str(user_id).startswith("default"):
+             context += f"[{user_id}]"
+
+         record.context = context
+
+         return True
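
A minimal sketch of wiring ContextFilter into stdlib logging. The handler setup and format string are illustrative, not this package's logging_config; the only assumption is that the filter sets `record.context` on every record, as the code above shows.

import logging

from aixtools.logfilters import ContextFilter

handler = logging.StreamHandler()
# %(context)s is the attribute the filter sets on every record.
handler.setFormatter(logging.Formatter("%(levelname)s %(context)s %(message)s"))
handler.addFilter(ContextFilter())

log = logging.getLogger("demo")
log.addHandler(handler)
log.warning("hello")  # e.g. "WARNING [session][user] hello", or empty context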
@@ -0,0 +1,30 @@
+ """
+ Logging utilities for AI agent operations and model interactions.
+ """
+
+ from aixtools.logging.log_objects import ObjectLogger
+ from aixtools.logging.mcp_log_models import (
+     BaseLogEntry,
+     CodeLogEntry,
+     CommandLogEntry,
+     Language,
+     LogEntry,
+     LogType,
+     ProcessResult,
+     SystemLogEntry,
+ )
+ from aixtools.logging.mcp_logger import JSONFileMcpLogger, McpLogger
+
+ __all__ = [
+     "ObjectLogger",
+     "LogType",
+     "Language",
+     "ProcessResult",
+     "BaseLogEntry",
+     "CommandLogEntry",
+     "CodeLogEntry",
+     "SystemLogEntry",
+     "LogEntry",
+     "McpLogger",
+     "JSONFileMcpLogger",
+ ]
@@ -0,0 +1,227 @@
+ """
+ This module provides functionality to save objects to a log file using pickle.
+ It includes a function to check if an object is pickleable and a function to perform a safe deepcopy of objects.
+ It also includes a function to save the objects to a log file with a timestamp.
+ """
+
+ import logging
+ import pickle
+ import traceback
+ from copy import copy
+ from datetime import datetime
+ from pathlib import Path
+ from types import NoneType
+ from typing import Mapping, Sequence, Union
+
+ import rich
+
+ from aixtools.logging.logging_config import get_logger
+ from aixtools.utils.config import LOG_LEVEL, LOGS_DIR
+
+ logger = get_logger(__name__)
+
+ _is_pickleable_cache = {}
+
+
+ class ExceptionWrapper:  # pylint: disable=too-few-public-methods
+     """
+     A wrapper for exceptions to make them pickleable.
+     It stores the exception type and message.
+     """
+
+     def __init__(self, exception):
+         self.exc_type = str(type(exception))
+         self.exc_value = str(exception)
+         self.exc_traceback = traceback.format_exc()
+
+     def __str__(self):
+         return f"{self.exc_type}: {self.exc_value}\n{self.exc_traceback}"
+
+
+ def is_pickleable(obj):
+     """
+     Check if an object is pickleable.
+     Uses a cache to avoid repeated checks for the same type.
+     """
+     obj_type = type(obj)
+     module_name = getattr(obj_type, "__module__", "")
+
+     # FastMCP's json_schema_to_type rewrites __module__, which breaks pickling but would otherwise hit the cache
+     if module_name == "fastmcp.utilities.json_schema_type":
+         return False
+
+     if obj_type not in _is_pickleable_cache:
+         try:
+             pickle.loads(pickle.dumps(obj))
+             _is_pickleable_cache[obj_type] = True
+         except Exception:  # pylint: disable=broad-exception-caught
+             _is_pickleable_cache[obj_type] = False
+     return _is_pickleable_cache[obj_type]
+
+
+ def load_from_log(log_file: Path):
+     """
+     Load objects from a log file.
+     It reads the file in binary mode and uses pickle to deserialize the objects.
+     Returns a list of objects.
+     """
+     objects = []
+     with open(log_file, "rb") as f:
+         while True:
+             try:
+                 obj = pickle.load(f)
+                 objects.append(obj)
+             except EOFError:
+                 break
+     return objects
+
+
+ def safe_deepcopy(obj):
+     """
+     A safe deepcopy function that handles unpickleable objects.
+     It uses 'is_pickleable' to check if the object is serializable and
+     performs a shallow copy for unpickleable objects.
+     """
+     if isinstance(obj, Exception):
+         # Wrap exceptions to make them pickleable
+         obj = ExceptionWrapper(obj)
+
+     if is_pickleable(obj):
+         return pickle.loads(pickle.dumps(obj))  # Fast path
+
+     if isinstance(obj, Mapping):
+         return {k: safe_deepcopy(v) for k, v in obj.items() if is_pickleable(k)}
+
+     if isinstance(obj, Sequence) and not isinstance(obj, str):
+         return [safe_deepcopy(item) for item in obj]
+
+     if hasattr(obj, "__dict__"):
+         copy_obj = copy(obj)
+         for attr, value in vars(obj).items():
+             if is_pickleable(value):
+                 setattr(copy_obj, attr, safe_deepcopy(value))
+             else:
+                 setattr(copy_obj, attr, None)  # Remove unpickleable field
+         return copy_obj
+
+     return None  # fallback for non-serializable, non-introspectable objects
+
+
+ def save_objects_to_logfile(objects: list, log_dir=LOGS_DIR):
+     """Save the objects to a (pickle) log file"""
+     with ObjectLogger(log_dir=log_dir) as object_logger:
+         for obj in objects:
+             object_logger.log(obj)
+
+
+ class ObjectLogger:
+     """
+     A context manager for logging objects to a file.
+     It uses pickle to save the objects and handles exceptions during the save process.
+     """
+
+     def __init__(
+         self,
+         log_dir=LOGS_DIR,
+         verbose: bool = True,
+         debug: bool | None = None,
+         parent_logger: Union["ObjectLogger", NoneType] = None,
+     ):
+         self.verbose = verbose
+         self.debug = (
+             debug if debug is not None else (LOG_LEVEL == logging.DEBUG)
+         )  # Use the debug level from the config if not provided
+         self.log_dir = log_dir
+         self.file = None
+         self.parent_logger = parent_logger
+         self.init_log_file()
+
+     def has_parent(self):
+         """
+         Check if the logger has a parent.
+         If it does, it will not create a new log file.
+         """
+         return self.parent_logger is not None
+
+     def init_log_file(self):
+         """Initialize log file for recording agent operations."""
+         if self.has_parent():
+             # Do nothing: delegates to the parent logger
+             return
+         # Create log file name
+         self.log_dir.mkdir(parents=True, exist_ok=True)
+         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+         self.log_file = self.log_dir / f"agent_run.{timestamp}.pkl"
+         logger.info("Logging to %s", self.log_file)
+
+     def __enter__(self):
+         if self.has_parent():
+             # Do nothing: delegates to the parent logger
+             return self
+         self.file = open(self.log_file, "ab")  # append in binary mode
+         return self
+
+     def log(self, obj):
+         """
+         Log an object to the file.
+         It uses safe_deepcopy to ensure the object is pickleable.
+         """
+         if self.has_parent():
+             # Delegate to the parent logger
+             self.parent_logger.log(obj)
+         else:
+             try:
+                 if self.debug:
+                     rich.print(obj, flush=True)
+                 elif self.verbose:
+                     print(obj, flush=True)
+                 obj_to_save = safe_deepcopy(obj)
+                 pickle.dump(obj_to_save, self.file)
+                 self.file.flush()  # ensure it's written immediately
+             except Exception as e:  # pylint: disable=broad-exception-caught
+                 logger.error("Failed to log object: %s", e)
+                 logger.error(traceback.format_exc())
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         if self.has_parent():
+             # Do nothing: delegates to the parent logger
+             pass
+         elif self.file:
+             self.file.close()
+
+
+ class NullObjectLogger:
+     """
+     A null logger that does nothing.
+     """
+
+     def __init__(self, **kwargs):
+         pass
+
+     def __enter__(self):
+         return self  # return self so `with NullObjectLogger() as log:` works
+
+     def log(self, obj):
+         """Discard the object; this logger intentionally does nothing."""
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         pass
+
+
+ class PrintObjectLogger:
+     """
+     Print to stdout.
+     """
+
+     def __init__(self, **kwargs):
+         pass
+
+     def __enter__(self):
+         return self  # return self so `with PrintObjectLogger() as log:` works
+
+     def log(self, obj):
+         """Log an object using rich print for formatted output."""
+         rich.print(obj, flush=True)
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         pass
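
A small round-trip sketch using ObjectLogger and load_from_log, assuming only the code above; the temporary directory is illustrative, and the exception is wrapped as a pickleable ExceptionWrapper by safe_deepcopy.

import tempfile
from pathlib import Path

from aixtools.logging.log_objects import ObjectLogger, load_from_log

log_dir = Path(tempfile.mkdtemp())
with ObjectLogger(log_dir=log_dir, verbose=False) as object_logger:
    object_logger.log({"step": 1, "ok": True})
    object_logger.log(ValueError("boom"))  # stored as an ExceptionWrapper

# Read back every pickled object from the agent_run.*.pkl file just written.
log_file = sorted(log_dir.glob("agent_run.*.pkl"))[-1]
for obj in load_from_log(log_file):
    print(obj)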