waldiez 0.6.0__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of waldiez might be problematic. Click here for more details.

Files changed (188) hide show
  1. waldiez/__init__.py +1 -1
  2. waldiez/_version.py +1 -1
  3. waldiez/cli.py +18 -7
  4. waldiez/cli_extras/jupyter.py +3 -0
  5. waldiez/cli_extras/runner.py +3 -1
  6. waldiez/cli_extras/studio.py +3 -1
  7. waldiez/exporter.py +9 -3
  8. waldiez/exporting/agent/exporter.py +9 -10
  9. waldiez/exporting/agent/extras/captain_agent_extras.py +6 -6
  10. waldiez/exporting/agent/extras/doc_agent_extras.py +6 -6
  11. waldiez/exporting/agent/extras/group_manager_agent_extas.py +34 -23
  12. waldiez/exporting/agent/extras/group_member_extras.py +6 -5
  13. waldiez/exporting/agent/extras/handoffs/after_work.py +1 -1
  14. waldiez/exporting/agent/extras/handoffs/available.py +1 -1
  15. waldiez/exporting/agent/extras/handoffs/condition.py +3 -2
  16. waldiez/exporting/agent/extras/handoffs/handoff.py +1 -1
  17. waldiez/exporting/agent/extras/handoffs/target.py +6 -4
  18. waldiez/exporting/agent/extras/rag/chroma_extras.py +27 -19
  19. waldiez/exporting/agent/extras/rag/mongo_extras.py +8 -8
  20. waldiez/exporting/agent/extras/rag/pgvector_extras.py +5 -5
  21. waldiez/exporting/agent/extras/rag/qdrant_extras.py +5 -4
  22. waldiez/exporting/agent/extras/rag/vector_db_extras.py +1 -1
  23. waldiez/exporting/agent/extras/rag_user_proxy_agent_extras.py +5 -7
  24. waldiez/exporting/agent/extras/reasoning_agent_extras.py +3 -5
  25. waldiez/exporting/chats/exporter.py +4 -4
  26. waldiez/exporting/chats/processor.py +1 -2
  27. waldiez/exporting/chats/utils/common.py +89 -48
  28. waldiez/exporting/chats/utils/group.py +9 -9
  29. waldiez/exporting/chats/utils/nested.py +7 -7
  30. waldiez/exporting/chats/utils/sequential.py +1 -1
  31. waldiez/exporting/chats/utils/single.py +2 -2
  32. waldiez/exporting/core/content.py +7 -7
  33. waldiez/exporting/core/context.py +5 -3
  34. waldiez/exporting/core/exporter.py +5 -3
  35. waldiez/exporting/core/exporters.py +2 -2
  36. waldiez/exporting/core/extras/agent_extras/captain_extras.py +2 -2
  37. waldiez/exporting/core/extras/agent_extras/group_manager_extras.py +2 -2
  38. waldiez/exporting/core/extras/agent_extras/rag_user_extras.py +2 -2
  39. waldiez/exporting/core/extras/agent_extras/standard_extras.py +3 -8
  40. waldiez/exporting/core/extras/base.py +7 -5
  41. waldiez/exporting/core/extras/flow_extras.py +4 -5
  42. waldiez/exporting/core/extras/model_extras.py +2 -2
  43. waldiez/exporting/core/extras/path_resolver.py +1 -2
  44. waldiez/exporting/core/extras/serializer.py +2 -2
  45. waldiez/exporting/core/protocols.py +6 -5
  46. waldiez/exporting/core/result.py +25 -28
  47. waldiez/exporting/core/types.py +10 -10
  48. waldiez/exporting/core/utils/llm_config.py +2 -2
  49. waldiez/exporting/core/validation.py +10 -11
  50. waldiez/exporting/flow/execution_generator.py +98 -10
  51. waldiez/exporting/flow/exporter.py +2 -2
  52. waldiez/exporting/flow/factory.py +2 -2
  53. waldiez/exporting/flow/file_generator.py +4 -2
  54. waldiez/exporting/flow/merger.py +5 -3
  55. waldiez/exporting/flow/orchestrator.py +72 -2
  56. waldiez/exporting/flow/utils/common.py +5 -5
  57. waldiez/exporting/flow/utils/importing.py +6 -7
  58. waldiez/exporting/flow/utils/linting.py +25 -9
  59. waldiez/exporting/flow/utils/logging.py +2 -2
  60. waldiez/exporting/models/exporter.py +8 -8
  61. waldiez/exporting/models/processor.py +5 -5
  62. waldiez/exporting/tools/exporter.py +2 -2
  63. waldiez/exporting/tools/processor.py +7 -4
  64. waldiez/io/__init__.py +8 -4
  65. waldiez/io/_ws.py +10 -6
  66. waldiez/io/models/constants.py +10 -10
  67. waldiez/io/models/content/audio.py +1 -0
  68. waldiez/io/models/content/base.py +20 -18
  69. waldiez/io/models/content/file.py +1 -0
  70. waldiez/io/models/content/image.py +1 -0
  71. waldiez/io/models/content/text.py +1 -0
  72. waldiez/io/models/content/video.py +1 -0
  73. waldiez/io/models/user_input.py +10 -5
  74. waldiez/io/models/user_response.py +17 -16
  75. waldiez/io/mqtt.py +18 -31
  76. waldiez/io/redis.py +18 -22
  77. waldiez/io/structured.py +52 -53
  78. waldiez/io/utils.py +3 -0
  79. waldiez/io/ws.py +5 -1
  80. waldiez/logger.py +16 -3
  81. waldiez/models/agents/__init__.py +3 -0
  82. waldiez/models/agents/agent/agent.py +23 -16
  83. waldiez/models/agents/agent/agent_data.py +25 -22
  84. waldiez/models/agents/agent/code_execution.py +9 -11
  85. waldiez/models/agents/agent/termination_message.py +10 -12
  86. waldiez/models/agents/agent/update_system_message.py +2 -4
  87. waldiez/models/agents/agents.py +8 -8
  88. waldiez/models/agents/assistant/assistant.py +6 -3
  89. waldiez/models/agents/assistant/assistant_data.py +2 -2
  90. waldiez/models/agents/captain/captain_agent.py +7 -4
  91. waldiez/models/agents/captain/captain_agent_data.py +5 -7
  92. waldiez/models/agents/doc_agent/doc_agent.py +7 -4
  93. waldiez/models/agents/doc_agent/doc_agent_data.py +9 -10
  94. waldiez/models/agents/doc_agent/rag_query_engine.py +10 -12
  95. waldiez/models/agents/extra_requirements.py +3 -3
  96. waldiez/models/agents/group_manager/group_manager.py +12 -7
  97. waldiez/models/agents/group_manager/group_manager_data.py +13 -12
  98. waldiez/models/agents/group_manager/speakers.py +17 -19
  99. waldiez/models/agents/rag_user_proxy/rag_user_proxy.py +7 -4
  100. waldiez/models/agents/rag_user_proxy/rag_user_proxy_data.py +4 -1
  101. waldiez/models/agents/rag_user_proxy/retrieve_config.py +69 -63
  102. waldiez/models/agents/rag_user_proxy/vector_db_config.py +19 -19
  103. waldiez/models/agents/reasoning/reasoning_agent.py +7 -4
  104. waldiez/models/agents/reasoning/reasoning_agent_data.py +3 -2
  105. waldiez/models/agents/reasoning/reasoning_agent_reason_config.py +8 -8
  106. waldiez/models/agents/user_proxy/user_proxy.py +6 -3
  107. waldiez/models/agents/user_proxy/user_proxy_data.py +1 -1
  108. waldiez/models/chat/chat.py +27 -20
  109. waldiez/models/chat/chat_data.py +22 -19
  110. waldiez/models/chat/chat_message.py +9 -9
  111. waldiez/models/chat/chat_nested.py +9 -9
  112. waldiez/models/chat/chat_summary.py +6 -6
  113. waldiez/models/common/__init__.py +2 -0
  114. waldiez/models/common/ag2_version.py +2 -0
  115. waldiez/models/common/dict_utils.py +8 -6
  116. waldiez/models/common/handoff.py +18 -17
  117. waldiez/models/common/method_utils.py +7 -7
  118. waldiez/models/common/naming.py +49 -0
  119. waldiez/models/flow/flow.py +11 -6
  120. waldiez/models/flow/flow_data.py +23 -17
  121. waldiez/models/flow/info.py +3 -3
  122. waldiez/models/flow/naming.py +2 -1
  123. waldiez/models/model/_aws.py +11 -13
  124. waldiez/models/model/_llm.py +5 -0
  125. waldiez/models/model/_price.py +2 -4
  126. waldiez/models/model/extra_requirements.py +1 -3
  127. waldiez/models/model/model.py +2 -2
  128. waldiez/models/model/model_data.py +21 -21
  129. waldiez/models/tool/extra_requirements.py +2 -4
  130. waldiez/models/tool/predefined/_duckduckgo.py +1 -0
  131. waldiez/models/tool/predefined/_email.py +1 -0
  132. waldiez/models/tool/predefined/_google.py +1 -0
  133. waldiez/models/tool/predefined/_perplexity.py +1 -0
  134. waldiez/models/tool/predefined/_searxng.py +1 -0
  135. waldiez/models/tool/predefined/_tavily.py +1 -0
  136. waldiez/models/tool/predefined/_wikipedia.py +1 -0
  137. waldiez/models/tool/predefined/_youtube.py +1 -0
  138. waldiez/models/tool/tool.py +8 -5
  139. waldiez/models/tool/tool_data.py +2 -2
  140. waldiez/models/waldiez.py +152 -4
  141. waldiez/runner.py +11 -5
  142. waldiez/running/async_utils.py +192 -0
  143. waldiez/running/base_runner.py +117 -264
  144. waldiez/running/dir_utils.py +52 -0
  145. waldiez/running/environment.py +10 -44
  146. waldiez/running/events_mixin.py +252 -0
  147. waldiez/running/exceptions.py +20 -0
  148. waldiez/running/gen_seq_diagram.py +18 -15
  149. waldiez/running/io_utils.py +216 -0
  150. waldiez/running/protocol.py +11 -5
  151. waldiez/running/requirements_mixin.py +65 -0
  152. waldiez/running/results_mixin.py +926 -0
  153. waldiez/running/standard_runner.py +22 -25
  154. waldiez/running/step_by_step/breakpoints_mixin.py +192 -60
  155. waldiez/running/step_by_step/command_handler.py +3 -0
  156. waldiez/running/step_by_step/events_processor.py +194 -14
  157. waldiez/running/step_by_step/step_by_step_models.py +110 -43
  158. waldiez/running/step_by_step/step_by_step_runner.py +107 -57
  159. waldiez/running/subprocess_runner/__base__.py +9 -1
  160. waldiez/running/subprocess_runner/_async_runner.py +5 -3
  161. waldiez/running/subprocess_runner/_sync_runner.py +6 -2
  162. waldiez/running/subprocess_runner/runner.py +39 -23
  163. waldiez/running/timeline_processor.py +1 -1
  164. waldiez/utils/__init__.py +2 -0
  165. waldiez/utils/conflict_checker.py +4 -4
  166. waldiez/utils/python_manager.py +415 -0
  167. waldiez/ws/_file_handler.py +18 -18
  168. waldiez/ws/_mock.py +2 -1
  169. waldiez/ws/cli.py +36 -12
  170. waldiez/ws/client_manager.py +35 -27
  171. waldiez/ws/errors.py +3 -0
  172. waldiez/ws/models.py +43 -52
  173. waldiez/ws/reloader.py +12 -4
  174. waldiez/ws/server.py +85 -55
  175. waldiez/ws/session_manager.py +8 -9
  176. waldiez/ws/session_stats.py +1 -1
  177. waldiez/ws/utils.py +4 -1
  178. {waldiez-0.6.0.dist-info → waldiez-0.6.1.dist-info}/METADATA +82 -93
  179. waldiez-0.6.1.dist-info/RECORD +254 -0
  180. waldiez/running/post_run.py +0 -186
  181. waldiez/running/pre_run.py +0 -281
  182. waldiez/running/run_results.py +0 -14
  183. waldiez/running/utils.py +0 -625
  184. waldiez-0.6.0.dist-info/RECORD +0 -251
  185. {waldiez-0.6.0.dist-info → waldiez-0.6.1.dist-info}/WHEEL +0 -0
  186. {waldiez-0.6.0.dist-info → waldiez-0.6.1.dist-info}/entry_points.txt +0 -0
  187. {waldiez-0.6.0.dist-info → waldiez-0.6.1.dist-info}/licenses/LICENSE +0 -0
  188. {waldiez-0.6.0.dist-info → waldiez-0.6.1.dist-info}/licenses/NOTICE.md +0 -0
@@ -0,0 +1,926 @@
1
+ # SPDX-License-Identifier: Apache-2.0.
2
+ # Copyright (c) 2024 - 2025 Waldiez and contributors.
3
+
4
+ # pylint: disable=broad-exception-caught,too-many-try-statements,unused-argument
5
+ # pyright: reportUnknownVariableType=false, reportUnknownMemberType=false
6
+ # pyright: reportUnknownArgumentType=false, reportUnusedParameter=false
7
+
8
+ """Waldiez run results module."""
9
+
10
+ import csv
11
+ import datetime
12
+ import json
13
+ import shutil
14
+ import sqlite3
15
+ from pathlib import Path
16
+ from typing import Any, TypedDict
17
+
18
+ import aiofiles
19
+ import anyio.to_thread
20
+
21
+ from .gen_seq_diagram import generate_sequence_diagram
22
+ from .io_utils import get_printer
23
+ from .timeline_processor import TimelineProcessor
24
+
25
+
26
+ class WaldiezRunResults(TypedDict):
27
+ """Results of the Waldiez run."""
28
+
29
+ results: list[dict[str, Any]]
30
+ exception: BaseException | None
31
+ completed: bool
32
+
33
+
34
+ class ResultsMixin:
35
+ """Results related static methods."""
36
+
37
+ # noinspection PyUnusedLocal
38
+ @staticmethod
39
+ def post_run(
40
+ results: list[dict[str, Any]],
41
+ error: BaseException | None,
42
+ temp_dir: Path,
43
+ output_file: str | Path | None,
44
+ flow_name: str,
45
+ waldiez_file: Path,
46
+ uploads_root: Path | None = None,
47
+ skip_mmd: bool = False,
48
+ skip_timeline: bool = False,
49
+ ) -> Path | None:
50
+ """Actions to perform after running the flow.
51
+
52
+ Parameters
53
+ ----------
54
+ results : list[dict[str, Any]]
55
+ The results of the flow run.
56
+ error : BaseException | None
57
+ Optional error during the run.
58
+ temp_dir : Path
59
+ The temporary directory.
60
+ output_file : str | Path | None, optional
61
+ The output file.
62
+ flow_name : str
63
+ The flow name.
64
+ waldiez_file : Path
65
+ The path of the waldiez file used (or dumped) for the run.
66
+ uploads_root : Path | None, optional
67
+ The runtime uploads root, by default None
68
+ skip_mmd : bool, optional
69
+ Whether to skip the mermaid sequence diagram generation,
70
+ by default, False
71
+ skip_timeline : bool, optional
72
+ Whether to skip the timeline processing, by default False
73
+
74
+ Returns
75
+ -------
76
+ Path | None
77
+ The destination directory if output file, else None
78
+ """
79
+ if isinstance(output_file, str):
80
+ output_file = Path(output_file)
81
+ mmd_dir = output_file.parent if output_file else Path.cwd()
82
+ ResultsMixin.ensure_db_outputs(temp_dir)
83
+ if error is not None:
84
+ ResultsMixin.ensure_error_json(temp_dir, error)
85
+ else:
86
+ ResultsMixin.ensure_results_json(temp_dir, results)
87
+ if skip_mmd is False:
88
+ _make_mermaid_diagram(
89
+ temp_dir=temp_dir,
90
+ output_file=output_file,
91
+ flow_name=flow_name,
92
+ mmd_dir=mmd_dir,
93
+ )
94
+ if skip_timeline is False: # pragma: no branch
95
+ _make_timeline_json(temp_dir)
96
+ if output_file:
97
+ destination_dir = output_file.parent
98
+ destination_dir = (
99
+ destination_dir
100
+ / "waldiez_out"
101
+ / datetime.datetime.now().strftime("%Y%m%d%H%M%S")
102
+ )
103
+ destination_dir.mkdir(parents=True, exist_ok=True)
104
+ # copy the contents of the temp dir to the destination dir
105
+ print(f"Copying the results to {destination_dir}")
106
+ _copy_results(
107
+ temp_dir=temp_dir,
108
+ output_file=output_file,
109
+ destination_dir=destination_dir,
110
+ )
111
+ dst_waldiez = destination_dir / waldiez_file.name
112
+ if not dst_waldiez.exists() and waldiez_file.is_file():
113
+ shutil.copyfile(waldiez_file, dst_waldiez)
114
+ return destination_dir
115
+ shutil.rmtree(temp_dir)
116
+ return None
117
+
118
+ @staticmethod
119
+ async def a_post_run(
120
+ results: list[dict[str, Any]],
121
+ error: BaseException | None,
122
+ temp_dir: Path,
123
+ output_file: str | Path | None,
124
+ flow_name: str,
125
+ waldiez_file: Path,
126
+ uploads_root: Path | None = None,
127
+ skip_mmd: bool = False,
128
+ skip_timeline: bool = False,
129
+ ) -> Path | None:
130
+ """Actions to perform after running the flow.
131
+
132
+ Parameters
133
+ ----------
134
+ results : list[dict[str, Any]]
135
+ The results of the flow run.
136
+ error : BaseException | None
137
+ Optional error during the run.
138
+ temp_dir : Path
139
+ The temporary directory.
140
+ output_file : output_file : str | Path | None, optional
141
+ The output file.
142
+ flow_name : str
143
+ The flow name.
144
+ waldiez_file : Path
145
+ The path of the waldiez file used (or dumped) for the run.
146
+ uploads_root : Path | None, optional
147
+ The runtime uploads root, by default None
148
+ skip_mmd : bool, optional
149
+ Whether to skip the mermaid sequence diagram generation,
150
+ by default, False
151
+ skip_timeline : bool, optional
152
+ Whether to skip the timeline processing, by default False
153
+
154
+ Returns
155
+ -------
156
+ Path | None
157
+ The destination directory if output file, else None
158
+ """
159
+ return await anyio.to_thread.run_sync(
160
+ ResultsMixin.post_run,
161
+ results,
162
+ error,
163
+ temp_dir,
164
+ output_file,
165
+ flow_name,
166
+ waldiez_file,
167
+ uploads_root,
168
+ skip_mmd,
169
+ skip_timeline,
170
+ )
171
+
172
+ @staticmethod
173
+ def ensure_results_json(
174
+ output_dir: Path,
175
+ results: list[dict[str, Any]],
176
+ ) -> None:
177
+ """Ensure results.json exists in the output.
178
+
179
+ Parameters
180
+ ----------
181
+ output_dir : Path
182
+ The directory with the outputs.
183
+ results : list[dict[str, Any]]
184
+ The returned results.
185
+ """
186
+ from_json = _get_results_from_json(output_dir)
187
+ if from_json:
188
+ _store_full_results(output_dir)
189
+ return
190
+ _remove_results_json(output_dir)
191
+ results_json = output_dir / "results.json"
192
+ try:
193
+ with open(
194
+ results_json, "w", encoding="utf-8", newline="\n"
195
+ ) as file:
196
+ file.write(json.dumps({"results": results}))
197
+ except BaseException:
198
+ return
199
+ _store_full_results(output_dir)
200
+
201
+ @staticmethod
202
+ async def a_ensure_results_json(
203
+ output_dir: Path, results: list[dict[str, Any]]
204
+ ) -> None:
205
+ """Ensure results.json exists in the output.
206
+
207
+ Parameters
208
+ ----------
209
+ output_dir : Path
210
+ The directory with the outputs.
211
+ results : list[dict[str, Any]]
212
+ The returned results.
213
+ """
214
+ from_json = await _a_get_results_from_json(output_dir)
215
+ if from_json:
216
+ await _a_store_full_results(output_dir)
217
+ return
218
+ _remove_results_json(output_dir)
219
+ results_json = output_dir / "results.json"
220
+ try:
221
+ async with aiofiles.open(
222
+ results_json, "w", encoding="utf-8", newline="\n"
223
+ ) as file:
224
+ await file.write(json.dumps({"results": results}))
225
+ except BaseException:
226
+ return
227
+ await _a_store_full_results(output_dir)
228
+
229
+ @staticmethod
230
+ def ensure_error_json(output_dir: Path, error: BaseException) -> None:
231
+ """Ensure an error.json exists in the output.
232
+
233
+ Parameters
234
+ ----------
235
+ output_dir : Path
236
+ The path of the output
237
+ error : BaseException
238
+ The error that happened.
239
+ """
240
+ _ensure_error_json(output_dir, error)
241
+
242
+ @staticmethod
243
+ def get_results(
244
+ results: list[dict[str, Any]],
245
+ output_dir: Path,
246
+ ) -> list[dict[str, Any]]:
247
+ """Gather the results.
248
+
249
+ Parameters
250
+ ----------
251
+ results : list[dict[str, Any]]
252
+ The returned results from the module call.
253
+ output_dir : Path
254
+ The output directory to look for results.json
255
+
256
+ Returns
257
+ -------
258
+ list[dict[str, Any]]
259
+ The final results.
260
+ """
261
+ if (output_dir / "results.json").exists():
262
+ return ResultsMixin.read_from_output(output_dir)
263
+ if (output_dir / "error.json").exists():
264
+ return ResultsMixin.read_results_error(output_dir / "error.json")
265
+ return results
266
+
267
+ @staticmethod
268
+ async def a_get_results(
269
+ results: list[dict[str, Any]],
270
+ output_dir: Path,
271
+ ) -> list[dict[str, Any]]:
272
+ """Gather the results.
273
+
274
+ Parameters
275
+ ----------
276
+ results : list[dict[str, Any]]
277
+ The returned results from the module call.
278
+ output_dir : Path
279
+ The output directory to look for results.json
280
+
281
+ Returns
282
+ -------
283
+ list[dict[str, Any]]
284
+ The final results.
285
+ """
286
+ if (output_dir / "results.json").exists():
287
+ return await ResultsMixin.a_read_from_output(output_dir)
288
+ if (output_dir / "error.json").exists():
289
+ return await ResultsMixin.a_read_results_error(
290
+ output_dir / "error.json"
291
+ )
292
+ return results
293
+
294
+ @staticmethod
295
+ async def a_read_from_output(
296
+ output_dir: Path,
297
+ ) -> list[dict[str, Any]]:
298
+ """Read from output dir results.json or error.json.
299
+
300
+ Parameters
301
+ ----------
302
+ output_dir : Path
303
+ The output directory to check for results.json or error.json
304
+
305
+ Return
306
+ ------
307
+ list[dict[str, Any]]
308
+ The parsed results.
309
+ """
310
+ error_json = output_dir / "error.json"
311
+ results_json = output_dir / "results.json"
312
+ try:
313
+ if results_json.is_file():
314
+ async with aiofiles.open(
315
+ results_json, "r", encoding="utf-8"
316
+ ) as file:
317
+ results = await file.read()
318
+ return json.loads(results).get("results", [])
319
+ if error_json.is_file():
320
+ async with aiofiles.open(
321
+ error_json, "r", encoding="utf-8"
322
+ ) as file:
323
+ results = await file.read()
324
+ reason = json.loads(results).get("error", "Flow failed")
325
+ return [{"error": reason}]
326
+ except BaseException as e:
327
+ return [{"error": str(e)}]
328
+ return [{"error": "Could not gather result details."}]
329
+
330
+ @staticmethod
331
+ def read_from_output(
332
+ output_dir: Path,
333
+ ) -> list[dict[str, Any]]:
334
+ """Read from output dir results.json or error.json.
335
+
336
+ Parameters
337
+ ----------
338
+ output_dir : Path
339
+ The output directory to check for results.json or error.json
340
+
341
+ Return
342
+ ------
343
+ list[dict[str, Any]]
344
+ The parsed results.
345
+ """
346
+ error_json = output_dir / "error.json"
347
+ results_json = output_dir / "results.json"
348
+ try:
349
+ if results_json.is_file():
350
+ with open(results_json, "r", encoding="utf-8") as file:
351
+ results = file.read()
352
+ return json.loads(results).get("results", [])
353
+ if error_json.is_file():
354
+ with open(error_json, "r", encoding="utf-8") as file:
355
+ results = file.read()
356
+ reason = json.loads(results).get("error", "Flow failed")
357
+ return [{"error": reason}]
358
+ except BaseException as e:
359
+ return [{"error": str(e)}]
360
+ return [{"error": "Could not gather result details."}]
361
+
362
+ @staticmethod
363
+ def read_results_error(error_json: Path) -> list[dict[str, Any]]:
364
+ """Read the error from error.json.
365
+
366
+ Parameters
367
+ ----------
368
+ error_json : Path
369
+ The path of error.json
370
+
371
+ Returns
372
+ -------
373
+ list[dict[str, Any]]
374
+ The parsed error details.
375
+ """
376
+ if not error_json.is_file(): # pragma: no cover
377
+ return [{"error": "No results generated"}]
378
+ try:
379
+ with open(error_json, "r", encoding="utf-8") as error_file:
380
+ error_content = error_file.read()
381
+ error_details = json.loads(error_content)
382
+ if isinstance(error_details, dict):
383
+ return [error_details]
384
+ if isinstance(error_details, list):
385
+ return error_details
386
+ except BaseException as error:
387
+ return [{"error": str(error)}]
388
+ return [{"error": "Failed to get error details"}]
389
+
390
+ @staticmethod
391
+ async def a_read_results_error(error_json: Path) -> list[dict[str, Any]]:
392
+ """Read the error from error.json.
393
+
394
+ Parameters
395
+ ----------
396
+ error_json : Path
397
+ The path of error.json
398
+
399
+ Returns
400
+ -------
401
+ list[dict[str, Any]]
402
+ The parsed error details.
403
+ """
404
+ if not error_json.is_file(): # pragma: no cover
405
+ return [{"error": "No results generated"}]
406
+ try:
407
+ async with aiofiles.open(
408
+ error_json, "r", encoding="utf-8"
409
+ ) as error_file:
410
+ error_content = await error_file.read()
411
+ error_details = json.loads(error_content)
412
+ if isinstance(error_details, dict):
413
+ return [error_details]
414
+ if isinstance(error_details, list):
415
+ return error_details
416
+ except BaseException as error:
417
+ return [{"error": str(error)}]
418
+ return [{"error": "Failed to get error details"}]
419
+
420
+ @staticmethod
421
+ def ensure_db_outputs(output_dir: Path) -> None:
422
+ """Ensure the csv and json files are generated if a flow.db exists.
423
+
424
+ Parameters
425
+ ----------
426
+ output_dir : Path
427
+ The output directory.
428
+ """
429
+ flow_db = output_dir / "flow.db"
430
+ if not flow_db.is_file():
431
+ return
432
+ tables = [
433
+ "chat_completions",
434
+ "agents",
435
+ "oai_wrappers",
436
+ "oai_clients",
437
+ "version",
438
+ "events",
439
+ "function_calls",
440
+ ]
441
+ dest = output_dir / "logs"
442
+ dest.mkdir(parents=True, exist_ok=True)
443
+ for table in tables:
444
+ table_csv = dest / f"{table}.csv"
445
+ table_json = dest / f"{table}.json"
446
+ if not table_csv.exists() or not table_json.exists():
447
+ _get_sqlite_out(str(flow_db), table, str(table_csv))
448
+
449
+
450
+ # noinspection PyBroadException
451
+ def _make_mermaid_diagram(
452
+ temp_dir: Path,
453
+ output_file: str | Path | None,
454
+ flow_name: str,
455
+ mmd_dir: Path,
456
+ ) -> None:
457
+ events_csv_path = temp_dir / "logs" / "events.csv"
458
+ if events_csv_path.exists():
459
+ print("Generating mermaid sequence diagram...")
460
+ mmd_path = temp_dir / f"{flow_name}.mmd"
461
+ generate_sequence_diagram(events_csv_path, mmd_path)
462
+ if (
463
+ not output_file
464
+ and mmd_path.exists()
465
+ and mmd_path != mmd_dir / f"{flow_name}.mmd"
466
+ ):
467
+ try:
468
+ shutil.copyfile(mmd_path, mmd_dir / f"{flow_name}.mmd")
469
+ except BaseException:
470
+ pass
471
+
472
+
473
+ # noinspection PyBroadException
474
+ def _make_timeline_json(
475
+ output_dir: Path,
476
+ ) -> None:
477
+ """Make the timeline JSON file."""
478
+ events_csv_path = output_dir / "logs" / "events.csv"
479
+ if events_csv_path.exists():
480
+ log_files = TimelineProcessor.get_files(output_dir / "logs")
481
+ if any(log_files.values()): # pragma: no branch
482
+ output_file = output_dir / "timeline.json"
483
+ # pylint: disable=too-many-try-statements
484
+ try:
485
+ processor = TimelineProcessor()
486
+ processor.load_csv_files(
487
+ agents_file=log_files["agents"],
488
+ chat_file=log_files["chat"],
489
+ events_file=log_files["events"],
490
+ functions_file=log_files["functions"],
491
+ )
492
+ results = processor.process_timeline()
493
+ with open(
494
+ output_file, "w", encoding="utf-8", newline="\n"
495
+ ) as f:
496
+ json.dump(results, f, indent=2, default=str)
497
+ short_results = TimelineProcessor.get_short_results(results)
498
+ printer = get_printer()
499
+ printer(
500
+ json.dumps(
501
+ {"type": "timeline", "content": short_results},
502
+ default=str,
503
+ ),
504
+ flush=True,
505
+ )
506
+ except BaseException:
507
+ pass
508
+
509
+
510
+ def _copy_results(
511
+ temp_dir: Path,
512
+ output_file: Path,
513
+ destination_dir: Path,
514
+ ) -> None:
515
+ """Copy the results to the output directory."""
516
+ temp_dir.mkdir(parents=True, exist_ok=True)
517
+ output_dir = output_file.parent
518
+ for item in temp_dir.iterdir():
519
+ # skip cache files
520
+ if (
521
+ item.name.startswith("__pycache__")
522
+ or item.name.endswith((".pyc", ".pyo", ".pyd"))
523
+ or item.name == ".cache"
524
+ or item.name == ".env"
525
+ ):
526
+ continue
527
+ if item.is_file():
528
+ # let's also copy the "tree of thoughts" image
529
+ # to the output directory
530
+ if item.name.endswith("tree_of_thoughts.png") or item.name.endswith(
531
+ "reasoning_tree.json"
532
+ ):
533
+ shutil.copy(item, output_dir / item.name)
534
+ shutil.copy(item, destination_dir)
535
+ else:
536
+ shutil.copytree(item, destination_dir / item.name)
537
+ if output_file.is_file():
538
+ if output_file.suffix == ".waldiez":
539
+ output_file = output_file.with_suffix(".py")
540
+ if output_file.suffix == ".py": # pragma: no branch
541
+ src = temp_dir / output_file.name
542
+ if src.exists():
543
+ dst = destination_dir / output_file.name
544
+ if dst.exists():
545
+ dst.unlink()
546
+ shutil.copyfile(src, output_dir / output_file.name)
547
+
548
+
549
+ def _get_sqlite_out(dbname: str, table: str, csv_file: str) -> None:
550
+ """Convert a sqlite table to csv and json files.
551
+
552
+ Parameters
553
+ ----------
554
+ dbname : str
555
+ The sqlite database name.
556
+ table : str
557
+ The table name.
558
+ csv_file : str
559
+ The csv file name.
560
+ """
561
+ conn = sqlite3.connect(dbname)
562
+ query = f"SELECT * FROM {table}" # nosec
563
+ try:
564
+ cursor = conn.execute(query)
565
+ except BaseException:
566
+ conn.close()
567
+ return
568
+ rows = cursor.fetchall()
569
+ column_names = [description[0] for description in cursor.description]
570
+ data = [dict(zip(column_names, row, strict=True)) for row in rows]
571
+ conn.close()
572
+ with open(csv_file, "w", newline="", encoding="utf-8") as file:
573
+ csv_writer = csv.DictWriter(file, fieldnames=column_names)
574
+ csv_writer.writeheader()
575
+ csv_writer.writerows(data)
576
+ json_file = csv_file.replace(".csv", ".json")
577
+ with open(json_file, "w", encoding="utf-8", newline="\n") as file:
578
+ json.dump(data, file, indent=4, ensure_ascii=False)
579
+
580
+
581
+ def _calculate_total_cost(
582
+ chat_completions: list[dict[str, Any]],
583
+ ) -> float | None:
584
+ """Calculate total cost from all chat completions."""
585
+ total_cost = 0.0
586
+
587
+ for completion in chat_completions:
588
+ cost = completion.get("cost")
589
+ if cost is not None:
590
+ total_cost += cost
591
+
592
+ return total_cost if total_cost > 0 else None
593
+
594
+
595
+ def _extract_last_context_variables(
596
+ events: list[dict[str, Any]],
597
+ ) -> dict[str, Any] | None:
598
+ """Extract context_variables from the last event that contains them."""
599
+ for event in reversed(events):
600
+ event_type = event.get("type")
601
+ content_data = event.get("content", {})
602
+
603
+ # Check in executed_function events
604
+ if event_type == "executed_function":
605
+ content = content_data.get("content", {})
606
+ context_vars = content.get("context_variables", {})
607
+ if context_vars and "data" in context_vars:
608
+ return context_vars["data"]
609
+
610
+ # Check in run_completion events
611
+ if event_type == "run_completion":
612
+ if "context_variables" in content_data:
613
+ return content_data["context_variables"]
614
+
615
+ return None
616
+
617
+
618
+ def _extract_last_speaker(events: list[dict[str, Any]]) -> str | None:
619
+ """Extract the last speaker from run_completion or last text event."""
620
+ # Look for run_completion events
621
+ for event in reversed(events):
622
+ event_type = event.get("type")
623
+ content_data = event.get("content", {})
624
+
625
+ if event_type == "run_completion":
626
+ if "last_speaker" in content_data:
627
+ return content_data["last_speaker"]
628
+ # Or get from history
629
+ if "history" in content_data:
630
+ history = content_data["history"]
631
+ if history and len(history) > 0:
632
+ last_msg = history[-1]
633
+ if isinstance(last_msg, dict) and "name" in last_msg:
634
+ return str(last_msg["name"])
635
+
636
+ # Fallback: get last text event sender
637
+ for event in reversed(events):
638
+ if event.get("type") == "text":
639
+ content_data = event.get("content", {})
640
+ sender = content_data.get("sender", "")
641
+ if sender and sender != "manager":
642
+ return sender
643
+
644
+ return None
645
+
646
+
647
+ def _extract_messages_from_events(
648
+ events: list[dict[str, Any]],
649
+ ) -> list[dict[str, Any]]:
650
+ """Extract conversation messages from events array.
651
+
652
+ Looks for events with type 'text' or LLM responses
653
+ and builds a message list.
654
+ """
655
+ messages = []
656
+ seen_messages = set()
657
+
658
+ for event in events:
659
+ event_type = event.get("type")
660
+ content_data = event.get("content", {})
661
+
662
+ # Handle text events
663
+ if event_type == "text":
664
+ content = content_data.get("content", "")
665
+ sender = content_data.get("sender", "")
666
+ # recipient = content_data.get("recipient", "")
667
+
668
+ # Skip handoff messages and empty content
669
+ if content.startswith("[Handing off"):
670
+ continue
671
+ if not content or content == "None":
672
+ continue
673
+
674
+ # Create unique key to avoid duplicates
675
+ msg_key = f"{sender}:{content[:50]}"
676
+ if msg_key in seen_messages:
677
+ continue
678
+ seen_messages.add(msg_key)
679
+
680
+ # Determine role
681
+ role = "user" if sender == "user" else "assistant"
682
+
683
+ messages.append({"content": content, "role": role, "name": sender})
684
+
685
+ return messages
686
+
687
+
688
+ def _extract_summary_from_events(events: list[dict[str, Any]]) -> str | None:
689
+ """Extract summary from the last meaningful event.
690
+
691
+ Looks for "run_completion" events or the last assistant message.
692
+ """
693
+ # Look for run_completion events
694
+ for event in reversed(events):
695
+ event_type = event.get("type")
696
+
697
+ if event_type == "run_completion":
698
+ content_data = event.get("content", {})
699
+ # The summary might be in the content or history
700
+ if "summary" in content_data:
701
+ return content_data["summary"]
702
+ if "history" in content_data:
703
+ history = content_data["history"]
704
+ if history and len(history) > 0:
705
+ last_msg = history[-1]
706
+ if isinstance(last_msg, dict) and "content" in last_msg:
707
+ return last_msg["content"]
708
+
709
+ # Fallback: get last text message
710
+ for event in reversed(events):
711
+ if event.get("type") == "text":
712
+ content_data = event.get("content", {})
713
+ content = content_data.get("content", "")
714
+ if (
715
+ content
716
+ and not content.startswith("[Handing off")
717
+ and content != "None"
718
+ ):
719
+ return content
720
+
721
+ return None
722
+
723
+
724
+ def _results_are_empty(results: Any) -> bool:
725
+ """Check if the results are empty or not."""
726
+ to_check = results if isinstance(results, list) else [results]
727
+ for item in to_check:
728
+ if not isinstance(item, dict):
729
+ return True
730
+ events = item.get("events", [])
731
+ if isinstance(events, list) and len(events) > 0:
732
+ return False
733
+ messages = item.get("messages", [])
734
+ if isinstance(messages, list) and len(messages) > 0:
735
+ return False
736
+ return True
737
+
738
+
739
async def _a_get_results_from_json(output_dir: Path) -> list[dict[str, Any]]:
    """Get the results dumped in results.json if any.

    Returns an empty list when the file is missing, unreadable, not valid
    JSON, has an unexpected top-level type, or holds only empty results.
    """
    results_json = output_dir / "results.json"
    if not results_json.is_file():
        return []
    try:
        async with aiofiles.open(results_json, "r", encoding="utf-8") as file:
            file_data = await file.read()
        data = json.loads(file_data)
    except (OSError, ValueError):
        # Narrowed from BaseException: in a coroutine, a bare
        # BaseException also swallows asyncio.CancelledError and breaks
        # task cancellation (and would eat KeyboardInterrupt/SystemExit).
        # json.JSONDecodeError and UnicodeDecodeError are ValueError.
        return []
    if isinstance(data, dict):
        results = data.get("results", [])
    elif isinstance(data, list):
        results = data
    else:
        return []
    if _results_are_empty(results):
        return []
    return results
759
+
760
+
761
def _get_results_from_json(output_dir: Path) -> list[dict[str, Any]]:
    """Get the results dumped in results.json if any.

    Returns an empty list when the file is missing, unreadable, not valid
    JSON, has an unexpected top-level type, or holds only empty results.
    """
    results_json = output_dir / "results.json"
    if not results_json.is_file():
        return []
    try:
        with open(results_json, "r", encoding="utf-8") as file:
            data = json.load(file)
    except (OSError, ValueError):
        # Narrowed from BaseException, which would also swallow
        # KeyboardInterrupt/SystemExit. json.JSONDecodeError and
        # UnicodeDecodeError are both ValueError subclasses.
        return []
    if isinstance(data, dict):
        results = data.get("results", [])
    elif isinstance(data, list):
        results = data
    else:
        return []
    if _results_are_empty(results):
        return []
    return results
780
+
781
+
782
+ def _remove_results_json(output_dir: Path) -> None:
783
+ results_json = output_dir / "results.json"
784
+ if results_json.exists():
785
+ try:
786
+ results_json.unlink(missing_ok=True)
787
+ except BaseException:
788
+ pass
789
+
790
+
791
def _fill_results_from_logs(run_dir: Path) -> dict[str, list[dict[str, Any]]]:
    """Fill missing fields in results.json from log files.

    For each result entry:
    - If no messages: get from events array by parsing msgs from chat history
    - If no summary: get from last "run_completion" event
    - If no cost: get from logs/chat_completions.json
    - If no context_variables: get from LAST event that has context_variables
    - If no last_speaker: get from "run_completion" event

    Parameters
    ----------
    run_dir : Path
        Path to the run directory

    Returns
    -------
    dict[str, list[dict[str, Any]]]
        Updated results dictionary with filled fields
    """
    base = Path(run_dir)
    results_file = base / "results.json"
    completions_file = base / "logs" / "chat_completions.json"

    # results.json must exist; its absence is the caller's problem.
    with open(results_file, "r", encoding="utf-8") as fh:
        payload = json.load(fh)

    # Cost data lives in the chat-completions log, when present.
    completions: list[Any] = []
    if completions_file.exists():
        with open(completions_file, "r", encoding="utf-8") as fh:
            completions = json.load(fh)

    # Patch each entry in place, touching only the gaps.
    for entry in payload.get("results", []):
        entry_events = entry.get("events", [])

        if not entry.get("messages"):
            entry["messages"] = _extract_messages_from_events(entry_events)

        if not entry.get("summary"):
            entry["summary"] = _extract_summary_from_events(entry_events)

        if entry.get("cost") is None:
            entry["cost"] = _calculate_total_cost(completions)

        if entry.get("context_variables") is None:
            entry["context_variables"] = _extract_last_context_variables(
                entry_events
            )

        if entry.get("last_speaker") is None:
            entry["last_speaker"] = _extract_last_speaker(entry_events)

    return payload
853
+
854
+
855
def _ensure_error_json(output_dir: Path, error: BaseException) -> None:
    """Record *error* in error.json (unless one exists) and drop results.json."""
    target = output_dir / "error.json"
    if not target.exists():
        payload = json.dumps({"error": str(error)})
        with open(target, "w", encoding="utf-8", newline="\n") as fh:
            fh.write(payload)
    _remove_results_json(output_dir)
861
+
862
+
863
async def _a_ensure_error_json(output_dir: Path, error: BaseException) -> None:
    """Async variant: record *error* in error.json (unless present), drop results.json."""
    target = output_dir / "error.json"
    if not target.exists():
        payload = json.dumps({"error": str(error)})
        async with aiofiles.open(
            target, "w", encoding="utf-8", newline="\n"
        ) as fh:
            await fh.write(payload)
    _remove_results_json(output_dir)
871
+
872
+
873
def _store_full_results(
    output_dir: Path,
) -> None:
    """Post-process results.json in *output_dir*, filling gaps from logs.

    On read/parse failure, or when the results list is missing/empty, an
    error.json is written instead (and results.json removed). Enrichment
    from logs is best-effort: a failure there leaves the original
    results.json untouched.
    """
    results_json = output_dir / "results.json"
    if not results_json.exists():
        # Nothing to post-process: the run produced no results file.
        return
    try:
        with open(results_json, "r", encoding="utf-8", newline="\n") as file:
            results_data = json.loads(file.read())
            results_list = results_data.get("results", [])
    except Exception as error:
        # Narrowed from BaseException so KeyboardInterrupt/SystemExit
        # still propagate instead of being recorded as run errors.
        _ensure_error_json(output_dir, error)
        return
    if not isinstance(results_list, list) or not results_list:
        _ensure_error_json(output_dir, RuntimeError("No results generated"))
        return
    try:
        filled = _fill_results_from_logs(output_dir)
        with open(results_json, "w", encoding="utf-8", newline="\n") as file:
            file.write(json.dumps(filled))
    except Exception:
        # Best-effort enrichment: keep the original results.json on
        # failure (was a bare BaseException swallow).
        pass
898
+
899
+
900
async def _a_store_full_results(
    output_dir: Path,
) -> None:
    """Async post-processing of results.json, filling gaps from logs.

    Mirrors ``_store_full_results``: on read/parse failure or an
    empty/missing results list an error.json is written; enrichment
    failures are ignored so the original results.json survives.
    """
    results_json = output_dir / "results.json"
    if not results_json.exists():
        # Nothing to post-process: the run produced no results file.
        return
    try:
        async with aiofiles.open(
            results_json, "r", encoding="utf-8", newline="\n"
        ) as file:
            results_data = json.loads(await file.read())
            results_list = results_data.get("results", [])
    except Exception as error:
        # Narrowed from BaseException: in a coroutine that also caught
        # asyncio.CancelledError, which must propagate for task
        # cancellation to work (plus KeyboardInterrupt/SystemExit).
        await _a_ensure_error_json(output_dir, error)
        return
    if not isinstance(results_list, list) or not results_list:
        await _a_ensure_error_json(
            output_dir, RuntimeError("No results generated")
        )
        return
    try:
        # NOTE(review): _fill_results_from_logs performs blocking file
        # I/O on the event loop; acceptable for small log files.
        filled = _fill_results_from_logs(output_dir)
        async with aiofiles.open(
            results_json, "w", encoding="utf-8", newline="\n"
        ) as file:
            await file.write(json.dumps(filled))
    except Exception:
        # Best-effort enrichment: keep the original results.json on
        # failure (was a bare BaseException swallow).
        pass