ai-parrot 0.17.2__cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (535)
  1. agentui/.prettierrc +15 -0
  2. agentui/QUICKSTART.md +272 -0
  3. agentui/README.md +59 -0
  4. agentui/env.example +16 -0
  5. agentui/jsconfig.json +14 -0
  6. agentui/package-lock.json +4242 -0
  7. agentui/package.json +34 -0
  8. agentui/scripts/postinstall/apply-patches.mjs +260 -0
  9. agentui/src/app.css +61 -0
  10. agentui/src/app.d.ts +13 -0
  11. agentui/src/app.html +12 -0
  12. agentui/src/components/LoadingSpinner.svelte +64 -0
  13. agentui/src/components/ThemeSwitcher.svelte +159 -0
  14. agentui/src/components/index.js +4 -0
  15. agentui/src/lib/api/bots.ts +60 -0
  16. agentui/src/lib/api/chat.ts +22 -0
  17. agentui/src/lib/api/http.ts +25 -0
  18. agentui/src/lib/components/BotCard.svelte +33 -0
  19. agentui/src/lib/components/ChatBubble.svelte +63 -0
  20. agentui/src/lib/components/Toast.svelte +21 -0
  21. agentui/src/lib/config.ts +20 -0
  22. agentui/src/lib/stores/auth.svelte.ts +73 -0
  23. agentui/src/lib/stores/theme.svelte.js +64 -0
  24. agentui/src/lib/stores/toast.svelte.ts +31 -0
  25. agentui/src/lib/utils/conversation.ts +39 -0
  26. agentui/src/routes/+layout.svelte +20 -0
  27. agentui/src/routes/+page.svelte +232 -0
  28. agentui/src/routes/login/+page.svelte +200 -0
  29. agentui/src/routes/talk/[agentId]/+page.svelte +297 -0
  30. agentui/src/routes/talk/[agentId]/+page.ts +7 -0
  31. agentui/static/README.md +1 -0
  32. agentui/svelte.config.js +11 -0
  33. agentui/tailwind.config.ts +53 -0
  34. agentui/tsconfig.json +3 -0
  35. agentui/vite.config.ts +10 -0
  36. ai_parrot-0.17.2.dist-info/METADATA +472 -0
  37. ai_parrot-0.17.2.dist-info/RECORD +535 -0
  38. ai_parrot-0.17.2.dist-info/WHEEL +6 -0
  39. ai_parrot-0.17.2.dist-info/entry_points.txt +2 -0
  40. ai_parrot-0.17.2.dist-info/licenses/LICENSE +21 -0
  41. ai_parrot-0.17.2.dist-info/top_level.txt +6 -0
  42. crew-builder/.prettierrc +15 -0
  43. crew-builder/QUICKSTART.md +259 -0
  44. crew-builder/README.md +113 -0
  45. crew-builder/env.example +17 -0
  46. crew-builder/jsconfig.json +14 -0
  47. crew-builder/package-lock.json +4182 -0
  48. crew-builder/package.json +37 -0
  49. crew-builder/scripts/postinstall/apply-patches.mjs +260 -0
  50. crew-builder/src/app.css +62 -0
  51. crew-builder/src/app.d.ts +13 -0
  52. crew-builder/src/app.html +12 -0
  53. crew-builder/src/components/LoadingSpinner.svelte +64 -0
  54. crew-builder/src/components/ThemeSwitcher.svelte +149 -0
  55. crew-builder/src/components/index.js +9 -0
  56. crew-builder/src/lib/api/bots.ts +60 -0
  57. crew-builder/src/lib/api/chat.ts +80 -0
  58. crew-builder/src/lib/api/client.ts +56 -0
  59. crew-builder/src/lib/api/crew/crew.ts +136 -0
  60. crew-builder/src/lib/api/index.ts +5 -0
  61. crew-builder/src/lib/api/o365/auth.ts +65 -0
  62. crew-builder/src/lib/auth/auth.ts +54 -0
  63. crew-builder/src/lib/components/AgentNode.svelte +43 -0
  64. crew-builder/src/lib/components/BotCard.svelte +33 -0
  65. crew-builder/src/lib/components/ChatBubble.svelte +67 -0
  66. crew-builder/src/lib/components/ConfigPanel.svelte +278 -0
  67. crew-builder/src/lib/components/JsonTreeNode.svelte +76 -0
  68. crew-builder/src/lib/components/JsonViewer.svelte +24 -0
  69. crew-builder/src/lib/components/MarkdownEditor.svelte +48 -0
  70. crew-builder/src/lib/components/ThemeToggle.svelte +36 -0
  71. crew-builder/src/lib/components/Toast.svelte +67 -0
  72. crew-builder/src/lib/components/Toolbar.svelte +157 -0
  73. crew-builder/src/lib/components/index.ts +10 -0
  74. crew-builder/src/lib/config.ts +8 -0
  75. crew-builder/src/lib/stores/auth.svelte.ts +228 -0
  76. crew-builder/src/lib/stores/crewStore.ts +369 -0
  77. crew-builder/src/lib/stores/theme.svelte.js +145 -0
  78. crew-builder/src/lib/stores/toast.svelte.ts +69 -0
  79. crew-builder/src/lib/utils/conversation.ts +39 -0
  80. crew-builder/src/lib/utils/markdown.ts +122 -0
  81. crew-builder/src/lib/utils/talkHistory.ts +47 -0
  82. crew-builder/src/routes/+layout.svelte +20 -0
  83. crew-builder/src/routes/+page.svelte +539 -0
  84. crew-builder/src/routes/agents/+page.svelte +247 -0
  85. crew-builder/src/routes/agents/[agentId]/+page.svelte +288 -0
  86. crew-builder/src/routes/agents/[agentId]/+page.ts +7 -0
  87. crew-builder/src/routes/builder/+page.svelte +204 -0
  88. crew-builder/src/routes/crew/ask/+page.svelte +1052 -0
  89. crew-builder/src/routes/crew/ask/+page.ts +1 -0
  90. crew-builder/src/routes/integrations/o365/+page.svelte +304 -0
  91. crew-builder/src/routes/login/+page.svelte +197 -0
  92. crew-builder/src/routes/talk/[agentId]/+page.svelte +487 -0
  93. crew-builder/src/routes/talk/[agentId]/+page.ts +7 -0
  94. crew-builder/static/README.md +1 -0
  95. crew-builder/svelte.config.js +11 -0
  96. crew-builder/tailwind.config.ts +53 -0
  97. crew-builder/tsconfig.json +3 -0
  98. crew-builder/vite.config.ts +10 -0
  99. mcp_servers/calculator_server.py +309 -0
  100. parrot/__init__.py +27 -0
  101. parrot/__pycache__/__init__.cpython-310.pyc +0 -0
  102. parrot/__pycache__/version.cpython-310.pyc +0 -0
  103. parrot/_version.py +34 -0
  104. parrot/a2a/__init__.py +48 -0
  105. parrot/a2a/client.py +658 -0
  106. parrot/a2a/discovery.py +89 -0
  107. parrot/a2a/mixin.py +257 -0
  108. parrot/a2a/models.py +376 -0
  109. parrot/a2a/server.py +770 -0
  110. parrot/agents/__init__.py +29 -0
  111. parrot/bots/__init__.py +12 -0
  112. parrot/bots/a2a_agent.py +19 -0
  113. parrot/bots/abstract.py +3139 -0
  114. parrot/bots/agent.py +1129 -0
  115. parrot/bots/basic.py +9 -0
  116. parrot/bots/chatbot.py +669 -0
  117. parrot/bots/data.py +1618 -0
  118. parrot/bots/database/__init__.py +5 -0
  119. parrot/bots/database/abstract.py +3071 -0
  120. parrot/bots/database/cache.py +286 -0
  121. parrot/bots/database/models.py +468 -0
  122. parrot/bots/database/prompts.py +154 -0
  123. parrot/bots/database/retries.py +98 -0
  124. parrot/bots/database/router.py +269 -0
  125. parrot/bots/database/sql.py +41 -0
  126. parrot/bots/db/__init__.py +6 -0
  127. parrot/bots/db/abstract.py +556 -0
  128. parrot/bots/db/bigquery.py +602 -0
  129. parrot/bots/db/cache.py +85 -0
  130. parrot/bots/db/documentdb.py +668 -0
  131. parrot/bots/db/elastic.py +1014 -0
  132. parrot/bots/db/influx.py +898 -0
  133. parrot/bots/db/mock.py +96 -0
  134. parrot/bots/db/multi.py +783 -0
  135. parrot/bots/db/prompts.py +185 -0
  136. parrot/bots/db/sql.py +1255 -0
  137. parrot/bots/db/tools.py +212 -0
  138. parrot/bots/document.py +680 -0
  139. parrot/bots/hrbot.py +15 -0
  140. parrot/bots/kb.py +170 -0
  141. parrot/bots/mcp.py +36 -0
  142. parrot/bots/orchestration/README.md +463 -0
  143. parrot/bots/orchestration/__init__.py +1 -0
  144. parrot/bots/orchestration/agent.py +155 -0
  145. parrot/bots/orchestration/crew.py +3330 -0
  146. parrot/bots/orchestration/fsm.py +1179 -0
  147. parrot/bots/orchestration/hr.py +434 -0
  148. parrot/bots/orchestration/storage/__init__.py +4 -0
  149. parrot/bots/orchestration/storage/memory.py +100 -0
  150. parrot/bots/orchestration/storage/mixin.py +119 -0
  151. parrot/bots/orchestration/verify.py +202 -0
  152. parrot/bots/product.py +204 -0
  153. parrot/bots/prompts/__init__.py +96 -0
  154. parrot/bots/prompts/agents.py +155 -0
  155. parrot/bots/prompts/data.py +216 -0
  156. parrot/bots/prompts/output_generation.py +8 -0
  157. parrot/bots/scraper/__init__.py +3 -0
  158. parrot/bots/scraper/models.py +122 -0
  159. parrot/bots/scraper/scraper.py +1173 -0
  160. parrot/bots/scraper/templates.py +115 -0
  161. parrot/bots/stores/__init__.py +5 -0
  162. parrot/bots/stores/local.py +172 -0
  163. parrot/bots/webdev.py +81 -0
  164. parrot/cli.py +17 -0
  165. parrot/clients/__init__.py +16 -0
  166. parrot/clients/base.py +1491 -0
  167. parrot/clients/claude.py +1191 -0
  168. parrot/clients/factory.py +129 -0
  169. parrot/clients/google.py +4567 -0
  170. parrot/clients/gpt.py +1975 -0
  171. parrot/clients/grok.py +432 -0
  172. parrot/clients/groq.py +986 -0
  173. parrot/clients/hf.py +582 -0
  174. parrot/clients/models.py +18 -0
  175. parrot/conf.py +395 -0
  176. parrot/embeddings/__init__.py +9 -0
  177. parrot/embeddings/base.py +157 -0
  178. parrot/embeddings/google.py +98 -0
  179. parrot/embeddings/huggingface.py +74 -0
  180. parrot/embeddings/openai.py +84 -0
  181. parrot/embeddings/processor.py +88 -0
  182. parrot/exceptions.c +13868 -0
  183. parrot/exceptions.cpython-310-x86_64-linux-gnu.so +0 -0
  184. parrot/exceptions.pxd +22 -0
  185. parrot/exceptions.pxi +15 -0
  186. parrot/exceptions.pyx +44 -0
  187. parrot/generators/__init__.py +29 -0
  188. parrot/generators/base.py +200 -0
  189. parrot/generators/html.py +293 -0
  190. parrot/generators/react.py +205 -0
  191. parrot/generators/streamlit.py +203 -0
  192. parrot/generators/template.py +105 -0
  193. parrot/handlers/__init__.py +4 -0
  194. parrot/handlers/agent.py +861 -0
  195. parrot/handlers/agents/__init__.py +1 -0
  196. parrot/handlers/agents/abstract.py +900 -0
  197. parrot/handlers/bots.py +338 -0
  198. parrot/handlers/chat.py +915 -0
  199. parrot/handlers/creation.sql +192 -0
  200. parrot/handlers/crew/ARCHITECTURE.md +362 -0
  201. parrot/handlers/crew/README_BOTMANAGER_PERSISTENCE.md +303 -0
  202. parrot/handlers/crew/README_REDIS_PERSISTENCE.md +366 -0
  203. parrot/handlers/crew/__init__.py +0 -0
  204. parrot/handlers/crew/handler.py +801 -0
  205. parrot/handlers/crew/models.py +229 -0
  206. parrot/handlers/crew/redis_persistence.py +523 -0
  207. parrot/handlers/jobs/__init__.py +10 -0
  208. parrot/handlers/jobs/job.py +384 -0
  209. parrot/handlers/jobs/mixin.py +627 -0
  210. parrot/handlers/jobs/models.py +115 -0
  211. parrot/handlers/jobs/worker.py +31 -0
  212. parrot/handlers/models.py +596 -0
  213. parrot/handlers/o365_auth.py +105 -0
  214. parrot/handlers/stream.py +337 -0
  215. parrot/interfaces/__init__.py +6 -0
  216. parrot/interfaces/aws.py +143 -0
  217. parrot/interfaces/credentials.py +113 -0
  218. parrot/interfaces/database.py +27 -0
  219. parrot/interfaces/google.py +1123 -0
  220. parrot/interfaces/hierarchy.py +1227 -0
  221. parrot/interfaces/http.py +651 -0
  222. parrot/interfaces/images/__init__.py +0 -0
  223. parrot/interfaces/images/plugins/__init__.py +24 -0
  224. parrot/interfaces/images/plugins/abstract.py +58 -0
  225. parrot/interfaces/images/plugins/analisys.py +148 -0
  226. parrot/interfaces/images/plugins/classify.py +150 -0
  227. parrot/interfaces/images/plugins/classifybase.py +182 -0
  228. parrot/interfaces/images/plugins/detect.py +150 -0
  229. parrot/interfaces/images/plugins/exif.py +1103 -0
  230. parrot/interfaces/images/plugins/hash.py +52 -0
  231. parrot/interfaces/images/plugins/vision.py +104 -0
  232. parrot/interfaces/images/plugins/yolo.py +66 -0
  233. parrot/interfaces/images/plugins/zerodetect.py +197 -0
  234. parrot/interfaces/o365.py +978 -0
  235. parrot/interfaces/onedrive.py +822 -0
  236. parrot/interfaces/sharepoint.py +1435 -0
  237. parrot/interfaces/soap.py +257 -0
  238. parrot/loaders/__init__.py +8 -0
  239. parrot/loaders/abstract.py +1131 -0
  240. parrot/loaders/audio.py +199 -0
  241. parrot/loaders/basepdf.py +53 -0
  242. parrot/loaders/basevideo.py +1568 -0
  243. parrot/loaders/csv.py +409 -0
  244. parrot/loaders/docx.py +116 -0
  245. parrot/loaders/epubloader.py +316 -0
  246. parrot/loaders/excel.py +199 -0
  247. parrot/loaders/factory.py +55 -0
  248. parrot/loaders/files/__init__.py +0 -0
  249. parrot/loaders/files/abstract.py +39 -0
  250. parrot/loaders/files/html.py +26 -0
  251. parrot/loaders/files/text.py +63 -0
  252. parrot/loaders/html.py +152 -0
  253. parrot/loaders/markdown.py +442 -0
  254. parrot/loaders/pdf.py +373 -0
  255. parrot/loaders/pdfmark.py +320 -0
  256. parrot/loaders/pdftables.py +506 -0
  257. parrot/loaders/ppt.py +476 -0
  258. parrot/loaders/qa.py +63 -0
  259. parrot/loaders/splitters/__init__.py +10 -0
  260. parrot/loaders/splitters/base.py +138 -0
  261. parrot/loaders/splitters/md.py +228 -0
  262. parrot/loaders/splitters/token.py +143 -0
  263. parrot/loaders/txt.py +26 -0
  264. parrot/loaders/video.py +89 -0
  265. parrot/loaders/videolocal.py +218 -0
  266. parrot/loaders/videounderstanding.py +377 -0
  267. parrot/loaders/vimeo.py +167 -0
  268. parrot/loaders/web.py +599 -0
  269. parrot/loaders/youtube.py +504 -0
  270. parrot/manager/__init__.py +5 -0
  271. parrot/manager/manager.py +1030 -0
  272. parrot/mcp/__init__.py +28 -0
  273. parrot/mcp/adapter.py +105 -0
  274. parrot/mcp/cli.py +174 -0
  275. parrot/mcp/client.py +119 -0
  276. parrot/mcp/config.py +75 -0
  277. parrot/mcp/integration.py +842 -0
  278. parrot/mcp/oauth.py +933 -0
  279. parrot/mcp/server.py +225 -0
  280. parrot/mcp/transports/__init__.py +3 -0
  281. parrot/mcp/transports/base.py +279 -0
  282. parrot/mcp/transports/grpc_session.py +163 -0
  283. parrot/mcp/transports/http.py +312 -0
  284. parrot/mcp/transports/mcp.proto +108 -0
  285. parrot/mcp/transports/quic.py +1082 -0
  286. parrot/mcp/transports/sse.py +330 -0
  287. parrot/mcp/transports/stdio.py +309 -0
  288. parrot/mcp/transports/unix.py +395 -0
  289. parrot/mcp/transports/websocket.py +547 -0
  290. parrot/memory/__init__.py +16 -0
  291. parrot/memory/abstract.py +209 -0
  292. parrot/memory/agent.py +32 -0
  293. parrot/memory/cache.py +175 -0
  294. parrot/memory/core.py +555 -0
  295. parrot/memory/file.py +153 -0
  296. parrot/memory/mem.py +131 -0
  297. parrot/memory/redis.py +613 -0
  298. parrot/models/__init__.py +46 -0
  299. parrot/models/basic.py +118 -0
  300. parrot/models/compliance.py +208 -0
  301. parrot/models/crew.py +395 -0
  302. parrot/models/detections.py +654 -0
  303. parrot/models/generation.py +85 -0
  304. parrot/models/google.py +223 -0
  305. parrot/models/groq.py +23 -0
  306. parrot/models/openai.py +30 -0
  307. parrot/models/outputs.py +285 -0
  308. parrot/models/responses.py +938 -0
  309. parrot/notifications/__init__.py +743 -0
  310. parrot/openapi/__init__.py +3 -0
  311. parrot/openapi/components.yaml +641 -0
  312. parrot/openapi/config.py +322 -0
  313. parrot/outputs/__init__.py +32 -0
  314. parrot/outputs/formats/__init__.py +108 -0
  315. parrot/outputs/formats/altair.py +359 -0
  316. parrot/outputs/formats/application.py +122 -0
  317. parrot/outputs/formats/base.py +351 -0
  318. parrot/outputs/formats/bokeh.py +356 -0
  319. parrot/outputs/formats/card.py +424 -0
  320. parrot/outputs/formats/chart.py +436 -0
  321. parrot/outputs/formats/d3.py +255 -0
  322. parrot/outputs/formats/echarts.py +310 -0
  323. parrot/outputs/formats/generators/__init__.py +0 -0
  324. parrot/outputs/formats/generators/abstract.py +61 -0
  325. parrot/outputs/formats/generators/panel.py +145 -0
  326. parrot/outputs/formats/generators/streamlit.py +86 -0
  327. parrot/outputs/formats/generators/terminal.py +63 -0
  328. parrot/outputs/formats/holoviews.py +310 -0
  329. parrot/outputs/formats/html.py +147 -0
  330. parrot/outputs/formats/jinja2.py +46 -0
  331. parrot/outputs/formats/json.py +87 -0
  332. parrot/outputs/formats/map.py +933 -0
  333. parrot/outputs/formats/markdown.py +172 -0
  334. parrot/outputs/formats/matplotlib.py +237 -0
  335. parrot/outputs/formats/mixins/__init__.py +0 -0
  336. parrot/outputs/formats/mixins/emaps.py +855 -0
  337. parrot/outputs/formats/plotly.py +341 -0
  338. parrot/outputs/formats/seaborn.py +310 -0
  339. parrot/outputs/formats/table.py +397 -0
  340. parrot/outputs/formats/template_report.py +138 -0
  341. parrot/outputs/formats/yaml.py +125 -0
  342. parrot/outputs/formatter.py +152 -0
  343. parrot/outputs/templates/__init__.py +95 -0
  344. parrot/pipelines/__init__.py +0 -0
  345. parrot/pipelines/abstract.py +210 -0
  346. parrot/pipelines/detector.py +124 -0
  347. parrot/pipelines/models.py +90 -0
  348. parrot/pipelines/planogram.py +3002 -0
  349. parrot/pipelines/table.sql +97 -0
  350. parrot/plugins/__init__.py +106 -0
  351. parrot/plugins/importer.py +80 -0
  352. parrot/py.typed +0 -0
  353. parrot/registry/__init__.py +18 -0
  354. parrot/registry/registry.py +594 -0
  355. parrot/scheduler/__init__.py +1189 -0
  356. parrot/scheduler/models.py +60 -0
  357. parrot/security/__init__.py +16 -0
  358. parrot/security/prompt_injection.py +268 -0
  359. parrot/security/security_events.sql +25 -0
  360. parrot/services/__init__.py +1 -0
  361. parrot/services/mcp/__init__.py +8 -0
  362. parrot/services/mcp/config.py +13 -0
  363. parrot/services/mcp/server.py +295 -0
  364. parrot/services/o365_remote_auth.py +235 -0
  365. parrot/stores/__init__.py +7 -0
  366. parrot/stores/abstract.py +352 -0
  367. parrot/stores/arango.py +1090 -0
  368. parrot/stores/bigquery.py +1377 -0
  369. parrot/stores/cache.py +106 -0
  370. parrot/stores/empty.py +10 -0
  371. parrot/stores/faiss_store.py +1157 -0
  372. parrot/stores/kb/__init__.py +9 -0
  373. parrot/stores/kb/abstract.py +68 -0
  374. parrot/stores/kb/cache.py +165 -0
  375. parrot/stores/kb/doc.py +325 -0
  376. parrot/stores/kb/hierarchy.py +346 -0
  377. parrot/stores/kb/local.py +457 -0
  378. parrot/stores/kb/prompt.py +28 -0
  379. parrot/stores/kb/redis.py +659 -0
  380. parrot/stores/kb/store.py +115 -0
  381. parrot/stores/kb/user.py +374 -0
  382. parrot/stores/models.py +59 -0
  383. parrot/stores/pgvector.py +3 -0
  384. parrot/stores/postgres.py +2853 -0
  385. parrot/stores/utils/__init__.py +0 -0
  386. parrot/stores/utils/chunking.py +197 -0
  387. parrot/telemetry/__init__.py +3 -0
  388. parrot/telemetry/mixin.py +111 -0
  389. parrot/template/__init__.py +3 -0
  390. parrot/template/engine.py +259 -0
  391. parrot/tools/__init__.py +23 -0
  392. parrot/tools/abstract.py +644 -0
  393. parrot/tools/agent.py +363 -0
  394. parrot/tools/arangodbsearch.py +537 -0
  395. parrot/tools/arxiv_tool.py +188 -0
  396. parrot/tools/calculator/__init__.py +3 -0
  397. parrot/tools/calculator/operations/__init__.py +38 -0
  398. parrot/tools/calculator/operations/calculus.py +80 -0
  399. parrot/tools/calculator/operations/statistics.py +76 -0
  400. parrot/tools/calculator/tool.py +150 -0
  401. parrot/tools/cloudwatch.py +988 -0
  402. parrot/tools/codeinterpreter/__init__.py +127 -0
  403. parrot/tools/codeinterpreter/executor.py +371 -0
  404. parrot/tools/codeinterpreter/internals.py +473 -0
  405. parrot/tools/codeinterpreter/models.py +643 -0
  406. parrot/tools/codeinterpreter/prompts.py +224 -0
  407. parrot/tools/codeinterpreter/tool.py +664 -0
  408. parrot/tools/company_info/__init__.py +6 -0
  409. parrot/tools/company_info/tool.py +1138 -0
  410. parrot/tools/correlationanalysis.py +437 -0
  411. parrot/tools/database/abstract.py +286 -0
  412. parrot/tools/database/bq.py +115 -0
  413. parrot/tools/database/cache.py +284 -0
  414. parrot/tools/database/models.py +95 -0
  415. parrot/tools/database/pg.py +343 -0
  416. parrot/tools/databasequery.py +1159 -0
  417. parrot/tools/db.py +1800 -0
  418. parrot/tools/ddgo.py +370 -0
  419. parrot/tools/decorators.py +271 -0
  420. parrot/tools/dftohtml.py +282 -0
  421. parrot/tools/document.py +549 -0
  422. parrot/tools/ecs.py +819 -0
  423. parrot/tools/edareport.py +368 -0
  424. parrot/tools/elasticsearch.py +1049 -0
  425. parrot/tools/employees.py +462 -0
  426. parrot/tools/epson/__init__.py +96 -0
  427. parrot/tools/excel.py +683 -0
  428. parrot/tools/file/__init__.py +13 -0
  429. parrot/tools/file/abstract.py +76 -0
  430. parrot/tools/file/gcs.py +378 -0
  431. parrot/tools/file/local.py +284 -0
  432. parrot/tools/file/s3.py +511 -0
  433. parrot/tools/file/tmp.py +309 -0
  434. parrot/tools/file/tool.py +501 -0
  435. parrot/tools/file_reader.py +129 -0
  436. parrot/tools/flowtask/__init__.py +19 -0
  437. parrot/tools/flowtask/tool.py +761 -0
  438. parrot/tools/gittoolkit.py +508 -0
  439. parrot/tools/google/__init__.py +18 -0
  440. parrot/tools/google/base.py +169 -0
  441. parrot/tools/google/tools.py +1251 -0
  442. parrot/tools/googlelocation.py +5 -0
  443. parrot/tools/googleroutes.py +5 -0
  444. parrot/tools/googlesearch.py +5 -0
  445. parrot/tools/googlesitesearch.py +5 -0
  446. parrot/tools/googlevoice.py +2 -0
  447. parrot/tools/gvoice.py +695 -0
  448. parrot/tools/ibisworld/README.md +225 -0
  449. parrot/tools/ibisworld/__init__.py +11 -0
  450. parrot/tools/ibisworld/tool.py +366 -0
  451. parrot/tools/jiratoolkit.py +1718 -0
  452. parrot/tools/manager.py +1098 -0
  453. parrot/tools/math.py +152 -0
  454. parrot/tools/metadata.py +476 -0
  455. parrot/tools/msteams.py +1621 -0
  456. parrot/tools/msword.py +635 -0
  457. parrot/tools/multidb.py +580 -0
  458. parrot/tools/multistoresearch.py +369 -0
  459. parrot/tools/networkninja.py +167 -0
  460. parrot/tools/nextstop/__init__.py +4 -0
  461. parrot/tools/nextstop/base.py +286 -0
  462. parrot/tools/nextstop/employee.py +733 -0
  463. parrot/tools/nextstop/store.py +462 -0
  464. parrot/tools/notification.py +435 -0
  465. parrot/tools/o365/__init__.py +42 -0
  466. parrot/tools/o365/base.py +295 -0
  467. parrot/tools/o365/bundle.py +522 -0
  468. parrot/tools/o365/events.py +554 -0
  469. parrot/tools/o365/mail.py +992 -0
  470. parrot/tools/o365/onedrive.py +497 -0
  471. parrot/tools/o365/sharepoint.py +641 -0
  472. parrot/tools/openapi_toolkit.py +904 -0
  473. parrot/tools/openweather.py +527 -0
  474. parrot/tools/pdfprint.py +1001 -0
  475. parrot/tools/powerbi.py +518 -0
  476. parrot/tools/powerpoint.py +1113 -0
  477. parrot/tools/pricestool.py +146 -0
  478. parrot/tools/products/__init__.py +246 -0
  479. parrot/tools/prophet_tool.py +171 -0
  480. parrot/tools/pythonpandas.py +630 -0
  481. parrot/tools/pythonrepl.py +910 -0
  482. parrot/tools/qsource.py +436 -0
  483. parrot/tools/querytoolkit.py +395 -0
  484. parrot/tools/quickeda.py +827 -0
  485. parrot/tools/resttool.py +553 -0
  486. parrot/tools/retail/__init__.py +0 -0
  487. parrot/tools/retail/bby.py +528 -0
  488. parrot/tools/sandboxtool.py +703 -0
  489. parrot/tools/sassie/__init__.py +352 -0
  490. parrot/tools/scraping/__init__.py +7 -0
  491. parrot/tools/scraping/docs/select.md +466 -0
  492. parrot/tools/scraping/documentation.md +1278 -0
  493. parrot/tools/scraping/driver.py +436 -0
  494. parrot/tools/scraping/models.py +576 -0
  495. parrot/tools/scraping/options.py +85 -0
  496. parrot/tools/scraping/orchestrator.py +517 -0
  497. parrot/tools/scraping/readme.md +740 -0
  498. parrot/tools/scraping/tool.py +3115 -0
  499. parrot/tools/seasonaldetection.py +642 -0
  500. parrot/tools/shell_tool/__init__.py +5 -0
  501. parrot/tools/shell_tool/actions.py +408 -0
  502. parrot/tools/shell_tool/engine.py +155 -0
  503. parrot/tools/shell_tool/models.py +322 -0
  504. parrot/tools/shell_tool/tool.py +442 -0
  505. parrot/tools/site_search.py +214 -0
  506. parrot/tools/textfile.py +418 -0
  507. parrot/tools/think.py +378 -0
  508. parrot/tools/toolkit.py +298 -0
  509. parrot/tools/webapp_tool.py +187 -0
  510. parrot/tools/whatif.py +1279 -0
  511. parrot/tools/workday/MULTI_WSDL_EXAMPLE.md +249 -0
  512. parrot/tools/workday/__init__.py +6 -0
  513. parrot/tools/workday/models.py +1389 -0
  514. parrot/tools/workday/tool.py +1293 -0
  515. parrot/tools/yfinance_tool.py +306 -0
  516. parrot/tools/zipcode.py +217 -0
  517. parrot/utils/__init__.py +2 -0
  518. parrot/utils/helpers.py +73 -0
  519. parrot/utils/parsers/__init__.py +5 -0
  520. parrot/utils/parsers/toml.c +12078 -0
  521. parrot/utils/parsers/toml.cpython-310-x86_64-linux-gnu.so +0 -0
  522. parrot/utils/parsers/toml.pyx +21 -0
  523. parrot/utils/toml.py +11 -0
  524. parrot/utils/types.cpp +20936 -0
  525. parrot/utils/types.cpython-310-x86_64-linux-gnu.so +0 -0
  526. parrot/utils/types.pyx +213 -0
  527. parrot/utils/uv.py +11 -0
  528. parrot/version.py +10 -0
  529. parrot/yaml-rs/Cargo.lock +350 -0
  530. parrot/yaml-rs/Cargo.toml +19 -0
  531. parrot/yaml-rs/pyproject.toml +19 -0
  532. parrot/yaml-rs/python/yaml_rs/__init__.py +81 -0
  533. parrot/yaml-rs/src/lib.rs +222 -0
  534. requirements/docker-compose.yml +24 -0
  535. requirements/requirements-dev.txt +21 -0
parrot/bots/orchestration/crew.py
@@ -0,0 +1,3330 @@
1
+ """
2
+ Agent Crew with Parallel, Sequential, Flow, and Loop-Based Execution
3
+ =========================================================================
4
+ Orchestrates complex agent workflows using finite state machines.
5
+ Supports parallel execution, conditional transitions, iterative loops,
6
+ and result aggregation.
7
+
8
+ 1. Sequential: Pipeline pattern where agents execute one after another
9
+ 2. Parallel: All agents execute simultaneously with asyncio.gather()
10
+ 3. Flow: DAG-based execution with dependencies and parallel execution where possible
11
+ 4. Loop: Iterative execution that reuses the latest output until a condition is met
12
+
13
+ This implementation uses a graph-based approach for flexibility with dynamic workflows.
14
+ """
15
+ from __future__ import annotations
16
+ from typing import (
17
+ List, Dict, Any, Union, Optional, Literal, Set, Callable, Awaitable, Tuple
18
+ )
19
+ from enum import Enum
20
+ from dataclasses import dataclass, field
21
+ from datetime import datetime
22
+ import contextlib
23
+ import asyncio
24
+ import uuid
25
+ from tqdm.asyncio import tqdm as async_tqdm
26
+ from navconfig.logging import logging
27
+ from datamodel.parsers.json import json_encoder, json_decoder # pylint: disable=E0611 # noqa
28
+ from ..agent import BasicAgent
29
+ from ..abstract import AbstractBot
30
+ from ...clients import AbstractClient
31
+ from ...clients.factory import SUPPORTED_CLIENTS
32
+ from ...clients.google import GoogleGenAIClient
33
+ from ...tools.manager import ToolManager
34
+ from ...tools.agent import AgentTool
35
+ from ...tools.abstract import AbstractTool
36
+ from ...tools.agent import AgentContext
37
+ from ...models.responses import (
38
+ AIMessage,
39
+ AgentResponse
40
+ )
41
+ from ...models.crew import (
42
+ CrewResult,
43
+ AgentExecutionInfo,
44
+ build_agent_metadata,
45
+ determine_run_status,
46
+ AgentResult
47
+ )
48
+ from .storage import ExecutionMemory
49
+
50
+
51
+ AgentRef = Union[str, BasicAgent, AbstractBot]
52
+ DependencyResults = Dict[str, str]
53
+ PromptBuilder = Callable[[AgentContext, DependencyResults], Union[str, Awaitable[str]]]
54
+
55
+
56
+ @dataclass
57
+ class AgentTask:
58
+ """Represents a task to be executed by an agent in the Crew."""
59
+ task_id: str
60
+ agent_name: str
61
+ input_data: Any
62
+ dependencies: Set[str] = field(default_factory=set)
63
+ context: Dict[str, Any] = field(default_factory=dict)
64
+ completed: bool = False
65
+ result: Optional[str] = None
66
+ error: Optional[str] = None
67
+ execution_time: float = 0.0
68
+ status: Literal["pending", "running", "completed", "failed"] = "pending"
69
+
70
+ @dataclass
71
+ class FlowContext:
72
+ """
73
+ Maintains the execution context across the workflow.
74
+
75
+ This context object tracks the state of the entire workflow execution,
76
+ including which agents have completed, their results, and any errors.
77
+ It serves as the "memory" of the workflow as it progresses.
78
+ """
79
+ initial_task: str
80
+ results: Dict[str, Any] = field(default_factory=dict)
81
+ responses: Dict[str, Any] = field(default_factory=dict)
82
+ agent_metadata: Dict[str, AgentExecutionInfo] = field(default_factory=dict)
83
+ completion_order: List[str] = field(default_factory=list)
84
+ errors: Dict[str, Exception] = field(default_factory=dict)
85
+ active_tasks: Set[str] = field(default_factory=set)
86
+ completed_tasks: Set[str] = field(default_factory=set)
87
+
88
+ def can_execute(self, agent_name: str, dependencies: Set[str]) -> bool:
89
+ """
90
+ Check if all dependencies are satisfied for an agent to execute.
91
+
92
+ An agent can only execute when all the agents it depends on have
93
+ successfully completed their execution.
94
+ """
95
+ return dependencies.issubset(self.completed_tasks)
96
+
97
+ def mark_completed(
98
+ self,
99
+ agent_name: str,
100
+ result: Any = None,
101
+ response: Any = None,
102
+ metadata: Optional[AgentExecutionInfo] = None
103
+ ):
104
+ """
105
+ Mark an agent as completed and store its result.
106
+
107
+ This updates the workflow state to reflect that an agent has finished,
108
+ making it possible for dependent agents to begin execution.
109
+ """
110
+ self.completed_tasks.add(agent_name)
111
+ self.completion_order.append(agent_name)
112
+ self.active_tasks.discard(agent_name)
113
+ if result is not None:
114
+ self.results[agent_name] = result
115
+ if response is not None:
116
+ self.responses[agent_name] = response
117
+ if metadata is not None:
118
+ self.agent_metadata[agent_name] = metadata
119
+
120
+ def get_input_for_agent(self, agent_name: str, dependencies: Set[str]) -> Dict[str, Any]:
121
+ """
122
+ Prepare input data for an agent based on its dependencies.
123
+
124
+ This method aggregates the results from all dependency agents and
125
+ packages them in a way that the target agent can use. If the agent
126
+ has no dependencies, it receives the initial task.
127
+ """
128
+ if not dependencies:
129
+ return {"task": self.initial_task}
130
+
131
+ return {
132
+ "task": self.initial_task,
133
+ "dependencies": {
134
+ dep: self.results.get(dep)
135
+ for dep in dependencies
136
+ if dep in self.results
137
+ }
138
+ }
139
+
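For illustration, the payload assembled by get_input_for_agent for an agent with two dependencies would look roughly like this (the agent names and task text are hypothetical, not taken from the package):

    # context.get_input_for_agent("writer", {"researcher", "analyst"})
    {
        "task": "Produce a market summary",
        "dependencies": {
            "researcher": "<output stored for researcher>",
            "analyst": "<output stored for analyst>",
        }
    }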
140
+ class AgentNode:
141
+ """Represents a node in the workflow graph (an agent with its dependencies)."""
142
+
143
+ def __init__(self, agent: Union[BasicAgent, AbstractBot], dependencies: Optional[Set[str]] = None):
144
+ self.agent = agent
145
+ self.dependencies = dependencies or set()
146
+ self.successors: Set[str] = set()
147
+
148
+ def _format_prompt(self, input_data: Dict[str, Any]) -> str:
149
+ """
150
+ Format the input data dictionary into a string prompt.
151
+
152
+ This method converts the structured input data (task + dependencies)
153
+ into a natural language prompt that the agent can understand.
154
+ """
155
+ if not input_data:
156
+ return ""
157
+
158
+ # Start with the main task
159
+ task = input_data.get("task", "")
160
+
161
+ # If there are no dependencies, just return the task
162
+ dependencies = input_data.get("dependencies", {})
163
+ if not dependencies:
164
+ return task
165
+
166
+ # Build a prompt that includes results from dependent agents
167
+ prompt_parts = [f"Task: {task}\n", "\nContext from previous agents:\n"]
168
+
169
+ for dep_agent, dep_result in dependencies.items():
170
+ prompt_parts.extend((f"\n--- From {dep_agent} ---", str(dep_result), ""))
171
+
172
+ return "\n".join(prompt_parts)
173
+
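Continuing the hypothetical example above, _format_prompt flattens that payload into a plain-text prompt of roughly this shape before it is sent to the agent:

    Task: Produce a market summary

    Context from previous agents:

    --- From researcher ---
    <output stored for researcher>

    --- From analyst ---
    <output stored for analyst>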
174
+ async def execute(self, context: FlowContext) -> Any:
175
+ """Execute the agent with context from previous agents."""
176
+ # Get input data based on dependencies
177
+ input_data = context.get_input_for_agent(self.agent.name, self.dependencies)
178
+
179
+ # If this is the first agent, use initial task
180
+ if not input_data and not self.dependencies:
181
+ input_data = {"task": context.initial_task}
182
+
183
+ # Execute the agent and track time
184
+ start_time = asyncio.get_event_loop().time()
185
+ prompt = self._format_prompt(input_data)
186
+ try:
187
+ response = await self.agent.ask(question=prompt)
188
+ end_time = asyncio.get_event_loop().time()
189
+ execution_time = end_time - start_time
190
+ # Extract output text
191
+ output = response.content if hasattr(response, 'content') else str(response.output if hasattr(response, 'output') else response)
192
+
193
+ return {
194
+ 'response': response,
195
+ 'output': output,
196
+ 'execution_time': end_time - start_time,
197
+ 'prompt': prompt
198
+ }
199
+
200
+ except Exception as e:
201
+ end_time = asyncio.get_event_loop().time()
202
+ execution_time = end_time - start_time
203
+ # Build agent metadata for failed execution
204
+ # TODO: Record the execution error
205
+ agent_info = build_agent_metadata(
206
+ agent_id=self.agent.name,
207
+ agent=self.agent,
208
+ response=None,
209
+ output=None,
210
+ execution_time=execution_time,
211
+ status='failed',
212
+ error=str(e)
213
+ )
214
+ raise
215
+
216
+
217
+ class AgentCrew:
218
+ """
219
+ Enhanced AgentCrew supporting multiple execution modes.
220
+
221
+ This crew orchestrator provides multiple ways to execute agents:
222
+
223
+ 1. SEQUENTIAL (run_sequential): Agents execute in a pipeline, where each
224
+ agent processes the output of the previous agent. This is useful for
225
+ multi-stage processing where each stage refines or transforms the data.
226
+
227
+ 2. PARALLEL (run_parallel): Multiple agents execute simultaneously on
228
+ different tasks using asyncio.gather(). This is useful when you have
229
+ multiple independent analyses or tasks that can be performed concurrently.
230
+
231
+ 3. FLOW (run_flow): Agents execute based on a dependency graph (DAG),
232
+ automatically parallelizing independent agents while respecting dependencies.
233
+ This is the most flexible mode, supporting complex workflows like:
234
+ - One agent → multiple agents (fan-out/parallel processing)
235
+ - Multiple agents → one agent (fan-in/synchronization)
236
+ - Complex multi-stage pipelines with parallel branches
237
+
238
+ 4. LOOP (run_loop): Agents execute sequentially in repeated iterations,
239
+ reusing the previous iteration's output as the next iteration's input until
240
+ an LLM-evaluated stopping condition is satisfied or a safety limit is
241
+ reached.
242
+
243
+ Features:
244
+ - Shared tool manager across agents
245
+ - Comprehensive execution logging
246
+ - Result aggregation and context passing
247
+ - Error handling and recovery
248
+ - Optional LLM for result synthesis
249
+ - Rate limiting with semaphores
250
+ - Circular dependency detection
251
+ """
252
+
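As a minimal usage sketch (the agent instances, task text, and identifiers below are illustrative assumptions, not part of the package), a crew can be wired for flow execution and then run in any of the modes described above; run_sequential's parameters match its definition later in this file:

    # Hypothetical, pre-configured agents
    crew = AgentCrew(name="ReportCrew", agents=[researcher, analyst, reviewer, writer])

    # Flow mode: researcher fans out to analyst and reviewer, which fan back in to writer
    crew.task_flow(researcher, [analyst, reviewer])
    crew.task_flow([analyst, reviewer], writer)

    # Sequential mode (pipeline) with optional LLM synthesis of the combined results
    result = await crew.run_sequential(
        query="Summarize the quarterly sales report",
        agent_sequence=["researcher", "analyst", "writer"],
        user_id="demo_user",
        session_id="demo_session",
        synthesis_prompt="Combine the findings into a single executive summary",
    )
    print(result.output)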
253
+ # Default truncation length for logging and summaries
254
+ default_truncation_length: int = 200
255
+
256
+ def __init__(
257
+ self,
258
+ name: str = "AgentCrew",
259
+ agents: List[Union[BasicAgent, AbstractBot]] = None,
260
+ shared_tool_manager: ToolManager = None,
261
+ max_parallel_tasks: int = 10,
262
+ llm: Optional[Union[str, AbstractClient]] = None,
263
+ auto_configure: bool = True,
264
+ truncation_length: Optional[int] = None,
265
+ truncate_context_summary: bool = True,
266
+ embedding_model: Any = None,
267
+ enable_analysis: bool = False,
268
+ dimension: int = 384, # NEW
269
+ index_type: str = "Flat", # NEW: "Flat", "FlatIP", or "HNSW"
270
+ **kwargs
271
+ ):
272
+ """
273
+ Initialize the AgentCrew.
274
+
275
+ Args:
276
+ name: Name of the crew
277
+ agents: List of agents to add to the crew
278
+ shared_tool_manager: Optional shared tool manager for all agents
279
+ max_parallel_tasks: Maximum number of parallel tasks (for rate limiting)
+ llm: Optional LLM client (or client name from SUPPORTED_CLIENTS) used for orchestration and synthesis
+ auto_configure: Allow the crew to configure agents automatically
+ truncation_length: Maximum text length kept in logs and context summaries
+ truncate_context_summary: Whether to truncate per-agent context summaries
+ embedding_model: Embedding model used by the execution memory
+ enable_analysis: Enable vectorized analysis of execution memory
+ dimension: Embedding dimension for the execution memory index
+ index_type: Vector index type ("Flat", "FlatIP", or "HNSW")
280
+ """
281
+ self.name = name or 'AgentCrew'
282
+ self.agents: Dict[str, Union[BasicAgent, AbstractBot]] = {}
283
+ self._auto_configure: bool = auto_configure
284
+ # internal tools:
285
+ self.tools: List[AbstractTool] = []
286
+ self.shared_tool_manager = shared_tool_manager or ToolManager()
287
+ self.max_parallel_tasks = max_parallel_tasks
288
+ self.execution_log: List[Dict[str, Any]] = []
289
+ self.logger = logging.getLogger(f"parrot.crews.{self.name}")
290
+ self.semaphore = asyncio.Semaphore(max_parallel_tasks)
291
+ if isinstance(llm, str):
292
+ self._llm = SUPPORTED_CLIENTS.get(llm.lower(), None)
293
+ elif isinstance(llm, AbstractClient):
294
+ self._llm = llm # Optional LLM for orchestration tasks
295
+ else:
296
+ self._llm = None
297
+ self.truncation_length = (
298
+ truncation_length
299
+ if truncation_length is not None
300
+ else self.__class__.default_truncation_length
301
+ )
302
+ self.truncate_context_summary = truncate_context_summary
303
+ # Workflow graph for flow-based execution
304
+ self.workflow_graph: Dict[str, AgentNode] = {}
305
+ self.initial_agent: Optional[str] = None
306
+ self.final_agents: Set[str] = set()
307
+ self.use_tqdm: bool = kwargs.get('use_tqdm', True)
308
+ # Internal tracking of per-agent initialization guards
309
+ self._agent_locks: Dict[int, asyncio.Lock] = {}
310
+ # Execution Memory:
311
+ self.enable_analysis = enable_analysis
312
+ self.embedding_model = embedding_model if enable_analysis else None
313
+ self.execution_memory = ExecutionMemory(
314
+ embedding_model=embedding_model,
315
+ dimension=dimension,
316
+ index_type=index_type
317
+ )
318
+ self._summary = None
319
+ self.last_crew_result: Optional[CrewResult] = None
320
+ # Add agents if provided
321
+ if agents:
322
+ for agent in agents:
323
+ self.add_agent(agent)
324
+ self.workflow_graph[agent.name] = AgentNode(agent)
325
+
326
+ def _register_agents_as_tools(self):
327
+ """
328
+ Register each agent as a tool in the LLM's tool manager.
329
+ """
330
+ if not self._llm:
331
+ return
332
+
333
+ for agent_id, agent in self.agents.items():
334
+ try:
335
+ agent_tool = agent.as_tool(
336
+ tool_name=f"agent_{agent_id}",
337
+ tool_description=(
338
+ f"Agent {agent.name}: {agent.description} "
339
+ f"Re-execute to gather additional information. "
340
+ f"Use when the user needs more details or updated data from this agent."
341
+ ),
342
+ use_conversation_method=False # no conversation history
343
+ )
344
+
345
+ # Add to LLM's tool manager
346
+ if hasattr(self._llm, 'tool_manager'):
347
+ self._llm.tool_manager.add_tool(agent_tool)
348
+
349
+ self.logger.debug(
350
+ f"Registered {agent.name} as tool 'agent_{agent_id}' in LLM orchestrator"
351
+ )
352
+ except Exception as e:
353
+ self.logger.warning(
354
+ f"Failed to register {agent.name} as tool: {e}"
355
+ )
356
+
357
+ def add_agent(self, agent: Union[BasicAgent, AbstractBot], agent_id: str = None) -> None:
358
+ """Add an agent to the crew."""
359
+ agent_id = agent_id or agent.name
360
+ self.agents[agent_id] = agent
361
+
362
+ # Share tools with new agent
363
+ if self.shared_tool_manager:
364
+ for tool_name in self.shared_tool_manager.list_tools():
365
+ tool = self.shared_tool_manager.get_tool(tool_name)
366
+ if tool and not agent.tool_manager.get_tool(tool_name):
367
+ agent.tool_manager.add_tool(tool, tool_name)
368
+
369
+ # wrap agent as tool for use by main Agent:
370
+ agent_tool = AgentTool(
371
+ agent=agent,
372
+ tool_name=agent_id,
373
+ tool_description=getattr(agent, 'description', f"Execute {agent.name}"),
374
+ use_conversation_method=True,
375
+ execution_memory=self.execution_memory
376
+ )
377
+
378
+ self.tools.append(agent_tool)
379
+ self.logger.info(f"Added agent '{agent_id}' to crew")
380
+
381
+ # Register as tool in LLM orchestrator (if exists)
382
+ if self._llm:
383
+ self._register_agents_as_tools()
384
+
385
+ self.logger.info(f"Added agent '{agent_id}' to crew")
386
+
387
+ def remove_agent(self, agent_id: str) -> bool:
388
+ """Remove an agent from the crew."""
389
+ if agent_id in self.agents:
390
+ del self.agents[agent_id]
391
+ self.logger.info(
392
+ f"Removed agent '{agent_id}' from crew"
393
+ )
394
+ return True
395
+ return False
396
+
397
+ def add_shared_tool(self, tool: AbstractTool, tool_name: str = None) -> None:
398
+ """Add a tool shared across all agents."""
399
+ self.shared_tool_manager.add_tool(tool, tool_name)
400
+
401
+ # Add to all existing agents
402
+ for agent in self.agents.values():
403
+ if not agent.tool_manager.get_tool(tool_name or tool.name):
404
+ agent.tool_manager.add_tool(tool, tool_name)
405
+
406
+ def task_flow(self, source_agent: Any, target_agents: Any):
407
+ """
408
+ Define a task flow from source agent(s) to target agent(s).
409
+
410
+ This method builds the workflow graph by defining dependencies between agents.
411
+ It supports flexible configurations for different workflow patterns:
412
+
413
+ - Single to multiple (fan-out): One agent's output goes to multiple agents
414
+ for parallel processing
415
+ - Multiple to single (fan-in): Multiple agents' outputs are aggregated by
416
+ a single agent
417
+ - Single to single: Simple sequential dependency
418
+
419
+ The workflow graph is used by run_flow() to determine execution order and
420
+ identify opportunities for parallel execution.
421
+
422
+ Args:
423
+ source_agent: The agent (or list of agents) that must complete first
424
+ target_agents: The agent (or list of agents) that depend on source completion
425
+
426
+ Examples:
427
+ # Single source to multiple targets (parallel execution after writer completes)
428
+ crew.task_flow(writer, [editor1, editor2])
429
+
430
+ # Multiple sources to single target (final_reviewer waits for both editors)
431
+ crew.task_flow([editor1, editor2], final_reviewer)
432
+
433
+ # Single to single (simple sequential dependency)
434
+ crew.task_flow(writer, editor1)
435
+ """
436
+ # Normalize inputs to lists for uniform processing
437
+ sources = source_agent if isinstance(source_agent, list) else [source_agent]
438
+ targets = target_agents if isinstance(target_agents, list) else [target_agents]
439
+
440
+ # Build the dependency graph
441
+ for source in sources:
442
+ source_name = source.name
443
+ node = self.workflow_graph[source_name]
444
+
445
+ for target in targets:
446
+ target_name = target.name
447
+ target_node = self.workflow_graph[target_name]
448
+ # Add dependency: target depends on source
449
+ # This means target cannot execute until source completes
450
+ target_node.dependencies.add(source_name)
451
+ # Track successors for the source
452
+ # This helps us traverse the graph forward
453
+ node.successors.add(target_name)
454
+
455
+ # Automatically detect initial and final agents based on the graph structure
456
+ self._update_flow_metadata()
457
+
458
+ def _update_flow_metadata(self):
459
+ """
460
+ Update metadata about the workflow (initial and final agents).
461
+
462
+ Initial agents are those with no dependencies - they can start immediately.
463
+ Final agents are those with no successors - the workflow is complete when they finish.
464
+
465
+ This metadata is used by run_flow() to know when to start and when to stop.
466
+ """
467
+ # Find agents with no dependencies (initial agents)
468
+ agents_with_deps = {
469
+ name for name, node in self.workflow_graph.items()
470
+ if node.dependencies
471
+ }
472
+ potential_initial = set(self.workflow_graph.keys()) - agents_with_deps
473
+
474
+ if potential_initial and not self.initial_agent:
475
+ # For now, assume single entry point. Could be extended for multiple entry points.
476
+ self.initial_agent = next(iter(potential_initial))
477
+
478
+ # Find agents with no successors (final agents)
479
+ self.final_agents = {
480
+ name for name, node in self.workflow_graph.items()
481
+ if not node.successors
482
+ }
483
+
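To make the flow metadata concrete: in the hypothetical diamond flow sketched earlier (researcher feeding analyst and reviewer, which both feed writer), the detection above yields:

    # researcher has no dependencies -> initial_agent = "researcher"
    # writer has no successors       -> final_agents  = {"writer"}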
484
+ async def _execute_parallel_agents(
485
+ self,
486
+ agent_names: Set[str],
487
+ context: FlowContext
488
+ ) -> Dict[str, Any]:
489
+ """
490
+ Execute multiple agents in parallel and collect their results.
491
+
492
+ This is the internal method that enables parallel execution of agents
493
+ within the flow-based execution mode. It's called by run_flow() whenever
494
+ multiple agents are ready to execute simultaneously.
495
+
496
+ Args:
497
+ agent_names: Set of agent names that are ready to execute
498
+ context: The current FlowContext tracking execution state
499
+ Returns:
500
+ Dict mapping each executed agent's name to its output
501
+ """
502
+ tasks = []
503
+ agent_name_map = []
504
+
505
+ for agent_name in agent_names:
506
+ node = self.workflow_graph[agent_name]
507
+ # Resolve the agent instance from its workflow node
508
+ agent = node.agent
509
+ if agent_name not in self.agents:
510
+ self.logger.warning(
511
+ f"Agent '{agent_name}' not found in crew, skipping"
512
+ )
513
+ continue
514
+ await self._ensure_agent_ready(agent)
515
+ # Double-check dependencies are satisfied (defensive programming)
516
+ if context.can_execute(agent_name, node.dependencies):
517
+ context.active_tasks.add(agent_name)
518
+ tasks.append(node.execute(context))
519
+ agent_name_map.append(agent_name)
520
+
521
+ # Execute all tasks in parallel
522
+ results = await asyncio.gather(*tasks, return_exceptions=True)
523
+
524
+ # Process results and handle errors
525
+ execution_results = {}
526
+ for agent_name, result in zip(agent_name_map, results):
527
+ node = self.workflow_graph[agent_name]
528
+ if isinstance(result, Exception):
529
+ context.errors[agent_name] = result
530
+ context.active_tasks.discard(agent_name)
531
+ self.logger.error(
532
+ f"Error executing {agent_name}: {result}"
533
+ )
534
+ context.responses[agent_name] = None
535
+ context.agent_metadata[agent_name] = build_agent_metadata(
536
+ agent_name,
537
+ node.agent,
538
+ None,
539
+ None,
540
+ 0.0,
541
+ 'failed',
542
+ str(result)
543
+ )
544
+ self.execution_log.append({
545
+ 'agent_id': agent_name,
546
+ 'agent_name': node.agent.name,
547
+ 'output': str(result),
548
+ 'execution_time': 0,
549
+ 'success': False,
550
+ 'error': str(result)
551
+ })
552
+
553
+ # Save failed execution to memory if context has execution_memory
554
+ if hasattr(context, 'execution_memory') and context.execution_memory:
555
+ agent_result = AgentResult(
556
+ agent_id=agent_name,
557
+ agent_name=node.agent.name,
558
+ task=context.initial_task,
559
+ result=str(result),
560
+ metadata={
561
+ 'success': False,
562
+ 'error': str(result),
563
+ 'mode': 'flow',
564
+ 'user_id': getattr(context, 'user_id', 'crew_user'),
565
+ 'session_id': getattr(context, 'session_id', 'unknown')
566
+ },
567
+ execution_time=0.0
568
+ )
569
+ context.execution_memory.add_result(
570
+ agent_result,
571
+ vectorize=False
572
+ )
573
+ else:
574
+ output = result.get('output') if isinstance(result, dict) else result
575
+ raw_response = result.get('response') if isinstance(result, dict) else result
576
+ execution_time = result.get('execution_time', 0.0) if isinstance(result, dict) else 0.0
577
+ metadata = build_agent_metadata(
578
+ agent_name,
579
+ node.agent,
580
+ raw_response,
581
+ output,
582
+ execution_time,
583
+ 'completed'
584
+ )
585
+ context.mark_completed(
586
+ agent_name,
587
+ output,
588
+ raw_response,
589
+ metadata
590
+ )
591
+ context.active_tasks.discard(agent_name)
592
+ execution_results[agent_name] = output
593
+ self.execution_log.append({
594
+ 'agent_id': agent_name,
595
+ 'agent_name': node.agent.name,
596
+ 'input': self._truncate_text(result.get('prompt', '') if isinstance(result, dict) else ''),
597
+ 'output': self._truncate_text(output),
598
+ 'execution_time': execution_time,
599
+ 'success': True
600
+ })
601
+
602
+ # Save successful execution to memory if context has execution_memory
603
+ if hasattr(context, 'execution_memory') and context.execution_memory:
604
+ agent_input = result.get('prompt', '') if isinstance(result, dict) else context.initial_task
605
+ agent_result = AgentResult(
606
+ agent_id=agent_name,
607
+ agent_name=node.agent.name,
608
+ task=agent_input,
609
+ result=output,
610
+ metadata={
611
+ 'success': True,
612
+ 'mode': 'flow',
613
+ 'user_id': getattr(context, 'user_id', 'crew_user'),
614
+ 'session_id': getattr(context, 'session_id', 'unknown'),
615
+ 'result_type': type(output).__name__
616
+ },
617
+ execution_time=execution_time
618
+ )
619
+ # Vectorize only if analysis enabled
620
+ context.execution_memory.add_result(
621
+ agent_result,
622
+ vectorize=True
623
+ )
624
+ # Update execution order
625
+ if agent_name not in context.execution_memory.execution_order:
626
+ context.execution_memory.execution_order.append(agent_name)
627
+
628
+ return execution_results
629
+
630
+ async def _get_ready_agents(self, context: FlowContext) -> Set[str]:
631
+ """
632
+ Get all agents that are ready to execute based on their dependencies.
633
+
634
+ An agent is ready if:
635
+ 1. All its dependencies are completed
636
+ 2. It hasn't been executed yet
637
+ 3. It's not currently executing
638
+
639
+ This method is called repeatedly by run_flow() to determine which agents
640
+ can execute in the next wave of parallel execution.
641
+ """
642
+ return {
643
+ agent_name
644
+ for agent_name, node in self.workflow_graph.items()
645
+ if (
646
+ agent_name not in context.completed_tasks
647
+ and agent_name not in context.active_tasks
648
+ and context.can_execute(agent_name, node.dependencies)
649
+ )
650
+ }
651
+
652
+ def _agent_is_configured(self, agent: Union[BasicAgent, AbstractBot]) -> bool:
653
+ """Check if an agent is configured, using a lock to prevent race conditions."""
654
+ status = getattr(agent, "is_configured", False)
655
+ if callable(status):
656
+ with contextlib.suppress(TypeError):
657
+ status = status()
658
+ return bool(status)
659
+
660
+ async def _ensure_agent_ready(self, agent: Union[BasicAgent, AbstractBot]) -> None:
661
+ """Ensure the agent is configured before execution.
662
+
663
+ Agents require their underlying LLM client to be instantiated before
664
+ they can answer questions. Many examples explicitly call
665
+ ``await agent.configure()`` during setup, but it is easy to forget this
666
+ step when building complex flows programmatically. When configuration
667
+ is skipped the agent's ``_llm`` attribute remains ``None`` (or points to
668
+ an un-instantiated client class), leading to runtime errors such as
669
+ ``'NoneType' object does not support the asynchronous context manager
670
+ protocol`` when ``agent.ask`` is executed.
671
+
672
+ To make the crew orchestration more robust we lazily configure agents
673
+ the first time they are used. We guard the configuration with a
674
+ per-agent lock so that concurrent executions of the same agent do not
675
+ race to configure it multiple times.
676
+ """
677
+
678
+ if self._agent_is_configured(agent):
679
+ return
680
+
681
+ agent_id = id(agent)
682
+ lock = self._agent_locks.get(agent_id)
683
+ if lock is None:
684
+ lock = asyncio.Lock()
685
+ self._agent_locks[agent_id] = lock
686
+
687
+ async with lock:
688
+ if not self._agent_is_configured(agent):
689
+ try:
690
+ self.logger.info(
691
+ f"Auto-configuring agent '{agent.name}'"
692
+ )
693
+ await agent.configure()
694
+ self.logger.info(
695
+ f"Agent '{agent.name}' configured successfully"
696
+ )
697
+ except Exception as e:
698
+ self.logger.error(
699
+ f"Failed to configure agent '{agent.name}': {e}",
700
+ exc_info=True,
701
+ )
702
+ raise
703
+
704
+ async def _execute_agent(
705
+ self,
706
+ agent: Union[BasicAgent, AbstractBot],
707
+ query: str,
708
+ session_id: str,
709
+ user_id: str,
710
+ index: int,
711
+ context: AgentContext,
712
+ model: Optional[str] = None,
713
+ max_tokens: Optional[int] = None
714
+ ) -> Any:
715
+ """
716
+ Execute a single agent with proper rate limiting and error handling.
717
+
718
+ This internal method wraps the agent execution with a semaphore for
719
+ rate limiting and handles the different execution methods that agents
720
+ might implement.
721
+ """
722
+ await self._ensure_agent_ready(agent)
723
+ async with self.semaphore:
724
+ if hasattr(agent, 'ask'):
725
+ return await agent.ask(
726
+ question=query,
727
+ session_id=f"{session_id}_agent_{index}",
728
+ user_id=user_id,
729
+ use_conversation_history=True,
730
+ model=model,
731
+ max_tokens=max_tokens,
732
+ **context.shared_data
733
+ )
734
+ if hasattr(agent, 'conversation'):
735
+ return await agent.conversation(
736
+ question=query,
737
+ session_id=f"{session_id}_agent_{index}",
738
+ user_id=user_id,
739
+ use_conversation_history=True,
740
+ model=model,
741
+ max_tokens=max_tokens,
742
+ **context.shared_data
743
+ )
744
+ if hasattr(agent, 'invoke'):
745
+ return await agent.invoke(
746
+ question=query,
747
+ session_id=f"{session_id}_agent_{index}",
748
+ user_id=user_id,
749
+ use_conversation_history=False,
750
+ **context.shared_data
751
+ )
752
+ else:
753
+ raise ValueError(
754
+ f"Agent {agent.name} does not support conversation, ask, or invoke methods"
755
+ )
756
+
757
+ def _extract_result(self, response: Any) -> str:
758
+ """Extract result string from response."""
759
+ if isinstance(response, (AIMessage, AgentResponse)) or hasattr(
760
+ response, 'content'
761
+ ):
762
+ return response.content
763
+ else:
764
+ return str(response)
765
+
766
+ def _build_context_summary(self, context: AgentContext) -> str:
767
+ """Build summary of previous results."""
768
+ summaries = []
769
+ for agent_name, result in context.agent_results.items():
770
+ truncated = self._truncate_text(
771
+ result,
772
+ enabled=self.truncate_context_summary
773
+ )
774
+ summaries.append(f"- {agent_name}: {truncated}")
775
+ return "\n".join(summaries)
776
+
777
+ def _truncate_text(self, text: Optional[str], *, enabled: bool = True) -> str:
778
+ """Truncate text using configured length."""
779
+ if text is None or not enabled:
780
+ return text or ""
781
+
782
+ if self.truncation_length is None or self.truncation_length <= 0:
783
+ return text
784
+
785
+ if len(text) <= self.truncation_length:
786
+ return text
787
+
788
+ return f"{text[:self.truncation_length]}..."
789
+
790
+ def _build_loop_first_agent_prompt(
791
+ self,
792
+ *,
793
+ initial_task: str,
794
+ iteration_input: str,
795
+ iteration_number: int,
796
+ ) -> str:
797
+ """Compose the prompt for the first agent in each loop iteration."""
798
+ if iteration_number == 1:
799
+ return iteration_input
800
+
801
+ return (
802
+ f"Initial task: {initial_task}\n"
803
+ f"This is loop iteration {iteration_number}."
804
+ f"\nPrevious iteration output:\n{iteration_input}"
805
+ )
806
+
807
+ def _build_shared_state_summary(self, shared_state: Dict[str, Any]) -> str:
808
+ """Create a human-readable summary from the shared loop state."""
809
+ history = shared_state.get('history', [])
810
+ if not history:
811
+ return "No prior agent outputs."
812
+
813
+ lines = []
814
+ for entry in history[-10:]:
815
+ iteration = entry.get('iteration')
816
+ agent_id = entry.get('agent_id')
817
+ output = entry.get('output')
818
+ lines.append(
819
+ f"Iteration {iteration} - {agent_id}: {self._truncate_text(str(output))}"
820
+ )
821
+ return "\n".join(lines)
822
+
823
+ async def _evaluate_loop_condition(
824
+ self,
825
+ *,
826
+ condition: str,
827
+ shared_state: Dict[str, Any],
828
+ last_output: Optional[str],
829
+ iteration: int,
830
+ user_id: Optional[str],
831
+ session_id: Optional[str],
832
+ max_tokens: int,
833
+ temperature: float,
834
+ ) -> bool:
835
+ """Ask the configured LLM whether the loop condition has been satisfied."""
836
+ if not condition:
837
+ return False
838
+
839
+ history_summary = []
840
+ for entry in shared_state.get('history', []):
841
+ iteration_no = entry.get('iteration')
842
+ agent_id = entry.get('agent_id')
843
+ output = entry.get('output')
844
+ history_summary.append(
845
+ f"Iteration {iteration_no} - {agent_id}: {output}"
846
+ )
847
+
848
+ history_text = "\n".join(history_summary) or "(no outputs yet)"
849
+ prompt = (
850
+ "You are monitoring an autonomous team of agents running in a loop.\n"
851
+ f"Initial task: {shared_state.get('initial_task')}\n"
852
+ f"Stopping condition: {condition}\n"
853
+ f"Current iteration: {iteration}\n"
854
+ "Shared state history:\n"
855
+ f"{history_text}\n\n"
856
+ f"Most recent output: {last_output}\n\n"
857
+ "Decide if the loop should stop. Respond with a single word:"
858
+ " YES to stop the loop because the condition is met, or NO to"
859
+ " continue running."
860
+ )
861
+
862
+ try:
863
+ async with self._llm as client:
864
+ response = await client.ask(
865
+ prompt=prompt,
866
+ max_tokens=max_tokens,
867
+ temperature=temperature,
868
+ user_id=user_id,
869
+ session_id=f"{session_id}_loop_condition",
870
+ use_conversation_history=False
871
+ )
872
+ except Exception as exc:
873
+ self.logger.error(
874
+ f"Failed to evaluate loop condition with LLM: {exc}",
875
+ exc_info=True
876
+ )
877
+ return False
878
+
879
+ decision_text = self._extract_result(response).strip().lower()
880
+ if not decision_text:
881
+ return False
882
+
883
+ if decision_text.startswith('yes') or ' stop' in decision_text:
884
+ return True
885
+
886
+ return False
887
+
888
+ async def _synthesize_results(
889
+ self,
890
+ crew_result: CrewResult,
891
+ synthesis_prompt: Optional[str] = None,
892
+ user_id: Optional[str] = None,
893
+ session_id: Optional[str] = None,
894
+ max_tokens: int = 4096,
895
+ temperature: float = 0.1,
896
+ **kwargs
897
+ ) -> CrewResult:
898
+ """
899
+ Synthesize crew results using LLM if synthesis_prompt is provided.
900
+
901
+ This method takes the results from any execution mode and uses an LLM
902
+ to create a synthesized, coherent response.
903
+
904
+ Args:
905
+ crew_result: Result from run_sequential/parallel/flow
906
+ synthesis_prompt: Prompt for synthesis (if None, returns original result)
907
+ user_id: User identifier
908
+ session_id: Session identifier
909
+ max_tokens: Max tokens for synthesis
910
+ temperature: Temperature for synthesis
911
+ **kwargs: Additional LLM arguments
912
+
913
+ Returns:
914
+ CrewResult with synthesized output if synthesis was performed,
915
+ otherwise returns original crew_result
916
+ """
917
+ # If no synthesis prompt or no LLM, return original result
918
+ if not synthesis_prompt or not self._llm:
919
+ return crew_result
920
+
921
+ # Build context from agent results
922
+ context_parts = ["# Agent Execution Results\n"]
923
+
924
+ for i, (agent_id, result) in enumerate(zip(crew_result.agent_ids, crew_result.results)):
925
+ agent = self.agents.get(agent_id)
926
+ agent_name = agent.name if agent else agent_id
927
+
928
+ context_parts.extend([
929
+ f"\n## Agent {i+1}: {agent_name}\n",
930
+ str(result),
931
+ "\n---\n"
932
+ ])
933
+
934
+ research_context = "\n".join(context_parts)
935
+
936
+ # Build final prompt
937
+ final_prompt = f"""{research_context}
938
+
939
+ {synthesis_prompt}"""
940
+
941
+ # Call LLM for synthesis
942
+ self.logger.info("Synthesizing results with LLM")
943
+
944
+ try:
945
+ async with self._llm as client:
946
+ synthesis_response = await client.ask(
947
+ prompt=final_prompt,
948
+ max_tokens=max_tokens,
949
+ temperature=temperature,
950
+ user_id=user_id or 'crew_user',
951
+ session_id=session_id or str(uuid.uuid4()),
952
+ use_conversation_history=False,
953
+ **kwargs
954
+ )
955
+
956
+ # Extract synthesized content
957
+ synthesized_output = (
958
+ synthesis_response.content
959
+ if hasattr(synthesis_response, 'content')
960
+ else str(synthesis_response)
961
+ )
962
+
963
+ # Return updated CrewResult with synthesized output
964
+ return CrewResult(
965
+ output=synthesized_output, # Synthesized output
966
+ response=crew_result.response,
967
+ results=crew_result.results, # Keep original results
968
+ agent_ids=crew_result.agent_ids,
969
+ agents=crew_result.agents,
970
+ errors=crew_result.errors,
971
+ execution_log=crew_result.execution_log,
972
+ total_time=crew_result.total_time,
973
+ status=crew_result.status,
974
+ metadata={
975
+ **crew_result.metadata,
976
+ 'synthesized': True,
977
+ 'synthesis_prompt': synthesis_prompt,
978
+ 'original_output': crew_result.output
979
+ }
980
+ )
981
+
982
+ except Exception as e:
983
+ self.logger.error(f"Error during synthesis: {e}", exc_info=True)
984
+ # Return original result if synthesis fails
985
+ return crew_result
986
+
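Before the synthesis call, the agent outputs are flattened into one markdown-style context followed by the synthesis prompt. A standalone sketch of that assembly step (the function name and sample values are illustrative):

```python
# Standalone sketch of how the synthesis context is assembled above.
from typing import List

def build_synthesis_prompt(agent_names: List[str], results: List[str], synthesis_prompt: str) -> str:
    parts = ["# Agent Execution Results\n"]
    for i, (name, result) in enumerate(zip(agent_names, results)):
        parts.extend([f"\n## Agent {i + 1}: {name}\n", str(result), "\n---\n"])
    return "\n".join(parts) + f"\n\n{synthesis_prompt}"

print(build_synthesis_prompt(
    ["researcher", "writer"],
    ["Raw findings about the release", "Polished draft of the announcement"],
    "Merge everything into a single executive summary.",
))
```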
987
+ # -------------------------------
988
+ # Execution Methods (run_parallel, sequential, loop, flow)
989
+ # -------------------------------
990
+
991
+ async def run_sequential(
992
+ self,
993
+ query: str,
994
+ agent_sequence: List[str] = None,
995
+ user_id: str = None,
996
+ session_id: str = None,
997
+ pass_full_context: bool = True,
998
+ synthesis_prompt: Optional[str] = None,
999
+ max_tokens: int = 4096,
1000
+ temperature: float = 0.1,
1001
+ model: Optional[str] = 'gemini-2.5-pro',
1002
+ **kwargs
1003
+ ) -> CrewResult:
1004
+ """
1005
+ Execute agents in sequence (pipeline pattern).
1006
+
1007
+ In sequential execution, agents form a pipeline where each agent processes
1008
+ the output of the previous agent. This is like an assembly line where each
1009
+ station performs its specific task on the work-in-progress before passing
1010
+ it to the next station.
1011
+
1012
+ This mode is useful when:
1013
+ - Each agent refines or transforms the previous agent's output
1014
+ - You have a clear multi-stage process (e.g., research → summarize → format)
1015
+ - Later agents need the complete context of all previous work
1016
+
1017
+ Args:
1018
+ query: The initial query/task to start the pipeline
1019
+ agent_sequence: Ordered list of agent IDs to execute (None = all agents in order)
1020
+ user_id: User identifier for tracking and logging
1021
+ session_id: Session identifier for conversation history
1022
+ pass_full_context: If True, each agent sees all previous results;
1023
+ if False, each agent only sees the immediately previous result
1024
+ synthesis_prompt: Optional prompt to synthesize all results with LLM
1025
+ max_tokens: Max tokens for synthesis (if synthesis_prompt provided)
1026
+             temperature: Temperature for synthesis LLM
1027
+             model: Optional model identifier forwarded to each agent execution
+             **kwargs: Additional arguments passed to each agent
1028
+
1029
+ Returns:
1030
+             CrewResult containing:
1031
+             - output: The output from the last agent in the pipeline
1032
+             - results / agent_ids: Per-agent results in execution order
1033
+             - execution_log / errors: Detailed log and any per-agent errors
1034
+             - status / total_time / metadata: Overall run status and timing
1035
+ """
1036
+ if not self.agents:
1037
+ return CrewResult(
1038
+ output='No agents in crew',
1039
+ execution_log=[],
1040
+ status='failed',
1041
+ total_time=0.0,
1042
+ metadata={'mode': 'sequential'}
1043
+ )
1044
+
1045
+ # Determine agent sequence
1046
+ if agent_sequence is None:
1047
+ agent_sequence = list(self.agents.keys())
1048
+
1049
+ # Setup session identifiers
1050
+ session_id = session_id or str(uuid.uuid4())
1051
+ user_id = user_id or 'crew_user'
1052
+
1053
+ # Initialize execution memory
1054
+ self.execution_memory = ExecutionMemory(
1055
+ original_query=query,
1056
+ embedding_model=self.embedding_model if self.enable_analysis else None,
1057
+ dimension=getattr(self, 'dimension', 384),
1058
+ index_type=getattr(self, 'index_type', 'Flat')
1059
+ )
1060
+ # Set execution order for sequential mode
1061
+ agent_sequence_ids = agent_sequence if agent_sequence is not None else list(self.agents.keys())
1062
+ self.execution_memory.execution_order = [
1063
+ agent_id for agent_id in agent_sequence_ids
1064
+ if agent_id in self.agents
1065
+ ]
1066
+
1067
+ # Initialize context to track execution across agents
1068
+ current_input = query
1069
+ crew_context = AgentContext(
1070
+ user_id=user_id,
1071
+ session_id=session_id,
1072
+ original_query=query,
1073
+ shared_data={
1074
+ **kwargs,
1075
+ 'execution_memory': self.execution_memory,
1076
+ },
1077
+ agent_results={}
1078
+ )
1079
+
1080
+ self.execution_log = []
1081
+ start_time = asyncio.get_event_loop().time()
1082
+
1083
+ responses: Dict[str, Any] = {}
1084
+ results: List[Any] = []
1085
+ agent_ids: List[str] = []
1086
+ agents_info: List[AgentExecutionInfo] = []
1087
+ errors: Dict[str, str] = {}
1088
+ success_count = 0
1089
+ failure_count = 0
1090
+
1091
+ # Execute agents in sequence
1092
+ for i, agent_id in enumerate(agent_sequence):
1093
+ if agent_id not in self.agents:
1094
+ self.logger.warning(f"Agent '{agent_id}' not found in crew, skipping")
1095
+ continue
1096
+
1097
+ agent = self.agents[agent_id]
1098
+
1099
+ try:
1100
+ agent_start_time = asyncio.get_event_loop().time()
1101
+
1102
+ # Prepare input based on context passing mode
1103
+ if i == 0:
1104
+ # First agent gets the initial query
1105
+ agent_input = query
1106
+ elif pass_full_context:
1107
+ # Pass full context of all previous agents' work
1108
+ context_summary = self._build_context_summary(crew_context)
1109
+ agent_input = f"""Original query: {query}
1110
+ Previous processing:
1111
+ {context_summary}
1112
+
1113
+ Current task: {current_input}"""
1114
+ else:
1115
+ # Pass only the immediately previous result
1116
+ agent_input = current_input
1117
+
1118
+ # Execute agent
1119
+ response = await self._execute_agent(
1120
+ agent, agent_input, session_id, user_id, i, crew_context, model, max_tokens
1121
+ )
1122
+
1123
+ result = self._extract_result(response)
1124
+ agent_end_time = asyncio.get_event_loop().time()
1125
+ execution_time = agent_end_time - agent_start_time
1126
+
1127
+ # Log execution details
1128
+ log_entry = {
1129
+ 'agent_id': agent_id,
1130
+ 'agent_name': agent.name,
1131
+ 'agent_index': i,
1132
+ 'input': self._truncate_text(agent_input),
1133
+ 'output': self._truncate_text(result),
1134
+ 'full_output': result,
1135
+ 'execution_time': execution_time,
1136
+ 'success': True
1137
+ }
1138
+ self.execution_log.append(log_entry)
1139
+
1140
+ # Store result and prepare for next agent
1141
+ crew_context.agent_results[agent_id] = result
1142
+ current_input = result
1143
+ responses[agent_id] = response
1144
+ agents_info.append(
1145
+ build_agent_metadata(
1146
+ agent_id,
1147
+ agent,
1148
+ response,
1149
+ result,
1150
+ execution_time,
1151
+ 'completed'
1152
+ )
1153
+ )
1154
+ results.append(result)
1155
+ agent_ids.append(agent_id)
1156
+
1157
+ # Save successful execution to memory
1158
+ agent_result = AgentResult(
1159
+ agent_id=agent_id,
1160
+ agent_name=agent.name,
1161
+ task=agent_input,
1162
+ result=result,
1163
+ metadata={
1164
+ 'success': True,
1165
+ 'mode': 'sequential',
1166
+ 'user_id': user_id,
1167
+ 'session_id': session_id,
1168
+ 'index': i,
1169
+ 'result_type': type(result).__name__
1170
+ },
1171
+ execution_time=execution_time
1172
+ )
1173
+ # Vectorize only if analysis enabled
1174
+ self.execution_memory.add_result(
1175
+ agent_result,
1176
+ vectorize=True
1177
+ )
1178
+
1179
+ success_count += 1
1180
+
1181
+ except Exception as e:
1182
+ error_msg = f"Error executing agent {agent_id}: {str(e)}"
1183
+ self.logger.error(error_msg, exc_info=True)
1184
+
1185
+ log_entry = {
1186
+ 'agent_id': agent_id,
1187
+ 'agent_name': agent.name,
1188
+ 'agent_index': i,
1189
+ 'input': current_input,
1190
+ 'output': error_msg,
1191
+ 'execution_time': 0,
1192
+ 'success': False,
1193
+ 'error': str(e)
1194
+ }
1195
+ self.execution_log.append(log_entry)
1196
+ current_input = error_msg
1197
+ errors[agent_id] = str(e)
1198
+ agents_info.append(
1199
+ build_agent_metadata(
1200
+ agent_id,
1201
+ agent,
1202
+ None,
1203
+ error_msg,
1204
+ 0.0,
1205
+ 'failed',
1206
+ str(e)
1207
+ )
1208
+ )
1209
+ results.append(error_msg)
1210
+ agent_ids.append(agent_id)
1211
+
1212
+ # Save failed execution to memory
1213
+ agent_result = AgentResult(
1214
+ agent_id=agent_id,
1215
+ agent_name=agent.name,
1216
+ task=current_input,
1217
+ result=error_msg,
1218
+ metadata={
1219
+ 'success': False,
1220
+ 'error': str(e),
1221
+ 'mode': 'sequential',
1222
+ 'user_id': user_id,
1223
+ 'session_id': session_id,
1224
+ 'index': i
1225
+ },
1226
+ execution_time=0.0
1227
+ )
1228
+ self.execution_memory.add_result(
1229
+ agent_result,
1230
+ vectorize=False
1231
+ )
1232
+
1233
+ failure_count += 1
1234
+
1235
+ end_time = asyncio.get_event_loop().time()
1236
+ total_time = end_time - start_time
1237
+ status = determine_run_status(success_count, failure_count)
1238
+
1239
+ result = CrewResult(
1240
+ output=current_input,
1241
+ response=responses,
1242
+ results=results,
1243
+ agent_ids=agent_ids,
1244
+ agents=agents_info,
1245
+ errors=errors,
1246
+ execution_log=self.execution_log,
1247
+ total_time=total_time,
1248
+ status=status,
1249
+ metadata={'mode': 'sequential', 'agent_sequence': agent_sequence}
1250
+ )
1251
+ if synthesis_prompt:
1252
+ result = await self._synthesize_results(
1253
+ crew_result=result,
1254
+ synthesis_prompt=synthesis_prompt,
1255
+ user_id=user_id,
1256
+ session_id=session_id,
1257
+ max_tokens=max_tokens,
1258
+ temperature=temperature,
1259
+ **kwargs
1260
+ )
1261
+
1262
+ return result
1263
+
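A minimal usage sketch of the sequential pipeline, assuming an already-constructed `AgentCrew` instance named `crew` with agents registered under the ids `'researcher'` and `'writer'` (the ids and task text are illustrative; the keyword arguments mirror the signature above):

```python
# Hypothetical usage sketch for run_sequential; `crew` must be built elsewhere.
import asyncio

async def run_pipeline(crew):
    result = await crew.run_sequential(
        query="Summarize what changed in the latest release",
        agent_sequence=['researcher', 'writer'],  # ordered pipeline
        pass_full_context=True,                   # later agents see all prior outputs
        synthesis_prompt="Condense the pipeline output into a short brief",
    )
    print(result.status, f"{result.total_time:.2f}s")
    return result.output  # final (or synthesized) text

# asyncio.run(run_pipeline(crew))
```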
1264
+ async def run_loop(
1265
+ self,
1266
+ initial_task: str,
1267
+ condition: str,
1268
+ agent_sequence: Optional[List[str]] = None,
1269
+ max_iterations: int = 2,
1270
+ user_id: str = None,
1271
+ session_id: str = None,
1272
+ pass_full_context: bool = True,
1273
+ synthesis_prompt: Optional[str] = None,
1274
+ max_tokens: int = 4096,
1275
+ temperature: float = 0.1,
1276
+ **kwargs
1277
+ ) -> CrewResult:
1278
+ """Execute agents iteratively until the stopping condition is met.
1279
+
1280
+ Loop execution reuses the final output from each iteration as the input
1281
+ for the next iteration. After every iteration the crew uses the
1282
+ configured LLM to decide if the provided condition has been satisfied.
1283
+
1284
+ Args:
1285
+ initial_task: The initial task/question that triggers the loop.
1286
+ condition: Natural language description of the success criteria.
1287
+ agent_sequence: Ordered list of agent IDs for each iteration
1288
+ (defaults to all registered agents in insertion order).
1289
+ max_iterations: Safety limit on number of iterations to run.
1290
+ user_id: Optional identifier propagated to agents and LLM.
1291
+ session_id: Optional identifier propagated to agents and LLM.
1292
+ pass_full_context: If True, downstream agents receive summaries of
1293
+ previous outputs from the current iteration.
1294
+ synthesis_prompt: Optional prompt to synthesize final results.
1295
+ max_tokens: Token limit when synthesizing or evaluating condition.
1296
+ temperature: Temperature used for synthesis or condition evaluation.
1297
+ **kwargs: Additional parameters forwarded to agent executions.
1298
+
1299
+ Returns:
1300
+ CrewResult describing the entire loop execution history.
1301
+
1302
+ Raises:
1303
+ ValueError: If no agents are registered or no LLM is configured to
1304
+ evaluate the stopping condition.
1305
+ """
1306
+ if not self.agents:
1307
+ return CrewResult(
1308
+ output='No agents in crew',
1309
+ execution_log=[],
1310
+ status='failed',
1311
+ total_time=0.0,
1312
+ metadata={'mode': 'loop', 'iterations': 0, 'condition_met': False}
1313
+ )
1314
+
1315
+ if not self._llm:
1316
+ # Let's create an LLM session if none is provided:
1317
+ self._llm = GoogleGenAIClient(
1318
+ model='gemini-2.5-pro',
1319
+ max_tokens=8192
1320
+ )
1321
+
1322
+ agent_sequence = agent_sequence or list(self.agents.keys())
1323
+ if not agent_sequence:
1324
+ return CrewResult(
1325
+ output='No agents configured for loop execution',
1326
+ execution_log=[],
1327
+ status='failed',
1328
+ total_time=0.0,
1329
+ metadata={'mode': 'loop', 'iterations': 0, 'condition_met': False}
1330
+ )
1331
+
1332
+ session_id = session_id or str(uuid.uuid4())
1333
+ user_id = user_id or 'crew_user'
1334
+
1335
+ # Initialize execution memory
1336
+ self.execution_memory = ExecutionMemory(
1337
+ original_query=initial_task,
1338
+ embedding_model=self.embedding_model if self.enable_analysis else None,
1339
+ dimension=getattr(self, 'dimension', 384),
1340
+ index_type=getattr(self, 'index_type', 'Flat')
1341
+ )
1342
+ # Set execution order for loop mode (agents in sequence, repeated per iteration)
1343
+ self.execution_memory.execution_order = [
1344
+ agent_id for agent_id in agent_sequence
1345
+ if agent_id in self.agents
1346
+ ]
1347
+
1348
+ self.execution_log = []
1349
+ overall_start = asyncio.get_event_loop().time()
1350
+
1351
+ shared_state: Dict[str, Any] = {
1352
+ 'initial_task': initial_task,
1353
+ 'history': [],
1354
+ 'iteration_outputs': [],
1355
+ 'last_output': initial_task,
1356
+ }
1357
+
1358
+ responses: Dict[str, Any] = {}
1359
+ results: List[Any] = []
1360
+ agent_ids: List[str] = []
1361
+ agents_info: List[AgentExecutionInfo] = []
1362
+ errors: Dict[str, str] = {}
1363
+ success_count = 0
1364
+ failure_count = 0
1365
+
1366
+ current_input = initial_task
1367
+ condition_met = False
1368
+
1369
+ iterations_run = 0
1370
+
1371
+ for iteration_index in range(max_iterations):
1372
+ self.logger.notice(
1373
+ f'Starting iteration {iteration_index + 1}/{max_iterations}'
1374
+ )
1375
+ iterations_run = iteration_index + 1
1376
+ crew_context = AgentContext(
1377
+ user_id=user_id,
1378
+ session_id=session_id,
1379
+ original_query=initial_task,
1380
+ shared_data={
1381
+ **kwargs,
1382
+ 'shared_state': shared_state,
1383
+ 'execution_memory': self.execution_memory,
1384
+ },
1385
+ agent_results={}
1386
+ )
1387
+
1388
+ iteration_success = True
1389
+ for agent_position, agent_id in enumerate(agent_sequence):
1390
+ if agent_id not in self.agents:
1391
+ self.logger.warning(
1392
+ f"Agent '{agent_id}' not found in crew during loop execution, skipping"
1393
+ )
1394
+ iteration_success = False
1395
+ execution_id = f"{agent_id}#iteration{iterations_run}"
1396
+ error_message = 'Agent not found'
1397
+ self.execution_log.append({
1398
+ 'agent_id': agent_id,
1399
+ 'execution_id': execution_id,
1400
+ 'iteration': iterations_run,
1401
+ 'agent_name': agent_id,
1402
+ 'agent_index': agent_position,
1403
+ 'input': self._truncate_text(current_input),
1404
+ 'output': error_message,
1405
+ 'execution_time': 0.0,
1406
+ 'success': False,
1407
+ 'error': error_message,
1408
+ })
1409
+ agents_info.append(
1410
+ build_agent_metadata(
1411
+ execution_id,
1412
+ None,
1413
+ None,
1414
+ None,
1415
+ 0.0,
1416
+ 'failed',
1417
+ error_message,
1418
+ )
1419
+ )
1420
+ results.append(error_message)
1421
+ agent_ids.append(execution_id)
1422
+ errors[execution_id] = error_message
1423
+
1424
+ # Save failed execution to memory
1425
+ agent_result = AgentResult(
1426
+ agent_id=execution_id,
1427
+ agent_name=agent_id,
1428
+ task=current_input,
1429
+ result=error_message,
1430
+ metadata={
1431
+ 'success': False,
1432
+ 'error': error_message,
1433
+ 'mode': 'loop',
1434
+ 'iteration': iterations_run,
1435
+ 'user_id': user_id,
1436
+ 'session_id': session_id,
1437
+ 'agent_position': agent_position
1438
+ },
1439
+ execution_time=0.0
1440
+ )
1441
+ self.execution_memory.add_result(
1442
+ agent_result,
1443
+ vectorize=False
1444
+ )
1445
+
1446
+ failure_count += 1
1447
+ continue
1448
+
1449
+ agent = self.agents[agent_id]
1450
+ await self._ensure_agent_ready(agent)
1451
+
1452
+ if agent_position == 0:
1453
+ agent_input = self._build_loop_first_agent_prompt(
1454
+ initial_task=initial_task,
1455
+ iteration_input=current_input,
1456
+ iteration_number=iterations_run,
1457
+ )
1458
+ elif pass_full_context:
1459
+ context_summary = self._build_context_summary(crew_context)
1460
+ shared_summary = self._build_shared_state_summary(shared_state)
1461
+ agent_input = (
1462
+ f"Original task: {initial_task}\n"
1463
+ f"Loop iteration: {iterations_run}\n"
1464
+ f"Shared state so far:\n{shared_summary}\n\n"
1465
+ f"Previous results this iteration:\n{context_summary}\n\n"
1466
+ f"Continue the work based on the latest result: {current_input}"
1467
+ ).strip()
1468
+ else:
1469
+ agent_input = current_input
1470
+
1471
+ try:
1472
+ agent_start = asyncio.get_event_loop().time()
1473
+ response = await self._execute_agent(
1474
+ agent,
1475
+ agent_input,
1476
+ session_id,
1477
+ user_id,
1478
+ agent_position,
1479
+ crew_context
1480
+ )
1481
+
1482
+ result = self._extract_result(response)
1483
+ agent_end = asyncio.get_event_loop().time()
1484
+ execution_time = agent_end - agent_start
1485
+
1486
+ execution_id = f"{agent_id}#iteration{iterations_run}"
1487
+ log_entry = {
1488
+ 'agent_id': agent_id,
1489
+ 'execution_id': execution_id,
1490
+ 'iteration': iterations_run,
1491
+ 'agent_name': agent.name,
1492
+ 'agent_index': agent_position,
1493
+ 'input': self._truncate_text(agent_input),
1494
+ 'output': self._truncate_text(result),
1495
+ 'full_output': result,
1496
+ 'execution_time': execution_time,
1497
+ 'success': True,
1498
+ }
1499
+ self.execution_log.append(log_entry)
1500
+
1501
+ crew_context.agent_results[agent_id] = result
1502
+ current_input = result
1503
+ responses[execution_id] = response
1504
+ agents_info.append(
1505
+ build_agent_metadata(
1506
+ execution_id,
1507
+ agent,
1508
+ response,
1509
+ result,
1510
+ execution_time,
1511
+ 'completed'
1512
+ )
1513
+ )
1514
+ results.append(result)
1515
+ agent_ids.append(execution_id)
1516
+ shared_state['history'].append({
1517
+ 'iteration': iterations_run,
1518
+ 'agent_id': agent_id,
1519
+ 'output': result,
1520
+ })
1521
+
1522
+ # Save successful execution to memory
1523
+ agent_result = AgentResult(
1524
+ agent_id=execution_id,
1525
+ agent_name=agent.name,
1526
+ task=agent_input,
1527
+ result=result,
1528
+ metadata={
1529
+ 'success': True,
1530
+ 'mode': 'loop',
1531
+ 'iteration': iterations_run,
1532
+ 'user_id': user_id,
1533
+ 'session_id': session_id,
1534
+ 'agent_position': agent_position,
1535
+ 'result_type': type(result).__name__
1536
+ },
1537
+ execution_time=execution_time
1538
+ )
1539
+ # Vectorize only if analysis enabled
1540
+ self.execution_memory.add_result(
1541
+ agent_result,
1542
+ vectorize=True
1543
+ )
1544
+
1545
+ success_count += 1
1546
+ except Exception as exc:
1547
+ execution_id = f"{agent_id}#iteration{iterations_run}"
1548
+ error_msg = f"Error executing agent {agent_id}: {exc}"
1549
+ self.logger.error(error_msg, exc_info=True)
1550
+ self.execution_log.append({
1551
+ 'agent_id': agent_id,
1552
+ 'execution_id': execution_id,
1553
+ 'iteration': iterations_run,
1554
+ 'agent_name': agent.name,
1555
+ 'agent_index': agent_position,
1556
+ 'input': self._truncate_text(agent_input),
1557
+ 'output': error_msg,
1558
+ 'execution_time': 0.0,
1559
+ 'success': False,
1560
+ 'error': str(exc)
1561
+ })
1562
+ agents_info.append(
1563
+ build_agent_metadata(
1564
+ execution_id,
1565
+ agent,
1566
+ None,
1567
+ None,
1568
+ 0.0,
1569
+ 'failed',
1570
+ str(exc)
1571
+ )
1572
+ )
1573
+ results.append(error_msg)
1574
+ agent_ids.append(execution_id)
1575
+ errors[execution_id] = str(exc)
1576
+
1577
+ # Save failed execution to memory
1578
+ agent_result = AgentResult(
1579
+ agent_id=execution_id,
1580
+ agent_name=agent.name,
1581
+ task=agent_input,
1582
+ result=error_msg,
1583
+ metadata={
1584
+ 'success': False,
1585
+ 'error': str(exc),
1586
+ 'mode': 'loop',
1587
+ 'iteration': iterations_run,
1588
+ 'user_id': user_id,
1589
+ 'session_id': session_id,
1590
+ 'agent_position': agent_position
1591
+ },
1592
+ execution_time=0.0
1593
+ )
1594
+ self.execution_memory.add_result(
1595
+ agent_result,
1596
+ vectorize=False
1597
+ )
1598
+
1599
+ failure_count += 1
1600
+ iteration_success = False
1601
+ current_input = error_msg
1602
+
1603
+ shared_state['last_output'] = current_input
1604
+ shared_state['iteration_outputs'].append(current_input)
1605
+ if condition:
1606
+ condition_met = await self._evaluate_loop_condition(
1607
+ condition=condition,
1608
+ shared_state=shared_state,
1609
+ last_output=current_input,
1610
+ iteration=iterations_run,
1611
+ user_id=user_id,
1612
+ session_id=session_id,
1613
+ max_tokens=max_tokens,
1614
+ temperature=temperature,
1615
+ )
1616
+ else:
1617
+ condition_met = False
1618
+
1619
+ if condition_met:
1620
+ break
1621
+
1622
+ if not iteration_success:
1623
+ self.logger.debug(
1624
+ f"Loop iteration {iterations_run} completed with errors; continuing until condition is met or max iterations reached"
1625
+ )
1626
+
1627
+ current_input = shared_state['last_output']
1628
+
1629
+ overall_end = asyncio.get_event_loop().time()
1630
+
1631
+ last_output = shared_state['last_output'] if shared_state['iteration_outputs'] else initial_task
1632
+ status = determine_run_status(success_count, failure_count)
1633
+
1634
+ result = CrewResult(
1635
+ output=last_output,
1636
+ response=responses,
1637
+ results=results,
1638
+ agent_ids=agent_ids,
1639
+ agents=agents_info,
1640
+ errors=errors,
1641
+ execution_log=self.execution_log,
1642
+ total_time=overall_end - overall_start,
1643
+ status=status,
1644
+ metadata={
1645
+ 'mode': 'loop',
1646
+ 'iterations': iterations_run,
1647
+ 'max_iterations': max_iterations,
1648
+ 'condition': condition,
1649
+ 'condition_met': condition_met,
1650
+ 'shared_state': shared_state,
1651
+ }
1652
+ )
1653
+
1654
+ if synthesis_prompt:
1655
+ result = await self._synthesize_results(
1656
+ crew_result=result,
1657
+ synthesis_prompt=synthesis_prompt,
1658
+ user_id=user_id,
1659
+ session_id=session_id,
1660
+ max_tokens=max_tokens,
1661
+ temperature=temperature,
1662
+ **kwargs
1663
+ )
1664
+
1665
+ return result
1666
+
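A usage sketch for loop mode, assuming the same hypothetical `crew` object; the condition is free-form natural language that the configured (or default Gemini) LLM evaluates after every iteration:

```python
# Hypothetical usage sketch for run_loop; agent ids and texts are placeholders.
async def refine_until_done(crew):
    result = await crew.run_loop(
        initial_task="Draft release notes for version 0.17.2",
        condition="The draft is under 200 words and lists every new module",
        agent_sequence=['drafter', 'reviewer'],
        max_iterations=3,          # safety limit
        pass_full_context=True,
    )
    meta = result.metadata
    print(f"iterations={meta['iterations']} condition_met={meta['condition_met']}")
    return result.output
```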
1667
+ async def run_parallel(
1668
+ self,
1669
+ tasks: List[Dict[str, Any]],
1670
+ all_results: Optional[bool] = False,
1671
+ user_id: str = None,
1672
+ session_id: str = None,
1673
+ generate_summary: bool = True,
1674
+ synthesis_prompt: Optional[str] = None,
1675
+ max_tokens: int = 4096,
1676
+ temperature: float = 0.1,
1677
+ **kwargs
1678
+ ) -> CrewResult:
1679
+ """
1680
+ Execute multiple agents in parallel using asyncio.gather().
1681
+
1682
+ In parallel execution, all agents run simultaneously on their respective tasks.
1683
+ This is like having multiple independent workers each handling their own job,
1684
+ all working at the same time without waiting for each other.
1685
+
1686
+ This mode is useful when:
1687
+ - You have multiple independent analyses to perform
1688
+ - Agents don't depend on each other's results
1689
+ - You want to maximize throughput and minimize total execution time
1690
+ - Each agent is working on a different aspect of the same problem
1691
+
1692
+ Args:
1693
+ tasks: List of task dictionaries, each containing:
1694
+ - 'agent_id': ID of the agent to execute
1695
+                 - 'query': The query/task for that agent
1696
+             all_results: If True, the output field carries every agent result instead of only the last one
+             generate_summary: If True (and an LLM plus synthesis_prompt are configured), synthesize the results
+             user_id: User identifier for tracking
1697
+             session_id: Session identifier
1698
+ synthesis_prompt: Optional prompt to synthesize all results with LLM
1699
+ max_tokens: Max tokens for synthesis (if synthesis_prompt provided)
1700
+ temperature: Temperature for synthesis LLM
1701
+ **kwargs: Additional arguments passed to all agents
1702
+
1703
+ Returns:
1704
+ CrewResult: Standardized execution payload containing outputs,
1705
+ metadata, and execution logs.
1706
+ """
1707
+ session_id = session_id or str(uuid.uuid4())
1708
+ user_id = user_id or 'crew_user'
1709
+ original_query = tasks[0]['query'] if tasks else ""
1710
+
1711
+ # initialize execution log
1712
+ self.execution_memory = ExecutionMemory(
1713
+ original_query=original_query,
1714
+ embedding_model=self.embedding_model if self.enable_analysis else None,
1715
+ dimension=getattr(self, 'dimension', 384),
1716
+ index_type=getattr(self, 'index_type', 'Flat')
1717
+ )
1718
+ # Set execution order for parallel mode (all agents at same level)
1719
+ self.execution_memory.execution_order = [
1720
+ task.get('agent_id') for task in tasks
1721
+ if task.get('agent_id') in self.agents
1722
+ ]
1723
+
1724
+ crew_context = AgentContext(
1725
+ user_id=user_id,
1726
+ session_id=session_id,
1727
+ original_query=original_query,
1728
+ shared_data={
1729
+ **kwargs,
1730
+ 'execution_memory': self.execution_memory,
1731
+ },
1732
+ agent_results={}
1733
+ )
1734
+
1735
+ self.execution_log = []
1736
+ responses: Dict[str, Any] = {}
1737
+ results_payload: List[Any] = []
1738
+ agent_ids: List[str] = []
1739
+ agents_info: List[AgentExecutionInfo] = []
1740
+ errors: Dict[str, str] = {}
1741
+ success_count = 0
1742
+ failure_count = 0
1743
+ last_output = None
1744
+
1745
+ # Create async tasks for parallel execution
1746
+ async_tasks = []
1747
+ task_metadata = []
1748
+
1749
+ for i, task in enumerate(tasks):
1750
+ agent_id = task.get('agent_id')
1751
+ query = task.get('query')
1752
+
1753
+ if agent_id not in self.agents:
1754
+ self.logger.warning(f"Agent '{agent_id}' not found, skipping")
1755
+ continue
1756
+
1757
+ agent = self.agents[agent_id]
1758
+ task_metadata.append({
1759
+ 'agent_id': agent_id,
1760
+ 'agent_name': agent.name,
1761
+ 'query': query,
1762
+ 'index': i
1763
+ })
1764
+ async_tasks.append(
1765
+ self._execute_agent(
1766
+ agent, query, session_id, user_id, i, crew_context
1767
+ )
1768
+ )
1769
+
1770
+ if not async_tasks:
1771
+ return CrewResult(
1772
+ output=None,
1773
+ status='failed',
1774
+ errors={'__crew__': 'No valid tasks to execute'},
1775
+ metadata={'mode': 'parallel'}
1776
+ )
1777
+
1778
+ # Execute all tasks in parallel using asyncio.gather()
1779
+ # This is the key to parallel execution - all coroutines run concurrently
1780
+ start_time = asyncio.get_event_loop().time()
1781
+ results = await asyncio.gather(*async_tasks, return_exceptions=True)
1782
+ end_time = asyncio.get_event_loop().time()
1783
+
1784
+ # Process results from all parallel executions
1785
+ parallel_results = {}
1786
+
1787
+ for i, (result, metadata) in enumerate(zip(results, task_metadata)):
1788
+ agent_id = metadata['agent_id']
1789
+ agent_name = metadata['agent_name']
1790
+ agent_ids.append(agent_id)
1791
+ _query = metadata['query']
1792
+ execution_time = end_time - start_time # Total parallel time
1793
+
1794
+ if isinstance(result, Exception):
1795
+ # Handle exceptions from failed agents
1796
+ error_msg = f"Error: {str(result)}"
1797
+ parallel_results[agent_id] = error_msg
1798
+ errors[agent_id] = str(result)
1799
+ # Save failed execution to memory
1800
+ agent_result = AgentResult(
1801
+ agent_id=agent_id,
1802
+ agent_name=agent_name,
1803
+ task=_query,
1804
+ result=error_msg,
1805
+ metadata={
1806
+ 'success': False,
1807
+ 'error': str(result),
1808
+ 'mode': 'parallel',
1809
+ 'user_id': user_id,
1810
+ 'session_id': session_id
1811
+ },
1812
+ execution_time=0.0
1813
+ )
1814
+ self.execution_memory.add_result(
1815
+ agent_result,
1816
+ vectorize=False
1817
+ )
1818
+ log_entry = {
1819
+ 'agent_id': agent_id,
1820
+ 'agent_name': agent_name,
1821
+ 'agent_index': i,
1822
+ 'input': _query,
1823
+ 'output': error_msg,
1824
+ 'execution_time': 0,
1825
+ 'success': False,
1826
+ 'error': str(result)
1827
+ }
1828
+ agents_info.append(
1829
+ build_agent_metadata(
1830
+ agent_id,
1831
+ self.agents.get(agent_id),
1832
+ None,
1833
+ error_msg,
1834
+ 0.0,
1835
+ 'failed',
1836
+ str(result)
1837
+ )
1838
+ )
1839
+ results_payload.append(error_msg)
1840
+
1841
+ responses[agent_id] = None
1842
+ failure_count += 1
1843
+ else:
1844
+ # Handle successful agent execution
1845
+ extracted_result = self._extract_result(result)
1846
+ parallel_results[agent_id] = extracted_result
1847
+ crew_context.agent_results[agent_id] = extracted_result
1848
+ _query = metadata['query']
1849
+
1850
+ # Save successful execution to memory
1851
+ agent_result = AgentResult(
1852
+ agent_id=agent_id,
1853
+ agent_name=agent_name,
1854
+ task=_query,
1855
+ result=extracted_result,
1856
+ metadata={
1857
+ 'success': True,
1858
+ 'mode': 'parallel',
1859
+ 'user_id': user_id,
1860
+ 'session_id': session_id,
1861
+ 'index': i,
1862
+ 'result_type': type(extracted_result).__name__
1863
+ },
1864
+ execution_time=execution_time
1865
+ )
1866
+ # Vectorize only if analysis enabled (handled internally by ExecutionMemory)
1867
+ self.execution_memory.add_result(
1868
+ agent_result,
1869
+ vectorize=True
1870
+ )
1871
+
1872
+ log_entry = {
1873
+ 'agent_id': agent_id,
1874
+ 'agent_name': agent_name,
1875
+ 'agent_index': i,
1876
+ 'input': _query,
1877
+ 'output': self._truncate_text(extracted_result),
1878
+ 'full_output': extracted_result,
1879
+ 'execution_time': end_time - start_time, # Total parallel time
1880
+ 'success': True
1881
+ }
1882
+ agents_info.append(
1883
+ build_agent_metadata(
1884
+ agent_id,
1885
+ self.agents.get(agent_id),
1886
+ result,
1887
+ extracted_result,
1888
+ end_time - start_time,
1889
+ 'completed'
1890
+ )
1891
+ )
1892
+ results_payload.append(extracted_result)
1893
+ responses[agent_id] = result
1894
+ last_output = extracted_result
1895
+ success_count += 1
1896
+
1897
+ self.execution_log.append(log_entry)
1898
+ status = determine_run_status(success_count, failure_count)
1899
+
1900
+ output = results_payload if all_results else last_output
1901
+
1902
+ result = CrewResult(
1903
+ output=output,
1904
+ response=responses,
1905
+ results=results_payload,
1906
+ agent_ids=agent_ids,
1907
+ agents=agents_info,
1908
+ errors=errors,
1909
+ execution_log=self.execution_log,
1910
+ total_time=end_time - start_time,
1911
+ status=status,
1912
+ metadata={
1913
+ 'mode': 'parallel',
1914
+ 'task_count': len(agent_ids),
1915
+ 'requested_tasks': len(tasks),
1916
+ }
1917
+ )
1918
+ if generate_summary and self._llm and synthesis_prompt:
1919
+ result = await self._synthesize_results(
1920
+ crew_result=result,
1921
+ synthesis_prompt=synthesis_prompt,
1922
+ user_id=user_id,
1923
+ session_id=session_id,
1924
+ max_tokens=max_tokens,
1925
+ temperature=temperature,
1926
+ **kwargs
1927
+ )
1928
+
1929
+ return result
1930
+
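A usage sketch for parallel mode with two independent tasks; the agent ids are placeholders and the keyword arguments mirror the signature above:

```python
# Hypothetical usage sketch for run_parallel.
async def fan_out(crew):
    result = await crew.run_parallel(
        tasks=[
            {'agent_id': 'pricing', 'query': 'Collect current pricing data'},
            {'agent_id': 'reviews', 'query': 'Summarize recent user feedback'},
        ],
        all_results=True,  # return every agent output, not only the last one
        synthesis_prompt="Combine both findings into one report",
    )
    for agent_id, output in zip(result.agent_ids, result.results):
        print(agent_id, str(output)[:80])
    return result
```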
1931
+ async def run_flow(
1932
+ self,
1933
+ initial_task: str,
1934
+ max_iterations: int = 100,
1935
+ on_agent_complete: Optional[Callable] = None,
1936
+ synthesis_prompt: Optional[str] = None,
1937
+ user_id: str = None,
1938
+ session_id: str = None,
1939
+ max_tokens: int = 4096,
1940
+ temperature: float = 0.1,
1941
+ **kwargs
1942
+ ) -> CrewResult:
1943
+ """
1944
+ Execute the workflow using the defined task flows (DAG-based execution).
1945
+
1946
+ Flow-based execution is the most sophisticated mode. It executes agents based
1947
+ on a Directed Acyclic Graph (DAG) of dependencies, automatically parallelizing
1948
+ independent agents while respecting dependencies.
1949
+
1950
+ Think of this like a project management system where:
1951
+ - Some tasks can start immediately (no dependencies)
1952
+ - Some tasks must wait for specific other tasks to complete (dependencies)
1953
+ - When multiple tasks can run, they execute in parallel (optimization)
1954
+ - The workflow completes when all final tasks are done
1955
+
1956
+ This mode is useful when:
1957
+ - You have complex workflows with both sequential and parallel elements
1958
+ - Different agents depend on specific other agents' outputs
1959
+ - You want automatic parallelization wherever possible
1960
+ - Your workflow follows patterns like:
1961
+ * Writer → [Editor1, Editor2] → Final Reviewer
1962
+ * [Research1, Research2, Research3] → Synthesizer
1963
+ * Complex multi-stage pipelines with branching and merging
1964
+
1965
+ The workflow execution follows these steps:
1966
+ 1. Start with agents that have no dependencies (initial agents)
1967
+ 2. Execute ready agents in parallel when possible
1968
+ 3. Wait for dependencies before executing dependent agents
1969
+ 4. Continue until all final agents complete
1970
+ 5. Handle errors and detect stuck workflows
1971
+
1972
+ Args:
1973
+ initial_task: The initial task/prompt to start the workflow
1974
+ max_iterations: Maximum number of execution rounds (safety limit to prevent infinite loops)
1975
+ synthesis_prompt: Optional prompt to synthesize all results with LLM
1976
+ user_id: User identifier (used for synthesis)
1977
+ session_id: Session identifier (used for synthesis)
1978
+ max_tokens: Max tokens for synthesis
1979
+ temperature: Temperature for synthesis LLM
1980
+ on_agent_complete: Optional callback function called when an agent completes.
1981
+ Signature: async def callback(agent_name: str, result: Any, context: FlowContext)
1982
+
1983
+ Returns:
1984
+ CrewResult: Standardized execution payload containing outputs,
1985
+ metadata, and execution logs.
1986
+
1987
+ Raises:
1988
+ ValueError: If no initial agent is found (no workflow defined)
1989
+ RuntimeError: If workflow gets stuck or exceeds max_iterations
1990
+ """
1991
+ # Setup session identifiers
1992
+ session_id = session_id or str(uuid.uuid4())
1993
+ user_id = user_id or 'crew_user'
1994
+
1995
+ # Initialize execution memory
1996
+ self.execution_memory = ExecutionMemory(
1997
+ original_query=initial_task,
1998
+ embedding_model=self.embedding_model if self.enable_analysis else None,
1999
+ dimension=getattr(self, 'dimension', 384),
2000
+ index_type=getattr(self, 'index_type', 'Flat')
2001
+ )
2002
+ # Set execution order for flow mode (will be updated as agents complete)
2003
+ self.execution_memory.execution_order = []
2004
+
2005
+ # Initialize execution context to track the workflow state
2006
+ context = FlowContext(initial_task=initial_task)
2007
+ # Store execution metadata in context for use in _execute_parallel_agents
2008
+ context.execution_memory = self.execution_memory
2009
+ context.user_id = user_id
2010
+ context.session_id = session_id
2011
+
2012
+ self.execution_log = []
2013
+ start_time = asyncio.get_event_loop().time()
2014
+
2015
+ # Validate workflow before starting
2016
+ if not self.initial_agent:
2017
+ raise ValueError(
2018
+ "No initial agent found. Define task flows first using task_flow()."
2019
+ )
2020
+
2021
+ iteration = 0
2022
+ while iteration < max_iterations:
2023
+ # Find agents ready to execute (all dependencies satisfied)
2024
+ ready_agents = await self._get_ready_agents(context)
2025
+
2026
+ if not ready_agents:
2027
+ # Check if we're done - all final agents have completed
2028
+ if self.final_agents.issubset(context.completed_tasks):
2029
+ break
2030
+
2031
+ # Check if we're stuck - no ready agents but also no active agents
2032
+ if not context.active_tasks:
2033
+ raise RuntimeError(
2034
+ f"Workflow is stuck. Completed: {context.completed_tasks}, "
2035
+ f"Expected final: {self.final_agents}. "
2036
+ f"This usually indicates a circular dependency or missing agents."
2037
+ )
2038
+
2039
+ # Wait for active tasks to complete
2040
+ await asyncio.sleep(0.1)
2041
+ continue
2042
+
2043
+ # Execute all ready agents in parallel
2044
+ # This is where the automatic parallelization happens
2045
+ results = await self._execute_parallel_agents(ready_agents, context)
2046
+
2047
+ # Call callback for each completed agent if provided
2048
+ if on_agent_complete:
2049
+ for agent_name, result in results.items():
2050
+ await on_agent_complete(agent_name, result, context)
2051
+
2052
+ iteration += 1
2053
+
2054
+ if iteration >= max_iterations:
2055
+ raise RuntimeError(
2056
+ f"Workflow exceeded max iterations ({max_iterations}). "
2057
+ f"Completed: {context.completed_tasks}, "
2058
+ f"Expected: {self.final_agents}"
2059
+ )
2060
+
2061
+ end_time = asyncio.get_event_loop().time()
2062
+ error_messages: Dict[str, str] = {
2063
+ agent: str(err)
2064
+ for agent, err in context.errors.items()
2065
+ }
2066
+ completion_order = context.completion_order or list(context.completed_tasks)
2067
+
2068
+ results_payload = [
2069
+ context.results.get(agent_name)
2070
+ for agent_name in completion_order
2071
+ ]
2072
+
2073
+ agents_info: List[AgentExecutionInfo] = []
2074
+ for agent_name in completion_order:
2075
+ metadata = context.agent_metadata.get(agent_name)
2076
+ if metadata:
2077
+ agents_info.append(metadata)
2078
+
2079
+ success_count = sum(
2080
+ info.status == 'completed' for info in agents_info
2081
+ )
2082
+ failure_count = sum(info.status == 'failed' for info in agents_info)
2083
+
2084
+ for agent_name, error in error_messages.items():
2085
+ if agent_name not in completion_order:
2086
+ node = self.workflow_graph.get(agent_name)
2087
+ agent_obj = node.agent if node else None
2088
+ metadata = build_agent_metadata(
2089
+ agent_name,
2090
+ agent_obj,
2091
+ context.responses.get(agent_name),
2092
+ context.results.get(agent_name),
2093
+ 0.0,
2094
+ 'failed',
2095
+ error
2096
+ )
2097
+ agents_info.append(metadata)
2098
+ failure_count += 1
2099
+
2100
+ last_output = None
2101
+ if completion_order:
2102
+ last_agent = completion_order[-1]
2103
+ last_output = context.results.get(last_agent)
2104
+
2105
+ status = determine_run_status(success_count, failure_count)
2106
+
2107
+ result = CrewResult(
2108
+ output=last_output,
2109
+ response=context.responses,
2110
+ results=results_payload,
2111
+ agent_ids=completion_order,
2112
+ agents=agents_info,
2113
+ errors=error_messages,
2114
+ execution_log=self.execution_log,
2115
+ total_time=end_time - start_time,
2116
+ status=status,
2117
+ metadata={'mode': 'flow', 'iterations': iteration}
2118
+ )
2119
+ if synthesis_prompt:
2120
+ result = await self._synthesize_results(
2121
+ crew_result=result,
2122
+ synthesis_prompt=synthesis_prompt,
2123
+ user_id=user_id,
2124
+ session_id=session_id,
2125
+ max_tokens=max_tokens,
2126
+ temperature=temperature,
2127
+ **kwargs
2128
+ )
2129
+
2130
+ return result
2131
+
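A usage sketch for flow mode. The exact `task_flow()` signature is not visible in this diff, so the dependency-wiring line below is commented out and marked as an assumption; the `run_flow()` arguments mirror the signature above:

```python
# Hypothetical usage sketch for run_flow.
async def run_dag(crew):
    # crew.task_flow('writer', depends_on=['research_a', 'research_b'])  # assumed wiring API
    await crew.validate_workflow()       # fail fast on circular dependencies
    print(crew.visualize_workflow())     # text view of the DAG
    result = await crew.run_flow(
        initial_task="Produce a market overview",
        max_iterations=50,
        synthesis_prompt="Write an executive summary of all agent outputs",
    )
    return result.output
```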
2132
+ def visualize_workflow(self) -> str:
2133
+ """
2134
+ Generate a text representation of the workflow graph.
2135
+
2136
+ This is useful for debugging and understanding the structure of your
2137
+ workflow before executing it. It shows each agent, what it depends on,
2138
+ and what depends on it.
2139
+
2140
+ Could be extended to use graphviz for visual diagrams.
2141
+ """
2142
+ lines = ["Workflow Graph:", "=" * 50]
2143
+
2144
+ for agent_name, node in self.workflow_graph.items():
2145
+ deps = f"depends on: {node.dependencies}" if node.dependencies else "initial"
2146
+ successors = f"→ {node.successors}" if node.successors else "(final)"
2147
+ lines.append(f" {agent_name}: {deps} {successors}")
2148
+
2149
+ return "\n".join(lines)
2150
+
2151
+ async def validate_workflow(self) -> bool:
2152
+ """
2153
+ Validate the workflow for common issues.
2154
+
2155
+ This method checks for:
2156
+ - Circular dependencies (agent A depends on B, B depends on A)
2157
+ - Disconnected agents (agents not reachable from initial agents)
2158
+
2159
+ It's recommended to call this before executing run_flow() to catch
2160
+ configuration errors early.
2161
+
2162
+ Raises:
2163
+ ValueError: If circular dependency is detected
2164
+
2165
+ Returns:
2166
+ True if workflow is valid
2167
+ """
2168
+ def has_cycle(start: str, visited: Set[str], rec_stack: Set[str]) -> bool:
2169
+ """
2170
+ Detect cycles using depth-first search with recursion stack.
2171
+
2172
+ This is a classic graph algorithm for detecting cycles in directed graphs.
2173
+ We track both visited nodes (to avoid redundant work) and the current
2174
+ recursion stack (to detect back edges that indicate cycles).
2175
+ """
2176
+ visited.add(start)
2177
+ rec_stack.add(start)
2178
+
2179
+ node = self.workflow_graph[start]
2180
+ for successor in node.successors:
2181
+ if successor not in visited:
2182
+ if has_cycle(successor, visited, rec_stack):
2183
+ return True
2184
+ elif successor in rec_stack:
2185
+ # Found a back edge - this is a cycle
2186
+ return True
2187
+
2188
+ rec_stack.remove(start)
2189
+ return False
2190
+
2191
+ visited = set()
2192
+ for agent_name in self.workflow_graph:
2193
+ if agent_name not in visited and has_cycle(agent_name, visited, set()):
2194
+ raise ValueError(
2195
+ f"Circular dependency detected involving {agent_name}. "
2196
+ f"Circular dependencies create infinite loops and are not allowed."
2197
+ )
2198
+
2199
+ return True
2200
+
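The cycle check is ordinary depth-first search with a recursion stack. A standalone sketch of the same idea over a plain successor map, independent of the workflow node objects:

```python
# Standalone DFS cycle check over a successor map, mirroring the logic above.
from typing import Dict, List, Set

def graph_has_cycle(graph: Dict[str, List[str]]) -> bool:
    visited: Set[str] = set()

    def visit(node: str, stack: Set[str]) -> bool:
        visited.add(node)
        stack.add(node)
        for successor in graph.get(node, []):
            if successor not in visited:
                if visit(successor, stack):
                    return True
            elif successor in stack:  # back edge -> cycle
                return True
        stack.remove(node)
        return False

    return any(n not in visited and visit(n, set()) for n in graph)

assert not graph_has_cycle({'writer': ['editor'], 'editor': ['reviewer'], 'reviewer': []})
assert graph_has_cycle({'a': ['b'], 'b': ['a']})
```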
2201
+ def get_execution_summary(self) -> Dict[str, Any]:
2202
+ """
2203
+ Get a summary of the last execution.
2204
+
2205
+ This provides high-level metrics about the execution, useful for
2206
+ monitoring and optimization.
2207
+ """
2208
+ if not self.execution_log:
2209
+ return {'message': 'No executions yet'}
2210
+
2211
+ total_time = sum(log['execution_time'] for log in self.execution_log)
2212
+ success_count = sum(bool(log['success']) for log in self.execution_log)
2213
+
2214
+ return {
2215
+ 'total_agents': len(self.agents),
2216
+ 'executed_agents': len(self.execution_log),
2217
+ 'successful_agents': success_count,
2218
+ 'total_execution_time': total_time,
2219
+ 'average_time_per_agent': (
2220
+ total_time / len(self.execution_log) if self.execution_log else 0
2221
+ )
2222
+ }
2223
+
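The summary is simple aggregation over the execution log. A standalone sketch of the same arithmetic on a sample log (the field values are made up):

```python
# Standalone sketch of the execution-summary math above.
sample_log = [
    {'agent_id': 'researcher', 'execution_time': 2.4, 'success': True},
    {'agent_id': 'writer', 'execution_time': 1.1, 'success': False},
]

total_time = sum(entry['execution_time'] for entry in sample_log)
summary = {
    'executed_agents': len(sample_log),
    'successful_agents': sum(bool(entry['success']) for entry in sample_log),
    'total_execution_time': total_time,
    'average_time_per_agent': total_time / len(sample_log) if sample_log else 0,
}
print(summary)  # {'executed_agents': 2, 'successful_agents': 1, 'total_execution_time': 3.5, ...}
```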
2224
+ async def run(
2225
+ self,
2226
+ task: Union[str, Dict[str, str]],
2227
+ synthesis_prompt: Optional[str] = None,
2228
+ user_id: str = None,
2229
+ session_id: str = None,
2230
+ max_tokens: int = 4096,
2231
+ temperature: float = 0.1,
2232
+ **kwargs
2233
+ ) -> AIMessage:
2234
+ """
2235
+ Execute all agents in parallel with a task, then synthesize results with LLM.
2236
+
2237
+ This is a simplified interface for the common pattern:
2238
+ 1. Multiple agents research/gather information in parallel
2239
+ 2. LLM synthesizes all findings into a coherent response
2240
+
2241
+ Args:
2242
+ task: The task/prompt for agents. Can be:
2243
+ - str: Same prompt for all agents
2244
+ - dict: Custom prompt per agent {agent_id: prompt}
2245
+ synthesis_prompt: Prompt for LLM to synthesize results.
2246
+ If None, uses default synthesis prompt.
2247
+ Aliases: conclusion, summary_prompt, final_prompt
2248
+ user_id: User identifier
2249
+ session_id: Session identifier
2250
+ max_tokens: Max tokens for synthesis LLM
2251
+ temperature: Temperature for synthesis LLM
2252
+ **kwargs: Additional arguments passed to LLM
2253
+
2254
+ Returns:
2255
+ AIMessage: Synthesized response from the LLM
2256
+
2257
+ Example:
2258
+ >>> crew = AgentCrew(
2259
+ ... agents=[info_agent, price_agent, review_agent],
2260
+ ... llm=ClaudeClient()
2261
+ ... )
2262
+             >>> result = await crew.run(
2263
+ ... task="Research iPhone 15 Pro",
2264
+ ... synthesis_prompt="Create an executive summary"
2265
+ ... )
2266
+ >>> print(result.content)
2267
+
2268
+ Raises:
2269
+ ValueError: If no LLM is configured for synthesis
2270
+ """
2271
+ if not self._llm:
2272
+ raise ValueError(
2273
+ "No LLM configured for synthesis. "
2274
+ "Pass llm parameter to AgentCrew constructor: "
2275
+ "AgentCrew(agents=[...], llm=ClaudeClient())"
2276
+ )
2277
+
2278
+ if not self.agents:
2279
+ raise ValueError(
2280
+ "No agents in crew. Add agents first."
2281
+ )
2282
+
2283
+ # Setup session
2284
+ session_id = session_id or str(uuid.uuid4())
2285
+ user_id = user_id or 'crew_user'
2286
+
2287
+ # Prepare tasks for each agent
2288
+ tasks_list = []
2289
+
2290
+ if isinstance(task, str):
2291
+ # Same task for all agents
2292
+ tasks_list.extend(
2293
+ {'agent_id': agent_id, 'query': task}
2294
+ for agent_id, _ in self.agents.items()
2295
+ )
2296
+ elif isinstance(task, dict):
2297
+ # Custom task per agent
2298
+ for agent_id, agent_task in task.items():
2299
+ if agent_id in self.agents:
2300
+ tasks_list.append({
2301
+ 'agent_id': agent_id,
2302
+ 'query': agent_task
2303
+ })
2304
+ else:
2305
+ self.logger.warning(
2306
+ f"Agent '{agent_id}' in task dict not found in crew"
2307
+ )
2308
+ else:
2309
+ raise ValueError(
2310
+ f"task must be str or dict, got {type(task)}"
2311
+ )
2312
+
2313
+ # Execute agents in parallel
2314
+ self.logger.info(
2315
+ f"Executing {len(tasks_list)} agents in parallel for research"
2316
+ )
2317
+
2318
+ parallel_result = await self.run_parallel(
2319
+ tasks=tasks_list,
2320
+ user_id=user_id,
2321
+ session_id=session_id,
2322
+ **kwargs
2323
+ )
2324
+
2325
+         if parallel_result.status == 'failed':
2326
+             raise RuntimeError(
2327
+                 f"Parallel execution failed: {parallel_result.errors or 'Unknown error'}"
2328
+ )
2329
+
2330
+ # Build context from all agent results
2331
+ context_parts = ["# Research Findings from Specialist Agents\n"]
2332
+
2333
+         for agent_id, result in zip(parallel_result.agent_ids, parallel_result.results):
2334
+ agent = self.agents[agent_id]
2335
+ agent_name = agent.name
2336
+
2337
+ context_parts.extend((f"\n## {agent_name}\n", result, "\n---\n"))
2338
+
2339
+ research_context = "\n".join(context_parts)
2340
+
2341
+ # Default synthesis prompt if none provided
2342
+ if not synthesis_prompt:
2343
+ synthesis_prompt = """Based on the research findings from our specialist agents above,
2344
+ provide a comprehensive synthesis that:
2345
+ 1. Integrates all the key findings
2346
+ 2. Highlights the most important insights
2347
+ 3. Identifies any patterns or contradictions
2348
+ 4. Provides actionable conclusions
2349
+
2350
+ Create a clear, well-structured response."""
2351
+
2352
+ # Build final prompt for LLM
2353
+ final_prompt = f"""{research_context}
2354
+
2355
+ {synthesis_prompt}"""
2356
+
2357
+ # Call LLM for synthesis
2358
+ self.logger.info("Synthesizing results with LLM coordinator")
2359
+
2360
+ async with self._llm as client:
2361
+ synthesis_response = await client.ask(
2362
+ prompt=final_prompt,
2363
+ max_tokens=max_tokens,
2364
+ temperature=temperature,
2365
+ user_id=user_id,
2366
+ session_id=f"{session_id}_synthesis",
2367
+ **kwargs
2368
+ )
2369
+
2370
+ # Enhance response with crew metadata
2371
+ if hasattr(synthesis_response, 'metadata'):
2372
+ synthesis_response.metadata['crew_name'] = self.name
2373
+             synthesis_response.metadata['agents_used'] = list(parallel_result.agent_ids)
2374
+             synthesis_response.metadata['total_execution_time'] = parallel_result.total_time
2375
+
2376
+ return synthesis_response
2377
+
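A usage sketch of the simplified `run()` interface with per-agent prompts; the agent ids are placeholders and the keyword arguments mirror the signature above:

```python
# Hypothetical usage sketch for run(); requires an LLM configured on the crew.
async def research(crew):
    response = await crew.run(
        task={                      # per-agent prompts keyed by agent id
            'info': "Gather product specifications",
            'price': "Collect current prices",
        },
        synthesis_prompt="Create an executive summary",
        max_tokens=2048,
    )
    print(response.content)         # synthesized AIMessage content
    return response
```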
2378
+ def clear_memory(self, keep_summary=False):
2379
+ """Limpia execution memory y FAISS"""
2380
+ self.execution_memory.clear()
2381
+ # self.faiss_store.clear()
2382
+ if not keep_summary:
2383
+ self._summary = None
2384
+
2385
+ def get_memory_snapshot(self) -> Dict:
2386
+ """Retorna estado completo del memory para inspección"""
2387
+ return {
2388
+ "results": self.execution_memory.results,
2389
+ "summary": self._summary,
2390
+ "execution_order": self.execution_memory.execution_order
2391
+ }
2392
+
2393
+ def _build_ask_context(
2394
+ self,
2395
+ semantic_results: List[Tuple[str, AgentResult, float]],
2396
+ textual_context: Dict[str, Any],
2397
+ question: str
2398
+ ) -> Dict[str, Any]:
2399
+ """
2400
+         Build the combined context for the main LLM.
2401
+
2402
+         Integrates semantic search results (FAISS), textual context from the
2403
+         CrewResult, information about available agents, and execution metadata.
2404
+ """
2405
+ context = {
2406
+ 'question': question,
2407
+ 'semantic_matches': [],
2408
+ 'crew_summary': {},
2409
+ 'agents_available': [],
2410
+ 'execution_metadata': {}
2411
+ }
2412
+
2413
+         # 1. Process semantic results from FAISS
2414
+ seen_agents = set()
2415
+ for chunk_text, agent_result, score in semantic_results:
2416
+ if agent_result.agent_id not in seen_agents:
2417
+ context['semantic_matches'].append({
2418
+ 'agent_id': agent_result.agent_id,
2419
+ 'agent_name': agent_result.agent_name,
2420
+ 'relevant_content': chunk_text,
2421
+ 'similarity_score': round(score, 3),
2422
+ 'task_executed': agent_result.task,
2423
+ 'execution_time': agent_result.execution_time
2424
+ })
2425
+ seen_agents.add(agent_result.agent_id)
2426
+
2427
+         # 2. Add context from the CrewResult
2428
+ if textual_context:
2429
+ context['crew_summary'] = {
2430
+ 'final_output': textual_context.get('final_output', ''),
2431
+ 'relevant_logs': textual_context.get('relevant_logs', []),
2432
+ 'relevant_agents': [
2433
+ {
2434
+ 'agent_id': info.agent_id,
2435
+ 'agent_name': info.agent_name,
2436
+ 'status': info.status,
2437
+ 'execution_time': info.execution_time
2438
+ }
2439
+ for info in textual_context.get('relevant_agents', [])
2440
+ ]
2441
+ }
2442
+
2443
+         # 3. List agents available for re-execution
2444
+ context['agents_available'] = [
2445
+ {
2446
+ 'agent_id': agent_id,
2447
+ 'agent_name': agent.name,
2448
+ 'tool_name': f"agent_{agent_id}",
2449
+ 'previous_result': (
2450
+ self.execution_memory.get_results_by_agent(agent_id).result
2451
+ if self.execution_memory.get_results_by_agent(agent_id)
2452
+ else None
2453
+ )
2454
+ }
2455
+ for agent_id, agent in self.agents.items()
2456
+ ]
2457
+
2458
+         # 4. Execution metadata
2459
+ if self.last_crew_result:
2460
+ context['execution_metadata'] = {
2461
+ 'total_agents': len(self.agents),
2462
+ 'execution_mode': self.last_crew_result.metadata.get('mode', 'unknown'),
2463
+ 'total_time': self.last_crew_result.total_time,
2464
+ 'status': self.last_crew_result.status,
2465
+ 'completed_agents': len([
2466
+ a for a in self.last_crew_result.agents if a.status == 'completed'
2467
+ ]),
2468
+ 'failed_agents': len([
2469
+ a for a in self.last_crew_result.agents if a.status == 'failed'
2470
+ ])
2471
+ }
2472
+
2473
+ return context
2474
+
2475
+ def _build_ask_system_prompt(self, enable_reexecution: bool = True) -> str:
2476
+ """Construye el system prompt para el LLM principal en ask()."""
2477
+ base_prompt = f"""You are an intelligent orchestrator for the AgentCrew named "{self.name}".
2478
+
2479
+ Your role is to answer questions about the execution results from a team of specialized agents.
2480
+ You have access to:
2481
+
2482
+ 1. **Execution History**: Detailed results from each agent's previous execution
2483
+ 2. **Semantic Search**: Relevant content chunks from agent outputs based on similarity
2484
+ 3. **Crew Metadata**: Execution times, status, and workflow information
2485
+
2486
+ **IMPORTANT GUIDELINES:**
2487
+
2488
+ 1. **Answer directly**: Use the provided context to answer the user's question accurately
2489
+ 2. **Cite sources**: Reference which agent(s) provided the information
2490
+ 3. **Be precise**: If information is not in the results, clearly state so
2491
+ 4. **Synthesize**: Combine information from multiple agents when relevant
2492
+ """
2493
+
2494
+ if enable_reexecution:
2495
+ base_prompt += """
2496
+ 5. **Re-execute when needed**: If the user asks for MORE information or the existing results
2497
+ are insufficient, you can call the agent tools to get fresh data. When re-executing:
2498
+ - Use the tool named "agent_<agent_id>" to re-execute that specific agent
2499
+ - Pass a clear, focused query that addresses what information is missing
2500
+ - The agent will receive: original query + their previous result + your new question
2501
+ - Re-executed results REPLACE previous results in the execution memory
2502
+
2503
+ **Available Agent Tools:**
2504
+ You have access to tools for each agent in the crew. Use them strategically when:
2505
+ - User explicitly asks for "more information" or "additional details"
2506
+ - Current results don't answer the question completely
2507
+ - User wants to explore a new angle not covered in original execution
2508
+
2509
+ **Tool Usage Pattern:**
2510
+ ```
2511
+ Call: agent_<agent_id>(query="Specific question for this agent")
2512
+ ```
2513
+
2514
+ The agent will provide updated information that supersedes their previous result.
2515
+ """
2516
+ else:
2517
+ base_prompt += """
2518
+ 5. **No re-execution**: You can only answer based on existing results.
2519
+ If information is missing, inform the user they need to run the crew again.
2520
+ """
2521
+
2522
+ base_prompt += """
2523
+ **Response Format:**
2524
+ - Start with a direct answer to the user's question
2525
+ - Reference agent sources: "According to [Agent Name]..." or "[Agent Name] found that..."
2526
+ - Use markdown for readability (headers, lists, bold for key points)
2527
+ - If re-executing agents, explain what new information you're gathering
2528
+
2529
+ Remember: You're a knowledge orchestrator, not just a data retriever. Synthesize,
2530
+ analyze, and present information in the most helpful way for the user.
2531
+ """
2532
+
2533
+ return base_prompt.strip()
2534
+
2535
+ def _build_ask_user_prompt(self, question: str, context: Dict[str, Any]) -> str:
2536
+ """Construye el user prompt con la pregunta y contexto recuperado."""
2537
+ prompt_parts = [
2538
+ "# User Question",
2539
+ f"{question}",
2540
+ "",
2541
+ "---",
2542
+ ""
2543
+ ]
2544
+
2545
+         # 1. Semantic results (most relevant first)
2546
+ if context.get('semantic_matches'):
2547
+ prompt_parts.extend([
2548
+ "# Relevant Information from Agents (Semantic Search)",
2549
+ ""
2550
+ ])
2551
+
2552
+ for i, match in enumerate(context['semantic_matches'], 1):
2553
+ prompt_parts.extend([
2554
+ f"## Match {i}: {match['agent_name']} (Similarity: {match['similarity_score']})",
2555
+ f"**Task Executed**: {match['task_executed']}",
2556
+ f"**Execution Time**: {match['execution_time']:.2f}s",
2557
+ "",
2558
+ "**Relevant Content**:",
2559
+ f"```",
2560
+ match['relevant_content'],
2561
+ "```",
2562
+ ""
2563
+ ])
2564
+ else:
2565
+ prompt_parts.extend([
2566
+ "# Relevant Information from Agents",
2567
+ "*No semantically similar content found. Answering based on crew summary.*",
2568
+ ""
2569
+ ])
2570
+
2571
+         # 2. Crew summary (if any)
2572
+ crew_summary = context.get('crew_summary', {})
2573
+ if crew_summary.get('final_output'):
2574
+ prompt_parts.extend([
2575
+ "---",
2576
+ "",
2577
+ "# Final Crew Output",
2578
+ crew_summary['final_output'],
2579
+ ""
2580
+ ])
2581
+
2582
+ if crew_summary.get('relevant_agents'):
2583
+ prompt_parts.extend([
2584
+ "## Agents Involved",
2585
+ ""
2586
+ ])
2587
+ prompt_parts.extend(
2588
+ f"- **{agent_info['agent_name']}** ({agent_info['status']}, {agent_info['execution_time']:.2f}s)"
2589
+ for agent_info in crew_summary['relevant_agents']
2590
+ )
2591
+ prompt_parts.append("")
2592
+
2593
+         # 3. Execution metadata
2594
+ if exec_meta := context.get('execution_metadata', {}):
2595
+ prompt_parts.extend([
2596
+ "---",
2597
+ "",
2598
+ "# Execution Metadata",
2599
+ f"- **Mode**: {exec_meta.get('execution_mode', 'unknown')}",
2600
+ f"- **Total Agents**: {exec_meta.get('total_agents', 0)}",
2601
+ f"- **Completed**: {exec_meta.get('completed_agents', 0)}",
2602
+ f"- **Failed**: {exec_meta.get('failed_agents', 0)}",
2603
+ f"- **Total Time**: {exec_meta.get('total_time', 0):.2f}s",
2604
+ f"- **Status**: {exec_meta.get('status', 'unknown')}",
2605
+ ""
2606
+ ])
2607
+
2608
+ # 4. Agentes disponibles para re-ejecución
2609
+ if agents_available := context.get('agents_available', []):
2610
+ prompt_parts.extend([
2611
+ "---",
2612
+ "",
2613
+ "# Available Agents for Re-execution",
2614
+ ""
2615
+ ])
2616
+ for agent_info in agents_available:
2617
+ has_result = agent_info['previous_result'] is not None
2618
+ status_emoji = "✅" if has_result else "⚠️"
2619
+
2620
+ prompt_parts.append(
2621
+ f"{status_emoji} **{agent_info['agent_name']}** "
2622
+ f"(tool: `{agent_info['tool_name']}`)"
2623
+ )
2624
+
2625
+ if has_result:
2626
+ # Truncar resultado previo
2627
+ prev_result = str(agent_info['previous_result'])
2628
+ if len(prev_result) > 200:
2629
+ prev_result = f"{prev_result[:200]}..."
2630
+ prompt_parts.append(f" - Previous result: {prev_result}")
2631
+ else:
2632
+ prompt_parts.append(" - No previous execution")
2633
+
2634
+ prompt_parts.append("")
2635
+
2636
+ # 5. Instrucciones finales
2637
+ prompt_parts.extend([
2638
+ "---",
2639
+ "",
2640
+ "**Instructions**: Based on the information above, answer the user's question. ",
2641
+ "If you need additional information and agent re-execution is enabled, ",
2642
+ "call the appropriate agent tools with specific queries.",
2643
+ ""
2644
+ ])
2645
+
2646
+ return "\n".join(prompt_parts)
2647
+
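For reference, a minimal sketch of the `context` shape this method consumes. The key names mirror the accessors above; the values below are illustrative only:

```python
# Illustrative context for _build_ask_user_prompt(); values are made up for the example.
example_context = {
    "semantic_matches": [{
        "agent_name": "Research Agent",
        "similarity_score": 0.82,
        "task_executed": "Collect market data",
        "execution_time": 12.4,
        "relevant_content": "Key finding: ...",
    }],
    "crew_summary": {
        "final_output": "Combined crew answer ...",
        "relevant_agents": [
            {"agent_name": "Research Agent", "status": "completed", "execution_time": 12.4},
        ],
    },
    "execution_metadata": {
        "execution_mode": "parallel",
        "total_agents": 3,
        "completed_agents": 3,
        "failed_agents": 0,
        "total_time": 41.7,
        "status": "completed",
    },
    "agents_available": [
        {"agent_name": "Research Agent", "tool_name": "agent_research_01", "previous_result": "..."},
    ],
}

# prompt = crew._build_ask_user_prompt(question="What did we learn?", context=example_context)
```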
+ def _textual_search(
+ self,
+ query: str,
+ crew_result: Optional[CrewResult] = None
+ ) -> Dict[str, Any]:
+ """Basic keyword-based textual search over the CrewResult."""
+ if crew_result is None:
+ crew_result = self.last_crew_result
+
+ if not crew_result:
+ return {}
+
+ # Extract simple keywords (lowercased, common stopwords removed)
+ stopwords = {
+ 'el', 'la', 'de', 'que', 'en', 'y', 'a', 'los', 'las',
+ 'the', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for'
+ }
+
+ keywords = [
+ word.lower()
+ for word in query.split()
+ if len(word) > 2 and word.lower() not in stopwords
+ ]
+
+ if not keywords:
+ keywords = [query.lower()]
+
+ context = {
+ 'final_output': crew_result.output,
+ 'relevant_logs': [],
+ 'relevant_agents': []
+ }
+
+ # Search the execution_log
+ for log_entry in crew_result.execution_log:
+ log_text = json_encoder(log_entry).lower()
+
+ # Keep entries matching at least 2 keywords, or 1 keyword for short entries
+ matches = sum(kw in log_text for kw in keywords)
+ if matches >= 2 or (matches >= 1 and len(log_text) < 500):
+ context['relevant_logs'].append(log_entry)
+
+ # Keep only the most relevant log entries
+ context['relevant_logs'] = context['relevant_logs'][:5]
+
+ # Search agent metadata
+ for agent_info in crew_result.agents:
+ agent_text = f"{agent_info.agent_name} {agent_info.agent_id}".lower()
+
+ if any(kw in agent_text for kw in keywords):
+ context['relevant_agents'].append(agent_info)
+
+ return context
+
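A standalone illustration of the keyword heuristic used above (lowercase, drop short words and stopwords, then count substring hits); the names and sample strings are for the example only:

```python
# Minimal sketch of the keyword matching used by _textual_search().
STOPWORDS = {"the", "an", "and", "or", "but", "in", "on", "at", "to", "for"}

def extract_keywords(query: str) -> list[str]:
    words = [w.lower() for w in query.split() if len(w) > 2 and w.lower() not in STOPWORDS]
    return words or [query.lower()]

def is_relevant(text: str, keywords: list[str]) -> bool:
    hits = sum(kw in text.lower() for kw in keywords)
    return hits >= 2 or (hits >= 1 and len(text) < 500)

print(extract_keywords("What did the pricing agent find?"))
# ['what', 'did', 'pricing', 'agent', 'find?']
print(is_relevant("pricing agent finished in 3.2s", extract_keywords("pricing agent status")))
# True (two keyword hits)
```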
+ async def ask(
+ self,
+ question: str,
+ *,
+ user_id: Optional[str] = None,
+ session_id: Optional[str] = None,
+ top_k: int = 5,
+ score_threshold: float = 0.7,
+ enable_agent_reexecution: bool = True,
+ max_tokens: Optional[int] = None,
+ temperature: Optional[float] = None,
+ **llm_kwargs
+ ) -> AIMessage:
+ """
+ Interactive query against the crew's execution memory.
+
+ This method allows users to ask questions about the results of previous
+ agent executions. It combines semantic search over the execution memory
+ with textual search in the last CrewResult to build a context for the LLM.
+ The LLM then generates a response based on this context.
+
+ Args:
+ question: User question about the results
+ user_id: User identifier (optional)
+ session_id: Session identifier (optional)
+ top_k: Number of top semantic results to retrieve
+ score_threshold: Minimum similarity score for semantic results
+ enable_agent_reexecution: Allow re-executing agents via tools
+ max_tokens: Maximum tokens for the LLM response
+ temperature: LLM temperature
+ **llm_kwargs: Additional arguments for the LLM
+
+ Returns:
+ AIMessage: The LLM's response.
+
+ Raises:
+ ValueError: If no LLM is configured or no execution results are available.
+
+ Example:
+ >>> crew = AgentCrew(agents=[...], llm=GoogleGenAIClient())
+ >>> await crew.run_parallel(...)
+ >>> response = await crew.ask("What did the Research Agent find?")
+ >>> print(response.content)
+ """
+ # 1. Validation
+ if not self._llm:
+ raise ValueError(
+ "No LLM configured for ask(). "
+ "Pass llm parameter to AgentCrew constructor."
+ )
+
+ if not self.execution_memory.results:
+ raise ValueError(
+ "No execution results available. Run crew first using "
+ "run_sequential(), run_parallel(), run_flow(), or run_loop()."
+ )
+
+ self.logger.info(
+ f"Processing ask() query: {question[:100]}..."
+ )
+ start_time = asyncio.get_event_loop().time()
+
+ # 2. Semantic search in FAISS (ExecutionMemory)
+ self.logger.debug(
+ f"Performing semantic search with top_k={top_k}"
+ )
+ semantic_results = self.execution_memory.search_similar(
+ query=question,
+ top_k=top_k
+ )
+
+ # Filter by score_threshold
+ semantic_results = [
+ (chunk, result, score)
+ for chunk, result, score in semantic_results
+ if score >= score_threshold
+ ]
+
+ self.logger.info(
+ f"Found {len(semantic_results)} semantic matches above threshold {score_threshold}"
+ )
+
+ # 3. Textual search in the CrewResult
+ textual_context = self._textual_search(
+ query=question,
+ crew_result=self.last_crew_result
+ )
+
+ # 4. Build the combined context
+ context = self._build_ask_context(
+ semantic_results=semantic_results,
+ textual_context=textual_context,
+ question=question
+ )
+
+ # 5. Build the prompts
+ system_prompt = self._build_ask_system_prompt(
+ enable_reexecution=enable_agent_reexecution
+ )
+
+ user_prompt = self._build_ask_user_prompt(
+ question=question,
+ context=context
+ )
+
+ # 6. Run the orchestrating LLM
+ session_id = session_id or str(uuid.uuid4())
+ user_id = user_id or 'crew_ask_user'
+
+ self.logger.info(
+ f"Calling LLM orchestrator (tools_enabled={enable_agent_reexecution})"
+ )
+
+ async with self._llm as client:
+ response = await client.ask(
+ question=user_prompt,
+ system_prompt=system_prompt,
+ use_tools=enable_agent_reexecution,
+ use_conversation_history=False,
+ max_tokens=max_tokens or 4096,
+ temperature=temperature or 0.2,
+ user_id=user_id,
+ session_id=f"{session_id}_ask",
+ **llm_kwargs
+ )
+
+ # 7. Attach metadata to the response
+ end_time = asyncio.get_event_loop().time()
+
+ if not hasattr(response, 'metadata'):
+ response.metadata = {}
+
+ response.metadata.update(
+ {
+ 'ask_execution_time': end_time - start_time,
+ 'semantic_results_count': len(semantic_results),
+ 'semantic_results': [
+ {
+ 'agent_id': result.agent_id,
+ 'agent_name': result.agent_name,
+ 'score': float(score),
+ }
+ for _, result, score in semantic_results
+ ],
+ 'agents_consulted': list(
+ {result.agent_id for _, result, _ in semantic_results}
+ ),
+ 'textual_context_used': bool(textual_context.get('relevant_logs')),
+ 'reexecution_enabled': enable_agent_reexecution,
+ 'crew_name': self.name,
+ }
+ )
+
+ # Detect whether any re-executions (tool calls) happened
+ if hasattr(response, 'tool_calls') and response.tool_calls:
+ reexecuted_agents = []
+ for call in response.tool_calls:
+ tool_name = call.get('name', '') if isinstance(call, dict) else getattr(call, 'name', '')  # noqa
+ if tool_name.startswith('agent_'):
+ agent_id = tool_name.replace('agent_', '')
+ reexecuted_agents.append(agent_id)
+
+ if reexecuted_agents:
+ response.metadata['agents_reexecuted'] = reexecuted_agents
+ self.logger.info(
+ f"Agents re-executed during ask(): {reexecuted_agents}"
+ )
+
+ self.logger.info(
+ f"ask() completed in {end_time - start_time:.2f}s"
+ )
+
+ return response
+
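A usage sketch of `ask()` following the docstring's example, including the response metadata this method attaches; the crew setup and question are illustrative:

```python
import asyncio

async def main(crew):
    # Assumes the crew has already been run (e.g., via run_parallel), per the docstring example.
    response = await crew.ask(
        "Which risks did the agents flag?",
        top_k=5,
        score_threshold=0.7,
        enable_agent_reexecution=True,
    )
    print(response.content)
    print(response.metadata.get("semantic_results_count"))
    print(response.metadata.get("agents_consulted"))
    print(response.metadata.get("agents_reexecuted", []))  # set only when tool calls re-ran agents

# asyncio.run(main(crew))
```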
+ # =================== SUMMARY() SYSTEM METHODS ===================
+ def _chunk_results_adaptive(
+ self,
+ max_tokens_per_chunk: int = 4000
+ ) -> List[List[AgentResult]]:
+ """
+ Split results into adaptive chunks while respecting execution_order.
+
+ Strategy:
+ - Strictly respect execution order
+ - Estimate tokens per result (~4 chars = 1 token)
+ - Group results up to max_tokens_per_chunk
+ - Skip results with errors
+
+ Args:
+ max_tokens_per_chunk: Maximum tokens per chunk
+
+ Returns:
+ List of chunks; each chunk is a list of AgentResult
+ """
+ chunks = []
+ current_chunk = []
+ current_tokens = 0
+
+ # Iterate in execution order
+ for agent_id in self.execution_memory.execution_order:
+ result = self.execution_memory.get_results_by_agent(agent_id)
+
+ if not result:
+ continue
+
+ # Skip results with errors
+ if hasattr(result, 'metadata') and result.metadata.get('status') == 'failed':
+ self.logger.debug(f"Skipping failed agent: {agent_id}")
+ continue
+
+ # Estimate tokens (simple heuristic: ~4 chars = 1 token)
+ result_text = result.to_text()
+ estimated_tokens = len(result_text) // 4
+
+ # If adding this result would exceed the limit and the chunk already has results
+ if current_tokens + estimated_tokens > max_tokens_per_chunk and current_chunk:
+ chunks.append(current_chunk)
+ current_chunk = [result]
+ current_tokens = estimated_tokens
+ else:
+ current_chunk.append(result)
+ current_tokens += estimated_tokens
+
+ # Add the last chunk if it is not empty
+ if current_chunk:
+ chunks.append(current_chunk)
+
+ return chunks
+
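The same greedy grouping, sketched over plain strings to show how the ~4-chars-per-token estimate drives chunk boundaries (stand-alone example, not part of the class):

```python
# Minimal sketch of the adaptive chunking heuristic over plain strings.
def chunk_texts(texts: list[str], max_tokens_per_chunk: int = 4000) -> list[list[str]]:
    chunks, current, current_tokens = [], [], 0
    for text in texts:
        estimated = len(text) // 4  # ~4 characters per token
        if current_tokens + estimated > max_tokens_per_chunk and current:
            chunks.append(current)
            current, current_tokens = [text], estimated
        else:
            current.append(text)
            current_tokens += estimated
    if current:
        chunks.append(current)
    return chunks

print([len(c) for c in chunk_texts(["a" * 10_000, "b" * 10_000, "c" * 2_000])])  # [1, 2]
```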
+ def _format_result_for_report(
+ self,
+ result: AgentResult,
+ include_metadata: bool = False
+ ) -> str:
+ """
+ Format an AgentResult as markdown for the report.
+
+ Args:
+ result: AgentResult to format
+ include_metadata: Whether to include metadata (time, status, etc.)
+
+ Returns:
+ Formatted markdown string
+ """
+ parts = [
+ f"## {result.agent_name}",
+ "",
+ f"**Task**: {result.task}",
+ ""
+ ]
+
+ if include_metadata:
+ parts.extend([
+ f"**Execution Time**: {result.execution_time:.2f}s",
+ f"**Timestamp**: {result.timestamp.isoformat()}",
+ ""
+ ])
+
+ # Format the result content
+ result_content = str(result.result)
+
+ # If it is very long, wrap it in a code block
+ if len(result_content) > 500:
+ parts.extend([
+ "**Result**:",
+ "```",
+ result_content,
+ "```"
+ ])
+ else:
+ parts.extend([
+ "**Result**:",
+ result_content
+ ])
+
+ parts.append("")  # Trailing blank line
+
+ return "\n".join(parts)
+
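For orientation, a sketch of what the formatter produces for a short (≤500-character) result; it assumes an existing `crew` and `result`, and the agent name, task, and values shown in the comments are made up:

```python
# Sketch: expected shape of the formatted markdown (values illustrative).
md = crew._format_result_for_report(result, include_metadata=True)
# ## Research Agent
#
# **Task**: Collect market data
#
# **Execution Time**: 12.40s
# **Timestamp**: 2025-01-01T09:30:00
#
# **Result**:
# Key finding: demand grew 12% quarter over quarter.
print(md)
```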
+ def _generate_full_report(self) -> str:
+ """
+ Generate a full report by concatenating all results.
+
+ Does not use an LLM; it simply formats and concatenates results in order.
+ Agents with errors are skipped.
+
+ Returns:
+ Markdown string with the full report
+ """
+ self.logger.info("Generating full report (no LLM)...")
+
+ report_parts = [
+ f"# {self.name} - Full Execution Report",
+ "",
+ f"**Generated**: {datetime.now().isoformat()}",
+ ""
+ ]
+
+ # Add metadata from the last crew result, if any
+ if self.last_crew_result:
+ report_parts.extend([
+ "## Execution Summary",
+ "",
+ f"- **Mode**: {self.last_crew_result.metadata.get('mode', 'unknown')}",
+ f"- **Total Agents**: {len(self.agents)}",
+ f"- **Status**: {self.last_crew_result.status}",
+ f"- **Total Time**: {self.last_crew_result.total_time:.2f}s",
+ "",
+ "---",
+ ""
+ ])
+
+ report_parts.extend(("## Agent Results", ""))
+ results_added = 0
+ for agent_id in self.execution_memory.execution_order:
+ result = self.execution_memory.get_results_by_agent(agent_id)
+
+ if not result:
+ continue
+
+ # Skip errors
+ if hasattr(result, 'metadata') and result.metadata.get('status') == 'failed':
+ continue
+
+ formatted = self._format_result_for_report(result, include_metadata=False)
+ report_parts.append(formatted)
+ report_parts.append("---")
+ report_parts.append("")
+ results_added += 1
+
+ self.logger.info(f"Full report generated with {results_added} agent results")
+
+ return "\n".join(report_parts)
+
+ async def _generate_executive_summary(
+ self,
+ summary_prompt: Optional[str] = None,
+ max_tokens_per_chunk: int = 4000,
+ user_id: Optional[str] = None,
+ session_id: Optional[str] = None,
+ **llm_kwargs
+ ) -> str:
+ """
+ Generate an executive summary using an iterative, chunked LLM pass.
+
+ Process:
+ 1. Split results into chunks
+ 2. For each chunk: the LLM generates a mini-summary
+ 3. Final pass: the LLM combines the mini-summaries into an executive summary
+
+ Guarantees completeness without truncation due to max_tokens.
+
+ Args:
+ summary_prompt: Custom prompt (uses the default if None)
+ max_tokens_per_chunk: Maximum tokens per chunk
+ user_id: User ID
+ session_id: Session ID
+
+ Returns:
+ Markdown string with the executive summary
+ """
+ if not self._llm:
+ raise ValueError(
+ "No LLM configured. Pass llm parameter to AgentCrew constructor."
+ )
+
+ self.logger.info("Generating executive summary with iterative LLM...")
+
+ # Default summary prompt
+ if not summary_prompt:
+ summary_prompt = """Based on the research findings from our specialist agents above,
+ provide a comprehensive synthesis that:
+ 1. Integrates all the key findings
+ 2. Highlights the most important insights
+ 3. Identifies any patterns or contradictions
+ 4. Provides actionable conclusions
+
+ Create a clear, well-structured response."""
+
+ # 1. Split into chunks
+ chunks = self._chunk_results_adaptive(max_tokens_per_chunk)
+
+ if not chunks:
+ return "No results available to summarize."
+
+ self.logger.info(
+ f"Processing {len(chunks)} chunks for executive summary"
+ )
+
+ # 2. Process each chunk with progress feedback
+ mini_summaries = []
+ session_id = session_id or str(uuid.uuid4())
+ user_id = user_id or 'crew_summary_user'
+ # Progress tracking
+ if self.use_tqdm:
+ chunk_iterator = async_tqdm(
+ enumerate(chunks, 1),
+ total=len(chunks),
+ desc="Summarizing chunks"
+ )
+ else:
+ chunk_iterator = enumerate(chunks, 1)
+ for chunk_idx, chunk in chunk_iterator:
+ if not self.use_tqdm:
+ self.logger.info(f"Processing chunk {chunk_idx}/{len(chunks)}...")
+
+ # Build the chunk context
+ chunk_context_parts = [
+ f"# Chunk {chunk_idx} of {len(chunks)} - Agent Results",
+ ""
+ ]
+
+ for result in chunk:
+ formatted = self._format_result_for_report(
+ result,
+ include_metadata=False
+ )
+ chunk_context_parts.append(formatted)
+
+ chunk_context = "\n".join(chunk_context_parts)
+
+ # Prompt for the mini-summary
+ chunk_prompt = f"""{chunk_context}
+ ---
+ **Task**: Provide a concise summary of the key findings from these agents.
+ Focus on main insights and important information. This summary will be combined
+ with other summaries to create a final executive summary.
+
+ Keep your summary clear, structured, and focused on the most valuable information."""
+
+ # Call the LLM
+ async with self._llm as client:
+ try:
+ response = await client.ask(
+ question=chunk_prompt,
+ use_conversation_history=False,
+ max_tokens=4096,
+ temperature=0.3,
+ user_id=user_id,
+ session_id=f"{session_id}_chunk_{chunk_idx}",
+ **llm_kwargs
+ )
+ mini_summaries.append({
+ 'chunk_idx': chunk_idx,
+ 'summary': response.content,
+ 'agents': [r.agent_name for r in chunk]
+ })
+ except Exception as e:
+ self.logger.error(f"Error processing chunk {chunk_idx}: {e}")
+ # Append a placeholder
+ mini_summaries.append({
+ 'chunk_idx': chunk_idx,
+ 'summary': f"[Error processing chunk {chunk_idx}]",
+ 'agents': [r.agent_name for r in chunk]
+ })
+
+ # 3. Final pass: combine the mini-summaries
+ self.logger.info("Generating final executive summary...")
+
+ final_context_parts = [
+ f"# {self.name} - Agent Summaries to Synthesize",
+ ""
+ ]
+
+ for mini in mini_summaries:
+ final_context_parts.extend([
+ f"## Summary Part {mini['chunk_idx']}",
+ f"*Agents: {', '.join(mini['agents'])}*",
+ "",
+ mini['summary'],
+ "",
+ "---",
+ ""
+ ])
+
+ final_context = "\n".join(final_context_parts)
+
+ # Final synthesis prompt
+ final_prompt = f"""{final_context}
+
+ ---
+
+ {summary_prompt}
+
+ **Important**: Create a cohesive executive summary that synthesizes ALL the information
+ above. Ensure the summary:
+ - Is well-structured with clear sections
+ - Integrates findings from all agent summaries
+ - Highlights the most critical insights
+ - Provides actionable recommendations
+ - Maintains a professional, executive-level tone"""
+
+ # Final LLM call
+ async with self._llm as client:
+ final_response = await client.ask(
+ question=final_prompt,
+ use_conversation_history=False,
+ max_tokens=llm_kwargs.get('max_tokens', 4096),
+ temperature=0.3,
+ user_id=user_id,
+ session_id=f"{session_id}_final",
+ **llm_kwargs
+ )
+
+ self.logger.info("Executive summary generated successfully")
+
+ # Build the final report with metadata
+ final_report_parts = [
+ f"# {self.name} - Executive Summary",
+ "",
+ f"**Generated**: {datetime.now().isoformat()}",
+ ""
+ ]
+
+ if self.last_crew_result:
+ final_report_parts.extend([
+ "## Execution Overview",
+ "",
+ f"- **Mode**: {self.last_crew_result.metadata.get('mode', 'unknown')}",
+ f"- **Total Agents**: {len(self.agents)}",
+ f"- **Status**: {self.last_crew_result.status}",
+ f"- **Chunks Processed**: {len(chunks)}",
+ "",
+ "---",
+ ""
+ ])
+
+ final_report_parts.extend([
+ "## Summary",
+ "",
+ final_response.content
+ ])
+
+ return "\n".join(final_report_parts)
+
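The method above follows a chunk → mini-summary → final-synthesis pattern. A stripped-down, framework-agnostic sketch of that flow; the `summarize` callable is a placeholder for any LLM call and is not part of the package:

```python
# Generic map-reduce summarization sketch; `summarize` stands in for an LLM client call.
from typing import Callable, List

def executive_summary(chunks: List[str], summarize: Callable[[str], str], final_prompt: str) -> str:
    # Map: one mini-summary per chunk.
    mini_summaries = [summarize(f"Summarize the key findings:\n{chunk}") for chunk in chunks]
    # Reduce: synthesize the mini-summaries into a single executive summary.
    combined = "\n---\n".join(mini_summaries)
    return summarize(f"{combined}\n\n{final_prompt}")

# Example with a dummy "LLM" that just truncates its prompt:
print(executive_summary(["chunk A", "chunk B"], summarize=lambda p: p[:40], final_prompt="Synthesize."))
```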
+ async def summary(
+ self,
+ mode: Literal["full_report", "executive_summary"] = "executive_summary",
+ summary_prompt: Optional[str] = None,
+ max_tokens_per_chunk: int = 4000,
+ user_id: Optional[str] = None,
+ session_id: Optional[str] = None,
+ **llm_kwargs
+ ) -> str:
+ """
+ Generate a full report or an executive summary of all results.
+
+ Two modes of operation:
+
+ 1. **full_report** (no LLM):
+ - Iterates in order over execution_memory.execution_order
+ - Concatenates all formatted results
+ - Returns a complete markdown document
+ - Fast, does not require an LLM
+
+ 2. **executive_summary** (iterative LLM):
+ - Splits results into chunks (respecting max_tokens)
+ - The LLM processes each chunk → generates a mini-summary
+ - Combines the mini-summaries → final executive summary
+ - Guarantees completeness without truncation
+ - Uses progress feedback (tqdm if available)
+
+ Characteristics:
+ - Strictly respects execution_order
+ - Skips agents with errors
+ - Does not include metadata by default (simplified)
+ - Returns structured markdown
+
+ Args:
+ mode: Report type ('full_report' or 'executive_summary')
+ summary_prompt: Custom prompt for the executive summary
+ (uses the default if None)
+ max_tokens_per_chunk: Maximum tokens per chunk for executive_summary
+ user_id: User identifier
+ session_id: Session identifier
+ **llm_kwargs: Additional arguments for the LLM
+
+ Returns:
+ Markdown string with the complete report
+
+ Raises:
+ ValueError: If mode='executive_summary' but no LLM is configured
+ ValueError: If there are no results in execution_memory
+
+ Example:
+ >>> # Full report without an LLM
+ >>> report = await crew.summary(mode="full_report")
+ >>> print(report)
+
+ >>> # Executive summary with an LLM
+ >>> summary = await crew.summary(
+ ... mode="executive_summary",
+ ... summary_prompt="Create executive summary highlighting ROI"
+ ... )
+ >>> print(summary)
+ """
+ # Validation
+ if not self.execution_memory.results:
+ raise ValueError(
+ "No execution results available. Run crew first using "
+ "run_sequential(), run_parallel(), run_flow(), or run_loop()."
+ )
+
+ if mode == "executive_summary" and not self._llm:
+ raise ValueError(
+ "executive_summary mode requires LLM. "
+ "Either use mode='full_report' or pass llm to AgentCrew constructor."
+ )
+
+ self.logger.info(
+ f"Generating {mode} from {len(self.execution_memory.results)} results"
+ )
+
+ # Dispatch by mode
+ if mode == "full_report":
+ result = self._generate_full_report()
+ else:  # executive_summary
+ result = await self._generate_executive_summary(
+ summary_prompt=summary_prompt,
+ max_tokens_per_chunk=max_tokens_per_chunk,
+ user_id=user_id,
+ session_id=session_id,
+ **llm_kwargs
+ )
+
+ # Save in self._summary
+ self._summary = result
+
+ return result
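A usage sketch covering both modes, following the docstring examples; the file paths and prompt text are illustrative:

```python
import asyncio

async def write_reports(crew):
    # Fast path: no LLM needed, just the concatenated agent results.
    full = await crew.summary(mode="full_report")
    # LLM path: chunked mini-summaries synthesized into one executive summary.
    exec_summary = await crew.summary(
        mode="executive_summary",
        summary_prompt="Create an executive summary highlighting ROI",
        max_tokens_per_chunk=4000,
    )
    with open("full_report.md", "w", encoding="utf-8") as f:  # illustrative path
        f.write(full)
    with open("executive_summary.md", "w", encoding="utf-8") as f:  # illustrative path
        f.write(exec_summary)

# asyncio.run(write_reports(crew))
```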