PraisonAI 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (393)
  1. praisonai/__init__.py +54 -0
  2. praisonai/__main__.py +15 -0
  3. praisonai/acp/__init__.py +54 -0
  4. praisonai/acp/config.py +159 -0
  5. praisonai/acp/server.py +587 -0
  6. praisonai/acp/session.py +219 -0
  7. praisonai/adapters/__init__.py +50 -0
  8. praisonai/adapters/readers.py +395 -0
  9. praisonai/adapters/rerankers.py +315 -0
  10. praisonai/adapters/retrievers.py +394 -0
  11. praisonai/adapters/vector_stores.py +409 -0
  12. praisonai/agent_scheduler.py +337 -0
  13. praisonai/agents_generator.py +903 -0
  14. praisonai/api/call.py +292 -0
  15. praisonai/auto.py +1197 -0
  16. praisonai/capabilities/__init__.py +275 -0
  17. praisonai/capabilities/a2a.py +140 -0
  18. praisonai/capabilities/assistants.py +283 -0
  19. praisonai/capabilities/audio.py +320 -0
  20. praisonai/capabilities/batches.py +469 -0
  21. praisonai/capabilities/completions.py +336 -0
  22. praisonai/capabilities/container_files.py +155 -0
  23. praisonai/capabilities/containers.py +93 -0
  24. praisonai/capabilities/embeddings.py +158 -0
  25. praisonai/capabilities/files.py +467 -0
  26. praisonai/capabilities/fine_tuning.py +293 -0
  27. praisonai/capabilities/guardrails.py +182 -0
  28. praisonai/capabilities/images.py +330 -0
  29. praisonai/capabilities/mcp.py +190 -0
  30. praisonai/capabilities/messages.py +270 -0
  31. praisonai/capabilities/moderations.py +154 -0
  32. praisonai/capabilities/ocr.py +217 -0
  33. praisonai/capabilities/passthrough.py +204 -0
  34. praisonai/capabilities/rag.py +207 -0
  35. praisonai/capabilities/realtime.py +160 -0
  36. praisonai/capabilities/rerank.py +165 -0
  37. praisonai/capabilities/responses.py +266 -0
  38. praisonai/capabilities/search.py +109 -0
  39. praisonai/capabilities/skills.py +133 -0
  40. praisonai/capabilities/vector_store_files.py +334 -0
  41. praisonai/capabilities/vector_stores.py +304 -0
  42. praisonai/capabilities/videos.py +141 -0
  43. praisonai/chainlit_ui.py +304 -0
  44. praisonai/chat/__init__.py +106 -0
  45. praisonai/chat/app.py +125 -0
  46. praisonai/cli/__init__.py +26 -0
  47. praisonai/cli/app.py +213 -0
  48. praisonai/cli/commands/__init__.py +75 -0
  49. praisonai/cli/commands/acp.py +70 -0
  50. praisonai/cli/commands/completion.py +333 -0
  51. praisonai/cli/commands/config.py +166 -0
  52. praisonai/cli/commands/debug.py +142 -0
  53. praisonai/cli/commands/diag.py +55 -0
  54. praisonai/cli/commands/doctor.py +166 -0
  55. praisonai/cli/commands/environment.py +179 -0
  56. praisonai/cli/commands/lsp.py +112 -0
  57. praisonai/cli/commands/mcp.py +210 -0
  58. praisonai/cli/commands/profile.py +457 -0
  59. praisonai/cli/commands/run.py +228 -0
  60. praisonai/cli/commands/schedule.py +150 -0
  61. praisonai/cli/commands/serve.py +97 -0
  62. praisonai/cli/commands/session.py +212 -0
  63. praisonai/cli/commands/traces.py +145 -0
  64. praisonai/cli/commands/version.py +101 -0
  65. praisonai/cli/configuration/__init__.py +18 -0
  66. praisonai/cli/configuration/loader.py +353 -0
  67. praisonai/cli/configuration/paths.py +114 -0
  68. praisonai/cli/configuration/schema.py +164 -0
  69. praisonai/cli/features/__init__.py +268 -0
  70. praisonai/cli/features/acp.py +236 -0
  71. praisonai/cli/features/action_orchestrator.py +546 -0
  72. praisonai/cli/features/agent_scheduler.py +773 -0
  73. praisonai/cli/features/agent_tools.py +474 -0
  74. praisonai/cli/features/agents.py +375 -0
  75. praisonai/cli/features/at_mentions.py +471 -0
  76. praisonai/cli/features/auto_memory.py +182 -0
  77. praisonai/cli/features/autonomy_mode.py +490 -0
  78. praisonai/cli/features/background.py +356 -0
  79. praisonai/cli/features/base.py +168 -0
  80. praisonai/cli/features/capabilities.py +1326 -0
  81. praisonai/cli/features/checkpoints.py +338 -0
  82. praisonai/cli/features/code_intelligence.py +652 -0
  83. praisonai/cli/features/compaction.py +294 -0
  84. praisonai/cli/features/compare.py +534 -0
  85. praisonai/cli/features/cost_tracker.py +514 -0
  86. praisonai/cli/features/debug.py +810 -0
  87. praisonai/cli/features/deploy.py +517 -0
  88. praisonai/cli/features/diag.py +289 -0
  89. praisonai/cli/features/doctor/__init__.py +63 -0
  90. praisonai/cli/features/doctor/checks/__init__.py +24 -0
  91. praisonai/cli/features/doctor/checks/acp_checks.py +240 -0
  92. praisonai/cli/features/doctor/checks/config_checks.py +366 -0
  93. praisonai/cli/features/doctor/checks/db_checks.py +366 -0
  94. praisonai/cli/features/doctor/checks/env_checks.py +543 -0
  95. praisonai/cli/features/doctor/checks/lsp_checks.py +199 -0
  96. praisonai/cli/features/doctor/checks/mcp_checks.py +349 -0
  97. praisonai/cli/features/doctor/checks/memory_checks.py +268 -0
  98. praisonai/cli/features/doctor/checks/network_checks.py +251 -0
  99. praisonai/cli/features/doctor/checks/obs_checks.py +328 -0
  100. praisonai/cli/features/doctor/checks/performance_checks.py +235 -0
  101. praisonai/cli/features/doctor/checks/permissions_checks.py +259 -0
  102. praisonai/cli/features/doctor/checks/selftest_checks.py +322 -0
  103. praisonai/cli/features/doctor/checks/serve_checks.py +426 -0
  104. praisonai/cli/features/doctor/checks/skills_checks.py +231 -0
  105. praisonai/cli/features/doctor/checks/tools_checks.py +371 -0
  106. praisonai/cli/features/doctor/engine.py +266 -0
  107. praisonai/cli/features/doctor/formatters.py +310 -0
  108. praisonai/cli/features/doctor/handler.py +397 -0
  109. praisonai/cli/features/doctor/models.py +264 -0
  110. praisonai/cli/features/doctor/registry.py +239 -0
  111. praisonai/cli/features/endpoints.py +1019 -0
  112. praisonai/cli/features/eval.py +560 -0
  113. praisonai/cli/features/external_agents.py +231 -0
  114. praisonai/cli/features/fast_context.py +410 -0
  115. praisonai/cli/features/flow_display.py +566 -0
  116. praisonai/cli/features/git_integration.py +651 -0
  117. praisonai/cli/features/guardrail.py +171 -0
  118. praisonai/cli/features/handoff.py +185 -0
  119. praisonai/cli/features/hooks.py +583 -0
  120. praisonai/cli/features/image.py +384 -0
  121. praisonai/cli/features/interactive_runtime.py +585 -0
  122. praisonai/cli/features/interactive_tools.py +380 -0
  123. praisonai/cli/features/interactive_tui.py +603 -0
  124. praisonai/cli/features/jobs.py +632 -0
  125. praisonai/cli/features/knowledge.py +531 -0
  126. praisonai/cli/features/lite.py +244 -0
  127. praisonai/cli/features/lsp_cli.py +225 -0
  128. praisonai/cli/features/mcp.py +169 -0
  129. praisonai/cli/features/message_queue.py +587 -0
  130. praisonai/cli/features/metrics.py +211 -0
  131. praisonai/cli/features/n8n.py +673 -0
  132. praisonai/cli/features/observability.py +293 -0
  133. praisonai/cli/features/ollama.py +361 -0
  134. praisonai/cli/features/output_style.py +273 -0
  135. praisonai/cli/features/package.py +631 -0
  136. praisonai/cli/features/performance.py +308 -0
  137. praisonai/cli/features/persistence.py +636 -0
  138. praisonai/cli/features/profile.py +226 -0
  139. praisonai/cli/features/profiler/__init__.py +81 -0
  140. praisonai/cli/features/profiler/core.py +558 -0
  141. praisonai/cli/features/profiler/optimizations.py +652 -0
  142. praisonai/cli/features/profiler/suite.py +386 -0
  143. praisonai/cli/features/profiling.py +350 -0
  144. praisonai/cli/features/queue/__init__.py +73 -0
  145. praisonai/cli/features/queue/manager.py +395 -0
  146. praisonai/cli/features/queue/models.py +286 -0
  147. praisonai/cli/features/queue/persistence.py +564 -0
  148. praisonai/cli/features/queue/scheduler.py +484 -0
  149. praisonai/cli/features/queue/worker.py +372 -0
  150. praisonai/cli/features/recipe.py +1723 -0
  151. praisonai/cli/features/recipes.py +449 -0
  152. praisonai/cli/features/registry.py +229 -0
  153. praisonai/cli/features/repo_map.py +860 -0
  154. praisonai/cli/features/router.py +466 -0
  155. praisonai/cli/features/sandbox_executor.py +515 -0
  156. praisonai/cli/features/serve.py +829 -0
  157. praisonai/cli/features/session.py +222 -0
  158. praisonai/cli/features/skills.py +856 -0
  159. praisonai/cli/features/slash_commands.py +650 -0
  160. praisonai/cli/features/telemetry.py +179 -0
  161. praisonai/cli/features/templates.py +1384 -0
  162. praisonai/cli/features/thinking.py +305 -0
  163. praisonai/cli/features/todo.py +334 -0
  164. praisonai/cli/features/tools.py +680 -0
  165. praisonai/cli/features/tui/__init__.py +83 -0
  166. praisonai/cli/features/tui/app.py +580 -0
  167. praisonai/cli/features/tui/cli.py +566 -0
  168. praisonai/cli/features/tui/debug.py +511 -0
  169. praisonai/cli/features/tui/events.py +99 -0
  170. praisonai/cli/features/tui/mock_provider.py +328 -0
  171. praisonai/cli/features/tui/orchestrator.py +652 -0
  172. praisonai/cli/features/tui/screens/__init__.py +50 -0
  173. praisonai/cli/features/tui/screens/main.py +245 -0
  174. praisonai/cli/features/tui/screens/queue.py +174 -0
  175. praisonai/cli/features/tui/screens/session.py +124 -0
  176. praisonai/cli/features/tui/screens/settings.py +148 -0
  177. praisonai/cli/features/tui/widgets/__init__.py +56 -0
  178. praisonai/cli/features/tui/widgets/chat.py +261 -0
  179. praisonai/cli/features/tui/widgets/composer.py +224 -0
  180. praisonai/cli/features/tui/widgets/queue_panel.py +200 -0
  181. praisonai/cli/features/tui/widgets/status.py +167 -0
  182. praisonai/cli/features/tui/widgets/tool_panel.py +248 -0
  183. praisonai/cli/features/workflow.py +720 -0
  184. praisonai/cli/legacy.py +236 -0
  185. praisonai/cli/main.py +5559 -0
  186. praisonai/cli/schedule_cli.py +54 -0
  187. praisonai/cli/state/__init__.py +31 -0
  188. praisonai/cli/state/identifiers.py +161 -0
  189. praisonai/cli/state/sessions.py +313 -0
  190. praisonai/code/__init__.py +93 -0
  191. praisonai/code/agent_tools.py +344 -0
  192. praisonai/code/diff/__init__.py +21 -0
  193. praisonai/code/diff/diff_strategy.py +432 -0
  194. praisonai/code/tools/__init__.py +27 -0
  195. praisonai/code/tools/apply_diff.py +221 -0
  196. praisonai/code/tools/execute_command.py +275 -0
  197. praisonai/code/tools/list_files.py +274 -0
  198. praisonai/code/tools/read_file.py +206 -0
  199. praisonai/code/tools/search_replace.py +248 -0
  200. praisonai/code/tools/write_file.py +217 -0
  201. praisonai/code/utils/__init__.py +46 -0
  202. praisonai/code/utils/file_utils.py +307 -0
  203. praisonai/code/utils/ignore_utils.py +308 -0
  204. praisonai/code/utils/text_utils.py +276 -0
  205. praisonai/db/__init__.py +64 -0
  206. praisonai/db/adapter.py +531 -0
  207. praisonai/deploy/__init__.py +62 -0
  208. praisonai/deploy/api.py +231 -0
  209. praisonai/deploy/docker.py +454 -0
  210. praisonai/deploy/doctor.py +367 -0
  211. praisonai/deploy/main.py +327 -0
  212. praisonai/deploy/models.py +179 -0
  213. praisonai/deploy/providers/__init__.py +33 -0
  214. praisonai/deploy/providers/aws.py +331 -0
  215. praisonai/deploy/providers/azure.py +358 -0
  216. praisonai/deploy/providers/base.py +101 -0
  217. praisonai/deploy/providers/gcp.py +314 -0
  218. praisonai/deploy/schema.py +208 -0
  219. praisonai/deploy.py +185 -0
  220. praisonai/endpoints/__init__.py +53 -0
  221. praisonai/endpoints/a2u_server.py +410 -0
  222. praisonai/endpoints/discovery.py +165 -0
  223. praisonai/endpoints/providers/__init__.py +28 -0
  224. praisonai/endpoints/providers/a2a.py +253 -0
  225. praisonai/endpoints/providers/a2u.py +208 -0
  226. praisonai/endpoints/providers/agents_api.py +171 -0
  227. praisonai/endpoints/providers/base.py +231 -0
  228. praisonai/endpoints/providers/mcp.py +263 -0
  229. praisonai/endpoints/providers/recipe.py +206 -0
  230. praisonai/endpoints/providers/tools_mcp.py +150 -0
  231. praisonai/endpoints/registry.py +131 -0
  232. praisonai/endpoints/server.py +161 -0
  233. praisonai/inbuilt_tools/__init__.py +24 -0
  234. praisonai/inbuilt_tools/autogen_tools.py +117 -0
  235. praisonai/inc/__init__.py +2 -0
  236. praisonai/inc/config.py +96 -0
  237. praisonai/inc/models.py +155 -0
  238. praisonai/integrations/__init__.py +56 -0
  239. praisonai/integrations/base.py +303 -0
  240. praisonai/integrations/claude_code.py +270 -0
  241. praisonai/integrations/codex_cli.py +255 -0
  242. praisonai/integrations/cursor_cli.py +195 -0
  243. praisonai/integrations/gemini_cli.py +222 -0
  244. praisonai/jobs/__init__.py +67 -0
  245. praisonai/jobs/executor.py +425 -0
  246. praisonai/jobs/models.py +230 -0
  247. praisonai/jobs/router.py +314 -0
  248. praisonai/jobs/server.py +186 -0
  249. praisonai/jobs/store.py +203 -0
  250. praisonai/llm/__init__.py +66 -0
  251. praisonai/llm/registry.py +382 -0
  252. praisonai/mcp_server/__init__.py +152 -0
  253. praisonai/mcp_server/adapters/__init__.py +74 -0
  254. praisonai/mcp_server/adapters/agents.py +128 -0
  255. praisonai/mcp_server/adapters/capabilities.py +168 -0
  256. praisonai/mcp_server/adapters/cli_tools.py +568 -0
  257. praisonai/mcp_server/adapters/extended_capabilities.py +462 -0
  258. praisonai/mcp_server/adapters/knowledge.py +93 -0
  259. praisonai/mcp_server/adapters/memory.py +104 -0
  260. praisonai/mcp_server/adapters/prompts.py +306 -0
  261. praisonai/mcp_server/adapters/resources.py +124 -0
  262. praisonai/mcp_server/adapters/tools_bridge.py +280 -0
  263. praisonai/mcp_server/auth/__init__.py +48 -0
  264. praisonai/mcp_server/auth/api_key.py +291 -0
  265. praisonai/mcp_server/auth/oauth.py +460 -0
  266. praisonai/mcp_server/auth/oidc.py +289 -0
  267. praisonai/mcp_server/auth/scopes.py +260 -0
  268. praisonai/mcp_server/cli.py +852 -0
  269. praisonai/mcp_server/elicitation.py +445 -0
  270. praisonai/mcp_server/icons.py +302 -0
  271. praisonai/mcp_server/recipe_adapter.py +573 -0
  272. praisonai/mcp_server/recipe_cli.py +824 -0
  273. praisonai/mcp_server/registry.py +703 -0
  274. praisonai/mcp_server/sampling.py +422 -0
  275. praisonai/mcp_server/server.py +490 -0
  276. praisonai/mcp_server/tasks.py +443 -0
  277. praisonai/mcp_server/transports/__init__.py +18 -0
  278. praisonai/mcp_server/transports/http_stream.py +376 -0
  279. praisonai/mcp_server/transports/stdio.py +132 -0
  280. praisonai/persistence/__init__.py +84 -0
  281. praisonai/persistence/config.py +238 -0
  282. praisonai/persistence/conversation/__init__.py +25 -0
  283. praisonai/persistence/conversation/async_mysql.py +427 -0
  284. praisonai/persistence/conversation/async_postgres.py +410 -0
  285. praisonai/persistence/conversation/async_sqlite.py +371 -0
  286. praisonai/persistence/conversation/base.py +151 -0
  287. praisonai/persistence/conversation/json_store.py +250 -0
  288. praisonai/persistence/conversation/mysql.py +387 -0
  289. praisonai/persistence/conversation/postgres.py +401 -0
  290. praisonai/persistence/conversation/singlestore.py +240 -0
  291. praisonai/persistence/conversation/sqlite.py +341 -0
  292. praisonai/persistence/conversation/supabase.py +203 -0
  293. praisonai/persistence/conversation/surrealdb.py +287 -0
  294. praisonai/persistence/factory.py +301 -0
  295. praisonai/persistence/hooks/__init__.py +18 -0
  296. praisonai/persistence/hooks/agent_hooks.py +297 -0
  297. praisonai/persistence/knowledge/__init__.py +26 -0
  298. praisonai/persistence/knowledge/base.py +144 -0
  299. praisonai/persistence/knowledge/cassandra.py +232 -0
  300. praisonai/persistence/knowledge/chroma.py +295 -0
  301. praisonai/persistence/knowledge/clickhouse.py +242 -0
  302. praisonai/persistence/knowledge/cosmosdb_vector.py +438 -0
  303. praisonai/persistence/knowledge/couchbase.py +286 -0
  304. praisonai/persistence/knowledge/lancedb.py +216 -0
  305. praisonai/persistence/knowledge/langchain_adapter.py +291 -0
  306. praisonai/persistence/knowledge/lightrag_adapter.py +212 -0
  307. praisonai/persistence/knowledge/llamaindex_adapter.py +256 -0
  308. praisonai/persistence/knowledge/milvus.py +277 -0
  309. praisonai/persistence/knowledge/mongodb_vector.py +306 -0
  310. praisonai/persistence/knowledge/pgvector.py +335 -0
  311. praisonai/persistence/knowledge/pinecone.py +253 -0
  312. praisonai/persistence/knowledge/qdrant.py +301 -0
  313. praisonai/persistence/knowledge/redis_vector.py +291 -0
  314. praisonai/persistence/knowledge/singlestore_vector.py +299 -0
  315. praisonai/persistence/knowledge/surrealdb_vector.py +309 -0
  316. praisonai/persistence/knowledge/upstash_vector.py +266 -0
  317. praisonai/persistence/knowledge/weaviate.py +223 -0
  318. praisonai/persistence/migrations/__init__.py +10 -0
  319. praisonai/persistence/migrations/manager.py +251 -0
  320. praisonai/persistence/orchestrator.py +406 -0
  321. praisonai/persistence/state/__init__.py +21 -0
  322. praisonai/persistence/state/async_mongodb.py +200 -0
  323. praisonai/persistence/state/base.py +107 -0
  324. praisonai/persistence/state/dynamodb.py +226 -0
  325. praisonai/persistence/state/firestore.py +175 -0
  326. praisonai/persistence/state/gcs.py +155 -0
  327. praisonai/persistence/state/memory.py +245 -0
  328. praisonai/persistence/state/mongodb.py +158 -0
  329. praisonai/persistence/state/redis.py +190 -0
  330. praisonai/persistence/state/upstash.py +144 -0
  331. praisonai/persistence/tests/__init__.py +3 -0
  332. praisonai/persistence/tests/test_all_backends.py +633 -0
  333. praisonai/profiler.py +1214 -0
  334. praisonai/recipe/__init__.py +134 -0
  335. praisonai/recipe/bridge.py +278 -0
  336. praisonai/recipe/core.py +893 -0
  337. praisonai/recipe/exceptions.py +54 -0
  338. praisonai/recipe/history.py +402 -0
  339. praisonai/recipe/models.py +266 -0
  340. praisonai/recipe/operations.py +440 -0
  341. praisonai/recipe/policy.py +422 -0
  342. praisonai/recipe/registry.py +849 -0
  343. praisonai/recipe/runtime.py +214 -0
  344. praisonai/recipe/security.py +711 -0
  345. praisonai/recipe/serve.py +859 -0
  346. praisonai/recipe/server.py +613 -0
  347. praisonai/scheduler/__init__.py +45 -0
  348. praisonai/scheduler/agent_scheduler.py +552 -0
  349. praisonai/scheduler/base.py +124 -0
  350. praisonai/scheduler/daemon_manager.py +225 -0
  351. praisonai/scheduler/state_manager.py +155 -0
  352. praisonai/scheduler/yaml_loader.py +193 -0
  353. praisonai/scheduler.py +194 -0
  354. praisonai/setup/__init__.py +1 -0
  355. praisonai/setup/build.py +21 -0
  356. praisonai/setup/post_install.py +23 -0
  357. praisonai/setup/setup_conda_env.py +25 -0
  358. praisonai/setup.py +16 -0
  359. praisonai/templates/__init__.py +116 -0
  360. praisonai/templates/cache.py +364 -0
  361. praisonai/templates/dependency_checker.py +358 -0
  362. praisonai/templates/discovery.py +391 -0
  363. praisonai/templates/loader.py +564 -0
  364. praisonai/templates/registry.py +511 -0
  365. praisonai/templates/resolver.py +206 -0
  366. praisonai/templates/security.py +327 -0
  367. praisonai/templates/tool_override.py +498 -0
  368. praisonai/templates/tools_doctor.py +256 -0
  369. praisonai/test.py +105 -0
  370. praisonai/train.py +562 -0
  371. praisonai/train_vision.py +306 -0
  372. praisonai/ui/agents.py +824 -0
  373. praisonai/ui/callbacks.py +57 -0
  374. praisonai/ui/chainlit_compat.py +246 -0
  375. praisonai/ui/chat.py +532 -0
  376. praisonai/ui/code.py +717 -0
  377. praisonai/ui/colab.py +474 -0
  378. praisonai/ui/colab_chainlit.py +81 -0
  379. praisonai/ui/components/aicoder.py +284 -0
  380. praisonai/ui/context.py +283 -0
  381. praisonai/ui/database_config.py +56 -0
  382. praisonai/ui/db.py +294 -0
  383. praisonai/ui/realtime.py +488 -0
  384. praisonai/ui/realtimeclient/__init__.py +756 -0
  385. praisonai/ui/realtimeclient/tools.py +242 -0
  386. praisonai/ui/sql_alchemy.py +710 -0
  387. praisonai/upload_vision.py +140 -0
  388. praisonai/version.py +1 -0
  389. praisonai-3.0.0.dist-info/METADATA +3493 -0
  390. praisonai-3.0.0.dist-info/RECORD +393 -0
  391. praisonai-3.0.0.dist-info/WHEEL +5 -0
  392. praisonai-3.0.0.dist-info/entry_points.txt +4 -0
  393. praisonai-3.0.0.dist-info/top_level.txt +1 -0
praisonai/auto.py ADDED
@@ -0,0 +1,1197 @@
1
+ from openai import OpenAI
2
+ from pydantic import BaseModel
3
+ from typing import Dict, List, Optional
4
+ import instructor
5
+ import os
6
+ import json
7
+ import yaml
8
+ from rich import print
9
+ import logging
10
+
11
# Framework-specific imports with availability checks.
# Each optional backend is probed with a guarded import; the matching
# *_AVAILABLE flag records whether that framework can be used at runtime.
CREWAI_AVAILABLE = False
AUTOGEN_AVAILABLE = False
PRAISONAI_TOOLS_AVAILABLE = False
PRAISONAI_AVAILABLE = False

try:
    from praisonaiagents import Agent as PraisonAgent, Task as PraisonTask, PraisonAIAgents
    PRAISONAI_AVAILABLE = True
except ImportError:
    pass

try:
    from crewai import Agent, Task, Crew
    CREWAI_AVAILABLE = True
except ImportError:
    pass

try:
    import autogen
    AUTOGEN_AVAILABLE = True
except ImportError:
    pass

try:
    # Newer AutoGen (v4) ships as separate autogen_agentchat / autogen_ext
    # packages; probe them independently of the legacy `autogen` import above.
    from autogen_agentchat.agents import AssistantAgent
    from autogen_ext.models.openai import OpenAIChatCompletionClient
    AUTOGEN_V4_AVAILABLE = True
except ImportError:
    AUTOGEN_V4_AVAILABLE = False

try:
    from praisonai_tools import (
        CodeDocsSearchTool, CSVSearchTool, DirectorySearchTool, DOCXSearchTool,
        DirectoryReadTool, FileReadTool, TXTSearchTool, JSONSearchTool,
        MDXSearchTool, PDFSearchTool, RagTool, ScrapeElementFromWebsiteTool,
        ScrapeWebsiteTool, WebsiteSearchTool, XMLSearchTool,
        YoutubeChannelSearchTool, YoutubeVideoSearchTool
    )
    PRAISONAI_TOOLS_AVAILABLE = True
except ImportError:
    PRAISONAI_TOOLS_AVAILABLE = False

# LiteLLM availability check for multi-provider support
LITELLM_AVAILABLE = False
try:
    import litellm  # noqa: F401 - imported for availability check
    LITELLM_AVAILABLE = True
except ImportError:
    pass

# Configure root logging from the LOGLEVEL env var; an empty or
# whitespace-only value falls back to INFO via the trailing `or`.
_loglevel = os.environ.get('LOGLEVEL', 'INFO').strip().upper() or 'INFO'
logging.basicConfig(level=_loglevel, format='%(asctime)s - %(levelname)s - %(message)s')
64
+
65
# =============================================================================
# Available Tools List (shared between generators) - Legacy for praisonai_tools
# =============================================================================
# Class names matching the optional praisonai_tools imports above; kept as
# plain strings so this list is valid even when the package is not installed.
AVAILABLE_TOOLS = [
    "CodeDocsSearchTool", "CSVSearchTool", "DirectorySearchTool", "DOCXSearchTool",
    "DirectoryReadTool", "FileReadTool", "TXTSearchTool", "JSONSearchTool",
    "MDXSearchTool", "PDFSearchTool", "RagTool", "ScrapeElementFromWebsiteTool",
    "ScrapeWebsiteTool", "WebsiteSearchTool", "XMLSearchTool",
    "YoutubeChannelSearchTool", "YoutubeVideoSearchTool"
]
75
+
76
# =============================================================================
# Enhanced Tool Discovery from praisonaiagents.tools
# =============================================================================

# Tool categories with their tools from praisonaiagents.tools.
# Category keys are matched by TASK_KEYWORD_TO_TOOLS below and consumed by
# get_tools_for_task(); values are tool-function names.
TOOL_CATEGORIES = {
    'web_search': [
        'internet_search', 'duckduckgo', 'tavily_search', 'exa_search',
        'search_web', 'ydc_search', 'searxng_search'
    ],
    'web_scraping': [
        'scrape_page', 'extract_links', 'crawl', 'extract_text',
        'crawl4ai', 'crawl4ai_extract', 'get_article'
    ],
    'file_operations': [
        'read_file', 'write_file', 'list_files', 'get_file_info',
        'copy_file', 'move_file', 'delete_file'
    ],
    'code_execution': [
        'execute_command', 'execute_code', 'analyze_code', 'format_code'
    ],
    'data_processing': [
        'read_csv', 'write_csv', 'analyze_csv', 'read_json', 'write_json',
        'read_excel', 'write_excel', 'read_yaml', 'write_yaml', 'read_xml'
    ],
    'research': [
        'search_arxiv', 'get_arxiv_paper', 'wiki_search', 'wiki_summary',
        'get_news_sources', 'get_trending_topics'
    ],
    'finance': [
        'get_stock_price', 'get_stock_info', 'get_historical_data'
    ],
    'math': [
        'evaluate', 'solve_equation', 'convert_units', 'calculate_statistics'
    ],
    'database': [
        'query', 'create_table', 'load_data', 'find_documents', 'vector_search'
    ]
}
115
+
116
# Keywords that map to tool categories.
# Matching is a simple case-insensitive substring test (see
# get_tools_for_task), so multi-word keys like 'look up' match phrases too.
TASK_KEYWORD_TO_TOOLS = {
    # Web search keywords
    'search': 'web_search',
    'find': 'web_search',
    'look up': 'web_search',
    'google': 'web_search',
    'internet': 'web_search',
    'online': 'web_search',
    'web': 'web_search',

    # Web scraping keywords
    'scrape': 'web_scraping',
    'crawl': 'web_scraping',
    'extract from website': 'web_scraping',
    'get from url': 'web_scraping',
    'fetch page': 'web_scraping',

    # File operation keywords
    'read file': 'file_operations',
    'write file': 'file_operations',
    'save': 'file_operations',
    'load': 'file_operations',
    'open file': 'file_operations',
    'create file': 'file_operations',

    # Code execution keywords
    'execute': 'code_execution',
    'run code': 'code_execution',
    'python': 'code_execution',
    'script': 'code_execution',
    'command': 'code_execution',
    'shell': 'code_execution',

    # Data processing keywords
    'csv': 'data_processing',
    'excel': 'data_processing',
    'json': 'data_processing',
    'yaml': 'data_processing',
    'xml': 'data_processing',
    'data': 'data_processing',
    'spreadsheet': 'data_processing',

    # Research keywords
    'research': 'research',
    'paper': 'research',
    'arxiv': 'research',
    'wikipedia': 'research',
    'academic': 'research',
    'news': 'research',

    # Finance keywords
    'stock': 'finance',
    'price': 'finance',
    'market': 'finance',
    'financial': 'finance',
    'trading': 'finance',

    # Math keywords
    'calculate': 'math',
    'math': 'math',
    'equation': 'math',
    'compute': 'math',
    'statistics': 'math',

    # Database keywords
    'database': 'database',
    'sql': 'database',
    'query': 'database',
    'mongodb': 'database',
    'vector': 'database'
}
188
+
189
+
190
def get_all_available_tools() -> Dict[str, List[str]]:
    """
    Get all available tools organized by category.

    Returns:
        Dict mapping category names to independent lists of tool names.
    """
    # Copy the list values too: dict.copy() is shallow, so returning it would
    # let callers mutate the module-level TOOL_CATEGORIES lists in place.
    return {category: list(tools) for category, tools in TOOL_CATEGORIES.items()}
198
+
199
+
200
def get_tools_for_task(task_description: str) -> List[str]:
    """
    Analyze a task description and return appropriate tools.

    Args:
        task_description: The task to analyze

    Returns:
        List of unique tool names appropriate for the task, in first-seen
        order, always including the core read/write/execute tools.
    """
    task_lower = task_description.lower()

    # Substring-match keywords against the task to pick tool categories.
    matched_categories = {
        category
        for keyword, category in TASK_KEYWORD_TO_TOOLS.items()
        if keyword in task_lower
    }

    # Collect tools from matched categories.
    tools = []
    for category in matched_categories:
        if category in TOOL_CATEGORIES:
            tools.extend(TOOL_CATEGORIES[category])

    # Always include core tools for flexibility.
    for tool in ('read_file', 'write_file', 'execute_command'):
        if tool not in tools:
            tools.append(tool)

    # De-duplicate while preserving first-seen order (dicts keep insertion order).
    return list(dict.fromkeys(tools))
239
+
240
+
241
def recommend_agent_count(task_description: str) -> int:
    """
    Recommend the optimal number of agents based on task complexity.

    Args:
        task_description: The task to analyze

    Returns:
        Recommended number of agents (1-4)
    """
    complexity = BaseAutoGenerator.analyze_complexity(task_description)

    if complexity == 'simple':
        return 1
    if complexity == 'moderate':
        return 2

    # Complex task: size the team by how many distinct kinds of work
    # (research, analysis, writing, editing, coordination) it mentions.
    lowered = task_description.lower()
    aspect_keywords = (
        ('research', 'search', 'find', 'gather'),
        ('analyze', 'evaluate', 'assess', 'review'),
        ('write', 'create', 'generate', 'produce'),
        ('edit', 'refine', 'improve', 'polish'),
        ('coordinate', 'manage', 'orchestrate', 'delegate'),
    )
    aspects = sum(
        1 for group in aspect_keywords if any(word in lowered for word in group)
    )

    # Clamp to a sensible team size: between 2 and 4 agents.
    return min(max(aspects, 2), 4)
275
+
276
# =============================================================================
# Base Generator Class (DRY - shared functionality)
# =============================================================================
class BaseAutoGenerator:
    """
    Base class for auto-generators with shared functionality.

    Provides:
    - Lazy-loaded instructor client (LiteLLM or OpenAI fallback)
    - Environment variable handling for model/API configuration
    - Config list management
    """

    def __init__(self, config_list: Optional[List[Dict]] = None):
        """
        Initialize base generator with LLM configuration.

        Args:
            config_list: Optional LLM configuration list
        """
        env = os.environ
        # Support multiple environment variable patterns for better compatibility.
        model = env.get("MODEL_NAME") or env.get("OPENAI_MODEL_NAME", "gpt-4o-mini")
        endpoint = (
            env.get("OPENAI_BASE_URL")
            or env.get("OPENAI_API_BASE")
            or env.get("OLLAMA_API_BASE", "https://api.openai.com/v1")
        )

        if config_list:
            self.config_list = config_list
        else:
            self.config_list = [{
                'model': model,
                'base_url': endpoint,
                'api_key': env.get("OPENAI_API_KEY"),
            }]
        # Client is built on first access (see `client`) to keep init cheap.
        self._client = None

    @property
    def client(self):
        """Lazy load the instructor client to avoid performance impact.

        Uses LiteLLM via instructor.from_provider if available for
        multi-provider support, otherwise falls back to direct OpenAI SDK.
        """
        if self._client is not None:
            return self._client

        primary = self.config_list[0]
        if LITELLM_AVAILABLE:
            # Use LiteLLM for multi-provider support (100+ LLMs).
            self._client = instructor.from_provider(
                f"litellm/{primary['model']}",
                mode=instructor.Mode.JSON,
            )
        else:
            # Fallback to direct OpenAI SDK.
            sdk_client = OpenAI(
                base_url=primary['base_url'],
                api_key=primary['api_key'],
            )
            self._client = instructor.patch(sdk_client, mode=instructor.Mode.JSON)
        return self._client

    @staticmethod
    def get_available_tools() -> List[str]:
        """Return list of available tools for agent assignment."""
        return list(AVAILABLE_TOOLS)

    @staticmethod
    def analyze_complexity(topic: str) -> str:
        """
        Analyze task complexity based on keywords.

        Args:
            topic: The task description

        Returns:
            str: Complexity level - 'simple', 'moderate', or 'complex'
        """
        lowered = topic.lower()

        # Checked in priority order: a complex marker wins over a simple one.
        complex_markers = (
            'comprehensive', 'multi-step', 'analyze and', 'research and write',
            'multiple', 'coordinate', 'complex', 'detailed analysis',
            'full report', 'in-depth', 'thorough',
        )
        simple_markers = (
            'write a', 'create a', 'simple', 'quick', 'brief',
            'haiku', 'poem', 'summary', 'list', 'single',
        )

        if any(marker in lowered for marker in complex_markers):
            return 'complex'
        if any(marker in lowered for marker in simple_markers):
            return 'simple'
        return 'moderate'
376
+
377
+
378
+ # =============================================================================
379
+ # Pydantic Models for Structured Output
380
+ # =============================================================================
381
+
382
class TaskDetails(BaseModel):
    """Details for a single task."""
    description: str      # What the agent must do
    expected_output: str  # Acceptance criteria for the task result
386
+
387
class RoleDetails(BaseModel):
    """Details for a single role/agent."""
    role: str                      # Display name of the role
    goal: str                      # Objective this agent pursues
    backstory: str                 # Persona/context supplied to the LLM
    tasks: Dict[str, TaskDetails]  # task-id -> task definition
    tools: List[str]               # Tool names assigned to this role
394
+
395
class TeamStructure(BaseModel):
    """Structure for multi-agent team."""
    roles: Dict[str, RoleDetails]  # role-id -> role definition
398
+
399
class SingleAgentStructure(BaseModel):
    """Structure for single-agent generation (Anthropic's 'start simple' principle)."""
    name: str
    role: str
    goal: str
    backstory: str
    instructions: str
    # NOTE(review): a mutable default is safe here because pydantic copies
    # field defaults per instance; Field(default_factory=list) is the more
    # conventional spelling — confirm the project's pydantic style.
    tools: List[str] = []
    task_description: str
    expected_output: str
409
+
410
class PatternRecommendation(BaseModel):
    """LLM-based pattern recommendation with reasoning."""
    pattern: str       # sequential, parallel, routing, orchestrator-workers, evaluator-optimizer
    reasoning: str     # Why this pattern was chosen
    confidence: float  # 0.0 to 1.0 confidence score
415
+
416
class ValidationGate(BaseModel):
    """Validation gate for prompt chaining workflows."""
    criteria: str     # What to validate
    pass_action: str  # Action if validation passes (e.g., "continue", "next_step")
    fail_action: str  # Action if validation fails (e.g., "retry", "escalate", "abort")
421
+
422
class AutoGenerator(BaseAutoGenerator):
    """
    Auto-generates agents.yaml files from a topic description.

    Inherits from BaseAutoGenerator for shared LLM client functionality.

    Usage:
        generator = AutoGenerator(framework="crewai", topic="Create a movie script")
        path = generator.generate()
    """

    def __init__(self, topic="Movie Story writing about AI", agent_file="test.yaml",
                 framework="crewai", config_list: Optional[List[Dict]] = None,
                 pattern: str = "sequential", single_agent: bool = False):
        """
        Initialize the AutoGenerator class with the specified topic, agent file, and framework.

        Args:
            topic: The task/topic for agent generation
            agent_file: Output YAML file name
            framework: Framework to use (crewai, autogen, praisonai)
            config_list: Optional LLM configuration
            pattern: Workflow pattern (sequential, parallel, routing, orchestrator-workers, evaluator-optimizer)
            single_agent: If True, generate a single agent instead of a team

        Note: autogen framework is different from this AutoGenerator class.

        Raises:
            ImportError: If the requested framework's optional dependency is missing.
        """
        # Initialize base class first (handles config_list and client)
        super().__init__(config_list=config_list)

        # Validate framework availability and show framework-specific messages
        if framework == "crewai" and not CREWAI_AVAILABLE:
            raise ImportError("""
CrewAI is not installed. Please install with:
pip install "praisonai[crewai]"
""")
        elif framework == "autogen" and not (AUTOGEN_AVAILABLE or AUTOGEN_V4_AVAILABLE):
            raise ImportError("""
AutoGen is not installed. Please install with:
pip install "praisonai[autogen]" for v0.2
pip install "praisonai[autogen-v4]" for v0.4
""")
        elif framework == "praisonai" and not PRAISONAI_AVAILABLE:
            raise ImportError("""
Praisonai is not installed. Please install with:
pip install praisonaiagents
""")

        # Only show tools message if using a framework and tools are needed
        if (framework in ["crewai", "autogen"]) and not PRAISONAI_TOOLS_AVAILABLE:
            if framework == "autogen":
                logging.warning("""
Tools are not available for autogen. To use tools, install:
pip install "praisonai[autogen]" for v0.2
pip install "praisonai[autogen-v4]" for v0.4
""")
            else:
                logging.warning(f"""
Tools are not available for {framework}. To use tools, install:
pip install "praisonai[{framework}]"
""")

        self.topic = topic
        self.agent_file = agent_file
        # framework was validated above; the fallback keeps behaviour safe
        # if an empty value ever slips through validation.
        self.framework = framework or "praisonai"
        self.pattern = pattern
        self.single_agent = single_agent

    def recommend_pattern(self, topic: str = None) -> str:
        """
        Recommend the best workflow pattern based on task characteristics.

        Args:
            topic: The task description (uses self.topic if not provided)

        Returns:
            str: Recommended pattern name
        """
        task = topic or self.topic
        task_lower = task.lower()

        # Keywords that suggest specific patterns
        parallel_keywords = ['multiple', 'concurrent', 'parallel', 'simultaneously', 'different sources', 'compare', 'various']
        routing_keywords = ['classify', 'categorize', 'route', 'different types', 'depending on', 'if...then']
        orchestrator_keywords = ['complex', 'comprehensive', 'multi-step', 'coordinate', 'delegate', 'break down', 'analyze and']
        evaluator_keywords = ['refine', 'improve', 'iterate', 'quality', 'review', 'feedback', 'polish', 'optimize']

        # Check for pattern indicators; priority order is
        # evaluator > orchestrator > routing > parallel > sequential.
        if any(kw in task_lower for kw in evaluator_keywords):
            return "evaluator-optimizer"
        elif any(kw in task_lower for kw in orchestrator_keywords):
            return "orchestrator-workers"
        elif any(kw in task_lower for kw in routing_keywords):
            return "routing"
        elif any(kw in task_lower for kw in parallel_keywords):
            return "parallel"
        else:
            return "sequential"

    def generate(self, merge=False):
        """
        Generates a team structure for the specified topic.

        Args:
            merge (bool): Whether to merge with existing agents.yaml file instead of overwriting.

        Returns:
            str: The full path of the YAML file containing the generated team structure.

        Raises:
            Exception: If the generation process fails.

        Usage:
            generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
            path = generator.generate()
            print(path)
        """
        # Structured-output call: instructor coerces the LLM response into TeamStructure.
        response = self.client.chat.completions.create(
            model=self.config_list[0]['model'],
            response_model=TeamStructure,
            max_retries=5,
            timeout=120.0,  # 2 minute timeout for complex generations
            messages=[
                {"role": "system", "content": "You are a helpful assistant designed to output complex team structures."},
                {"role": "user", "content": self.get_user_content()}
            ]
        )
        # Round-trip through JSON to get a plain dict from the pydantic model.
        json_data = json.loads(response.model_dump_json())
        self.convert_and_save(json_data, merge=merge)
        full_path = os.path.abspath(self.agent_file)
        return full_path

    def convert_and_save(self, json_data, merge=False):
        """Converts the provided JSON data into the desired YAML format and saves it to a file.

        Args:
            json_data (dict): The JSON data representing the team structure.
            merge (bool): Whether to merge with existing agents.yaml file instead of overwriting.
        """

        # Handle merge functionality
        if merge and os.path.exists(self.agent_file):
            yaml_data = self.merge_with_existing_agents(json_data)
        else:
            # Original behavior: create new yaml_data structure
            yaml_data = {
                "framework": self.framework,
                "topic": self.topic,
                "roles": {},
                "dependencies": []
            }

            for role_id, role_details in json_data['roles'].items():
                yaml_data['roles'][role_id] = {
                    # NOTE(review): the '"" +' prefix is a no-op concatenation
                    # kept from the original code.
                    "backstory": "" + role_details['backstory'],
                    "goal": role_details['goal'],
                    "role": role_details['role'],
                    "tasks": {},
                    "tools": role_details.get('tools', [])
                }

                for task_id, task_details in role_details['tasks'].items():
                    yaml_data['roles'][role_id]['tasks'][task_id] = {
                        "description": "" + task_details['description'],
                        "expected_output": "" + task_details['expected_output']
                    }

        # Save to YAML file, maintaining the order
        with open(self.agent_file, 'w') as f:
            yaml.dump(yaml_data, f, allow_unicode=True, sort_keys=False)

    def merge_with_existing_agents(self, new_json_data):
        """
        Merge existing agents.yaml with new auto-generated agents.

        Args:
            new_json_data (dict): The JSON data representing the new team structure.

        Returns:
            dict: The merged YAML data structure.
        """
        try:
            # Load existing agents.yaml
            with open(self.agent_file, 'r') as f:
                existing_data = yaml.safe_load(f)

            if not existing_data:
                # If existing file is empty, treat as new file
                existing_data = {"roles": {}, "dependencies": []}
        except (yaml.YAMLError, FileNotFoundError) as e:
            logging.warning(f"Could not load existing agents file {self.agent_file}: {e}")
            logging.warning("Creating new file instead of merging")
            existing_data = {"roles": {}, "dependencies": []}

        # Start with existing data structure (shallow copy; nested dicts are shared)
        merged_data = existing_data.copy()

        # Ensure required fields exist
        if 'roles' not in merged_data:
            merged_data['roles'] = {}
        if 'dependencies' not in merged_data:
            merged_data['dependencies'] = []
        if 'framework' not in merged_data:
            merged_data['framework'] = self.framework

        # Handle topic merging: combine distinct topics into one label
        existing_topic = merged_data.get('topic', '')
        new_topic = self.topic
        if existing_topic and existing_topic != new_topic:
            merged_data['topic'] = f"{existing_topic} + {new_topic}"
        else:
            merged_data['topic'] = new_topic

        # Merge new roles with existing ones
        for role_id, role_details in new_json_data['roles'].items():
            # Check for conflicts and rename if necessary (role_auto_1, role_auto_2, ...)
            final_role_id = role_id
            counter = 1
            while final_role_id in merged_data['roles']:
                final_role_id = f"{role_id}_auto_{counter}"
                counter += 1

            # Add the new role
            merged_data['roles'][final_role_id] = {
                "backstory": "" + role_details['backstory'],
                "goal": role_details['goal'],
                "role": role_details['role'],
                "tasks": {},
                "tools": role_details.get('tools', [])
            }

            # Add tasks for this role
            for task_id, task_details in role_details['tasks'].items():
                merged_data['roles'][final_role_id]['tasks'][task_id] = {
                    "description": "" + task_details['description'],
                    "expected_output": "" + task_details['expected_output']
                }

        return merged_data

    def discover_tools_for_topic(self) -> List[str]:
        """
        Discover appropriate tools for the topic using intelligent matching.

        Returns:
            List of tool names appropriate for this topic
        """
        return get_tools_for_task(self.topic)

    def get_user_content(self):
        """
        Generates a prompt for the OpenAI API to generate a team structure.
        Uses intelligent tool discovery based on task analysis.

        Args:
            None

        Returns:
            str: The prompt for the OpenAI API.

        Usage:
            generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
            prompt = generator.get_user_content()
            print(prompt)
        """
        # Pattern-specific guidance
        pattern_guidance = {
            "sequential": "The team will work in sequence. Each role passes output to the next.",
            "parallel": "The team will work in parallel on independent subtasks, then combine results.",
            "routing": "A classifier agent will route requests to specialized agents based on input type.",
            "orchestrator-workers": "A central orchestrator will dynamically delegate tasks to specialized workers.",
            "evaluator-optimizer": "One agent generates content, another evaluates it in a loop until quality criteria are met."
        }

        workflow_guidance = pattern_guidance.get(self.pattern, pattern_guidance["sequential"])

        # Get recommended tools based on task analysis
        recommended_tools = self.discover_tools_for_topic()
        recommended_agent_count = recommend_agent_count(self.topic)
        complexity = self.analyze_complexity(self.topic)

        # Build comprehensive tool list with categories
        all_tools_by_category = []
        for category, tools in TOOL_CATEGORIES.items():
            all_tools_by_category.append(f" {category}: {', '.join(tools)}")
        tools_reference = "\n".join(all_tools_by_category)

        # Also include legacy tools for backward compatibility
        legacy_tools = ", ".join(AVAILABLE_TOOLS)

        user_content = f"""Analyze and generate a team structure for: "{self.topic}"

TASK COMPLEXITY ANALYSIS (Pre-computed):
- Complexity: {complexity}
- Recommended agents: {recommended_agent_count}
- Recommended tools based on task keywords: {', '.join(recommended_tools)}

STEP 1: VALIDATE TASK ANALYSIS
Review the pre-computed analysis above. Adjust if needed based on your understanding.

STEP 2: DETERMINE OPTIMAL TEAM SIZE
Based on complexity analysis:
- Simple tasks: 1-2 agents (single focused agent or simple pair)
- Moderate tasks: 2-3 agents (researcher + executor pattern)
- Complex tasks: 3-4 agents (specialized team)

Recommended for this task: {recommended_agent_count} agent(s)

IMPORTANT: Avoid unnecessary complexity. Only add agents if there is meaningful specialization.
Each agent must have a distinct, non-overlapping responsibility.

STEP 3: DESIGN THE TEAM (Pattern: {self.pattern})
{workflow_guidance}

Each agent should have:
- A clear, distinct role with meaningful specialization
- A specific goal
- Relevant backstory
- 1 focused task with clear description and expected output
- Appropriate tools from the recommended list

AVAILABLE TOOLS BY CATEGORY:
{tools_reference}

LEGACY TOOLS (for backward compatibility):
{legacy_tools}

RECOMMENDED TOOLS FOR THIS TASK: {', '.join(recommended_tools)}
Prioritize using the recommended tools. Only add others if specifically needed.

Example structure (2 agents for a research + writing task):
{{
    "roles": {{
        "researcher": {{
            "role": "Research Analyst",
            "goal": "Gather comprehensive information on the topic",
            "backstory": "Expert researcher skilled at finding and synthesizing information.",
            "tools": ["internet_search", "read_file"],
            "tasks": {{
                "research_task": {{
                    "description": "Research key information about the topic and compile findings.",
                    "expected_output": "Comprehensive research notes with key facts and insights."
                }}
            }}
        }},
        "writer": {{
            "role": "Content Writer",
            "goal": "Create polished final content",
            "backstory": "Skilled writer who transforms research into engaging content.",
            "tools": ["write_file"],
            "tasks": {{
                "writing_task": {{
                    "description": "Write the final content based on research findings.",
                    "expected_output": "Polished, well-structured final document."
                }}
            }}
        }}
    }}
}}

Now generate the optimal team structure for: {self.topic}
Use the recommended tools: {', '.join(recommended_tools)}
"""
        return user_content
786
+
787
+
788
+ # generator = AutoGenerator(framework="crewai", topic="Create a movie script about Cat in Mars")
789
+ # print(generator.generate())
790
+
791
+
792
+ # =============================================================================
793
+ # Workflow Auto-Generation (Feature Parity)
794
+ # =============================================================================
795
+
796
class WorkflowStepDetails(BaseModel):
    """Details for a workflow step."""
    agent: str                           # Agent id that executes this step
    action: str                          # Instruction/template for the step
    expected_output: Optional[str] = None  # Optional acceptance criteria
801
+
802
class WorkflowRouteDetails(BaseModel):
    """Details for a route step."""
    name: str                      # Step name
    route: Dict[str, List[str]]    # classification label -> agent ids to run
806
+
807
class WorkflowParallelDetails(BaseModel):
    """Details for a parallel step."""
    name: str                            # Step name
    parallel: List[WorkflowStepDetails]  # Sub-steps executed concurrently
811
+
812
class WorkflowAgentDetails(BaseModel):
    """Details for a workflow agent."""
    name: str
    role: str
    goal: str
    instructions: str
    tools: Optional[List[str]] = None  # Tool names, or None when no tools needed
819
+
820
class WorkflowStructure(BaseModel):
    """Structure for auto-generated workflow."""
    name: str
    description: str
    agents: Dict[str, WorkflowAgentDetails]       # agent-id -> agent definition
    steps: List[Dict]  # Can be agent steps, route, parallel, etc.
    gates: Optional[List[ValidationGate]] = None  # Optional validation gates
827
+
828
+
829
+ class WorkflowAutoGenerator(BaseAutoGenerator):
830
+ """
831
+ Auto-generates workflow.yaml files from a topic description.
832
+
833
+ Inherits from BaseAutoGenerator for shared LLM client functionality.
834
+
835
+ Usage:
836
+ generator = WorkflowAutoGenerator(topic="Research AI trends and write a report")
837
+ path = generator.generate()
838
+ """
839
+
840
    def __init__(self, topic: str = "Research and write about AI",
                 workflow_file: str = "workflow.yaml",
                 config_list: Optional[List[Dict]] = None,
                 framework: str = "praisonai",
                 single_agent: bool = False):
        """
        Initialize the WorkflowAutoGenerator.

        Args:
            topic: The task/topic for the workflow
            workflow_file: Output file name
            config_list: Optional LLM configuration
            framework: Framework to use (praisonai, crewai, autogen)
            single_agent: If True, generate a single agent workflow
        """
        # Initialize base class (handles config_list and client)
        super().__init__(config_list=config_list)

        self.topic = topic                  # Task the workflow should accomplish
        self.workflow_file = workflow_file  # Output YAML path
        self.framework = framework          # Target framework name
        self.single_agent = single_agent    # Single-agent mode flag
862
+
863
+ def recommend_pattern(self, topic: str = None) -> str:
864
+ """
865
+ Recommend the best workflow pattern based on task characteristics.
866
+
867
+ Args:
868
+ topic: The task description (uses self.topic if not provided)
869
+
870
+ Returns:
871
+ str: Recommended pattern name
872
+
873
+ Pattern recommendations based on Anthropic's best practices:
874
+ - sequential: Clear step-by-step dependencies
875
+ - parallel: Independent subtasks that can run concurrently
876
+ - routing: Different input types need different handling
877
+ - orchestrator-workers: Complex tasks needing dynamic decomposition
878
+ - evaluator-optimizer: Tasks requiring iterative refinement
879
+ """
880
+ task = topic or self.topic
881
+ task_lower = task.lower()
882
+
883
+ # Keywords that suggest specific patterns
884
+ parallel_keywords = ['multiple', 'concurrent', 'parallel', 'simultaneously', 'different sources', 'compare', 'various']
885
+ routing_keywords = ['classify', 'categorize', 'route', 'different types', 'depending on', 'if...then']
886
+ orchestrator_keywords = ['complex', 'comprehensive', 'multi-step', 'coordinate', 'delegate', 'break down', 'analyze and']
887
+ evaluator_keywords = ['refine', 'improve', 'iterate', 'quality', 'review', 'feedback', 'polish', 'optimize']
888
+
889
+ # Check for pattern indicators
890
+ if any(kw in task_lower for kw in evaluator_keywords):
891
+ return "evaluator-optimizer"
892
+ elif any(kw in task_lower for kw in orchestrator_keywords):
893
+ return "orchestrator-workers"
894
+ elif any(kw in task_lower for kw in routing_keywords):
895
+ return "routing"
896
+ elif any(kw in task_lower for kw in parallel_keywords):
897
+ return "parallel"
898
+ else:
899
+ return "sequential"
900
+
901
    def recommend_pattern_llm(self, topic: str = None) -> PatternRecommendation:
        """
        Use LLM to recommend the best workflow pattern with reasoning.

        Args:
            topic: The task description (uses self.topic if not provided)

        Returns:
            PatternRecommendation: Pattern with reasoning and confidence score
        """
        task = topic or self.topic

        prompt = f"""Analyze this task and recommend the best workflow pattern:

Task: "{task}"

Available patterns:
1. sequential - Agents work one after another, passing output to the next
2. parallel - Multiple agents work concurrently on independent subtasks
3. routing - A classifier routes requests to specialized agents based on input type
4. orchestrator-workers - Central orchestrator dynamically delegates to specialized workers
5. evaluator-optimizer - Generator creates content, evaluator reviews in a loop until quality met

Respond with:
- pattern: The recommended pattern name
- reasoning: Why this pattern is best for this task
- confidence: Your confidence score (0.0 to 1.0)
"""

        # Structured-output call: instructor coerces the response into PatternRecommendation.
        response = self.client.chat.completions.create(
            model=self.config_list[0]['model'],
            response_model=PatternRecommendation,
            max_retries=3,
            timeout=60.0,
            messages=[
                {"role": "system", "content": "You are an expert at designing AI agent workflows."},
                {"role": "user", "content": prompt}
            ]
        )

        return response
942
+
943
    def generate(self, pattern: str = "sequential", merge: bool = False) -> str:
        """
        Generate a workflow YAML file.

        Args:
            pattern: Workflow pattern - "sequential", "routing", "parallel", "loop",
                     "orchestrator-workers", "evaluator-optimizer"
            merge: If True, merge with existing workflow file instead of overwriting

        Returns:
            Path to the generated workflow file
        """
        # Structured-output call: instructor coerces the response into WorkflowStructure.
        response = self.client.chat.completions.create(
            model=self.config_list[0]['model'],
            response_model=WorkflowStructure,
            max_retries=5,
            timeout=120.0,  # 2 minute timeout for complex generations
            messages=[
                {"role": "system", "content": "You are a helpful assistant that designs workflow structures."},
                {"role": "user", "content": self._get_prompt(pattern)}
            ]
        )

        # Round-trip through JSON to get a plain dict from the pydantic model.
        json_data = json.loads(response.model_dump_json())

        # Merge only when requested AND a previous workflow file exists.
        if merge and os.path.exists(self.workflow_file):
            return self._save_workflow(self.merge_with_existing_workflow(json_data), pattern)
        return self._save_workflow(json_data, pattern)
971
+
972
+ def merge_with_existing_workflow(self, new_data: Dict) -> Dict:
973
+ """
974
+ Merge new workflow data with existing workflow file.
975
+
976
+ Args:
977
+ new_data: The new workflow data to merge
978
+
979
+ Returns:
980
+ Dict: Merged workflow data
981
+ """
982
+ try:
983
+ with open(self.workflow_file, 'r') as f:
984
+ existing_data = yaml.safe_load(f)
985
+
986
+ if not existing_data:
987
+ return new_data
988
+ except (yaml.YAMLError, FileNotFoundError) as e:
989
+ logging.warning(f"Could not load existing workflow file {self.workflow_file}: {e}")
990
+ return new_data
991
+
992
+ # Merge agents (avoid duplicates)
993
+ merged_agents = existing_data.get('agents', {}).copy()
994
+ for agent_id, agent_data in new_data.get('agents', {}).items():
995
+ # Rename if conflict
996
+ final_id = agent_id
997
+ counter = 1
998
+ while final_id in merged_agents:
999
+ final_id = f"{agent_id}_auto_{counter}"
1000
+ counter += 1
1001
+ merged_agents[final_id] = agent_data
1002
+
1003
+ # Merge steps (append new steps)
1004
+ merged_steps = existing_data.get('steps', []) + new_data.get('steps', [])
1005
+
1006
+ # Create merged structure
1007
+ merged = {
1008
+ 'name': existing_data.get('name', new_data.get('name', 'Merged Workflow')),
1009
+ 'description': f"{existing_data.get('description', '')} + {new_data.get('description', '')}",
1010
+ 'agents': merged_agents,
1011
+ 'steps': merged_steps
1012
+ }
1013
+
1014
+ return merged
1015
+
1016
    def _get_prompt(self, pattern: str) -> str:
        """Generate the prompt based on the workflow pattern.

        Builds a base prompt from complexity analysis and the available tool
        list, then appends a pattern-specific example structure.
        """
        # Analyze complexity to determine agent count
        complexity = self.analyze_complexity(self.topic)
        if complexity == 'simple':
            agent_guidance = "Create 1-2 agents (simple task detected)."
        elif complexity == 'complex':
            agent_guidance = "Create 3-4 agents (complex task detected)."
        else:
            agent_guidance = "Create 2-3 agents (moderate task detected)."

        # Get available tools
        tools_list = ", ".join(self.get_available_tools())

        base_prompt = f"""Generate a workflow structure for: "{self.topic}"

STEP 1: ANALYZE TASK COMPLEXITY
- Is this a simple task (1-2 agents)?
- Does it require multiple specialists (2-3 agents)?
- Is it complex with many dependencies (3-4 agents)?

STEP 2: DESIGN WORKFLOW
The workflow should use the "{pattern}" pattern.
{agent_guidance}
Each agent should have clear roles and instructions.
Each step should have a clear action.

STEP 3: ASSIGN TOOLS (if needed)
Available Tools: {tools_list}
Only assign tools if the task requires them. Use empty list or null if no tools needed.

"""

        # Append a pattern-specific example (plain strings: {{input}} stays literal).
        if pattern == "routing":
            base_prompt += """
Include a classifier agent that routes to different specialized agents.
The route step should have at least 2 routes plus a default.

Example structure:
{
    "name": "Routing Workflow",
    "description": "Routes requests to specialized agents",
    "agents": {
        "classifier": {"name": "Classifier", "role": "Request Classifier", "goal": "Classify requests", "instructions": "Respond with ONLY: technical, creative, or general"},
        "tech_agent": {"name": "TechExpert", "role": "Technical Expert", "goal": "Handle technical questions", "instructions": "Provide technical answers"}
    },
    "steps": [
        {"agent": "classifier", "action": "Classify: {{input}}"},
        {"name": "routing", "route": {"technical": ["tech_agent"], "default": ["tech_agent"]}}
    ]
}
"""
        elif pattern == "parallel":
            base_prompt += """
Include multiple agents that work in parallel, then an aggregator.

Example structure:
{
    "name": "Parallel Workflow",
    "description": "Multiple agents work concurrently",
    "agents": {
        "researcher1": {"name": "Researcher1", "role": "Market Analyst", "goal": "Research market", "instructions": "Provide market insights"},
        "researcher2": {"name": "Researcher2", "role": "Competitor Analyst", "goal": "Research competitors", "instructions": "Provide competitor insights"},
        "aggregator": {"name": "Aggregator", "role": "Synthesizer", "goal": "Combine findings", "instructions": "Synthesize all research"}
    },
    "steps": [
        {"name": "parallel_research", "parallel": [
            {"agent": "researcher1", "action": "Research market for {{input}}"},
            {"agent": "researcher2", "action": "Research competitors for {{input}}"}
        ]},
        {"agent": "aggregator", "action": "Combine all findings"}
    ]
}
"""
        elif pattern == "orchestrator-workers":
            base_prompt += """
Create an orchestrator-workers workflow where a central orchestrator dynamically delegates tasks to specialized workers.
The orchestrator analyzes the input, decides which workers are needed, and synthesizes results.

Example structure:
{
    "name": "Orchestrator-Workers Workflow",
    "description": "Central orchestrator delegates to specialized workers",
    "agents": {
        "orchestrator": {"name": "Orchestrator", "role": "Task Coordinator", "goal": "Analyze tasks and delegate to appropriate workers", "instructions": "Break down the task, identify required specialists, and coordinate their work. Output a JSON with 'subtasks' array listing which workers to invoke."},
        "researcher": {"name": "Researcher", "role": "Research Specialist", "goal": "Gather information", "instructions": "Research and provide factual information"},
        "analyst": {"name": "Analyst", "role": "Data Analyst", "goal": "Analyze data and patterns", "instructions": "Analyze information and identify insights"},
        "writer": {"name": "Writer", "role": "Content Writer", "goal": "Create written content", "instructions": "Write clear, engaging content"},
        "synthesizer": {"name": "Synthesizer", "role": "Results Synthesizer", "goal": "Combine all worker outputs", "instructions": "Synthesize all worker outputs into a coherent final result"}
    },
    "steps": [
        {"agent": "orchestrator", "action": "Analyze task and determine required workers: {{input}}"},
        {"name": "worker_dispatch", "parallel": [
            {"agent": "researcher", "action": "Research: {{input}}"},
            {"agent": "analyst", "action": "Analyze: {{input}}"},
            {"agent": "writer", "action": "Draft content for: {{input}}"}
        ]},
        {"agent": "synthesizer", "action": "Combine all worker outputs into final result"}
    ]
}
"""
        elif pattern == "evaluator-optimizer":
            base_prompt += """
Create an evaluator-optimizer workflow where one agent generates content and another evaluates it in a loop.
The generator improves based on evaluator feedback until quality criteria are met.

Example structure:
{
    "name": "Evaluator-Optimizer Workflow",
    "description": "Iterative refinement through generation and evaluation",
    "agents": {
        "generator": {"name": "Generator", "role": "Content Generator", "goal": "Generate high-quality content", "instructions": "Create content based on the input. If feedback is provided, improve the content accordingly."},
        "evaluator": {"name": "Evaluator", "role": "Quality Evaluator", "goal": "Evaluate content quality", "instructions": "Evaluate the content on: clarity, accuracy, completeness, and relevance. Score 1-10 for each. If average score < 7, provide specific improvement feedback. If score >= 7, respond with 'APPROVED'."}
    },
    "steps": [
        {"agent": "generator", "action": "Generate initial content for: {{input}}"},
        {"name": "evaluation_loop", "loop": {
            "agent": "evaluator",
            "action": "Evaluate the generated content",
            "condition": "output does not contain 'APPROVED'",
            "max_iterations": 3,
            "feedback_to": "generator"
        }},
        {"agent": "generator", "action": "Finalize content based on all feedback"}
    ]
}
"""
        else:  # sequential
            base_prompt += """
Create a sequential workflow where agents work one after another.

Example structure:
{
    "name": "Sequential Workflow",
    "description": "Agents work in sequence",
    "agents": {
        "researcher": {"name": "Researcher", "role": "Research Analyst", "goal": "Research topics", "instructions": "Provide research findings"},
        "writer": {"name": "Writer", "role": "Content Writer", "goal": "Write content", "instructions": "Write clear content"}
    },
    "steps": [
        {"agent": "researcher", "action": "Research: {{input}}"},
        {"agent": "writer", "action": "Write based on: {{previous_output}}"}
    ]
}
"""

        base_prompt += f"\nGenerate a workflow for: {self.topic}"
        return base_prompt
1164
+
1165
+ def _save_workflow(self, data: Dict, pattern: str) -> str:
1166
+ """Save the workflow to a YAML file."""
1167
+ # Build the workflow YAML structure
1168
+ workflow_yaml = {
1169
+ 'name': data.get('name', 'Auto-Generated Workflow'),
1170
+ 'description': data.get('description', ''),
1171
+ 'framework': 'praisonai',
1172
+ 'workflow': {
1173
+ 'verbose': True,
1174
+ 'planning': False,
1175
+ 'reasoning': False
1176
+ },
1177
+ 'agents': {},
1178
+ 'steps': data.get('steps', [])
1179
+ }
1180
+
1181
+ # Convert agents
1182
+ for agent_id, agent_data in data.get('agents', {}).items():
1183
+ workflow_yaml['agents'][agent_id] = {
1184
+ 'name': agent_data.get('name', agent_id),
1185
+ 'role': agent_data.get('role', 'Assistant'),
1186
+ 'goal': agent_data.get('goal', ''),
1187
+ 'instructions': agent_data.get('instructions', '')
1188
+ }
1189
+ if agent_data.get('tools'):
1190
+ workflow_yaml['agents'][agent_id]['tools'] = agent_data['tools']
1191
+
1192
+ # Write to file
1193
+ full_path = os.path.abspath(self.workflow_file)
1194
+ with open(full_path, 'w') as f:
1195
+ yaml.dump(workflow_yaml, f, default_flow_style=False, sort_keys=False)
1196
+
1197
+ return full_path