ag2 0.9.1a1__py3-none-any.whl → 0.9.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ag2 might be problematic.
Files changed (371)
  1. {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/METADATA +272 -75
  2. ag2-0.9.2.dist-info/RECORD +406 -0
  3. {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/WHEEL +1 -2
  4. autogen/__init__.py +89 -0
  5. autogen/_website/__init__.py +3 -0
  6. autogen/_website/generate_api_references.py +427 -0
  7. autogen/_website/generate_mkdocs.py +1174 -0
  8. autogen/_website/notebook_processor.py +476 -0
  9. autogen/_website/process_notebooks.py +656 -0
  10. autogen/_website/utils.py +412 -0
  11. autogen/agentchat/__init__.py +44 -0
  12. autogen/agentchat/agent.py +182 -0
  13. autogen/agentchat/assistant_agent.py +85 -0
  14. autogen/agentchat/chat.py +309 -0
  15. autogen/agentchat/contrib/__init__.py +5 -0
  16. autogen/agentchat/contrib/agent_eval/README.md +7 -0
  17. autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
  18. autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
  19. autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
  20. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
  21. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
  22. autogen/agentchat/contrib/agent_eval/task.py +42 -0
  23. autogen/agentchat/contrib/agent_optimizer.py +429 -0
  24. autogen/agentchat/contrib/capabilities/__init__.py +5 -0
  25. autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
  26. autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
  27. autogen/agentchat/contrib/capabilities/teachability.py +393 -0
  28. autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
  29. autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
  30. autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
  31. autogen/agentchat/contrib/capabilities/transforms.py +566 -0
  32. autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
  33. autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
  34. autogen/agentchat/contrib/captainagent/__init__.py +9 -0
  35. autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
  36. autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
  37. autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
  38. autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
  39. autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
  40. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
  41. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
  42. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
  43. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
  44. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
  45. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
  46. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
  47. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
  48. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
  49. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
  50. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
  51. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
  52. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
  53. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
  54. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
  55. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
  56. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
  57. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
  58. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
  59. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
  60. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
  61. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
  62. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
  63. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
  64. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
  65. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
  66. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
  67. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
  68. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
  69. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
  70. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
  71. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
  72. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
  73. autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
  74. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
  75. autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
  76. autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
  77. autogen/agentchat/contrib/graph_rag/document.py +29 -0
  78. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
  79. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
  80. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
  81. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
  82. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
  83. autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
  84. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
  85. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
  86. autogen/agentchat/contrib/img_utils.py +397 -0
  87. autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
  88. autogen/agentchat/contrib/llava_agent.py +187 -0
  89. autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
  90. autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
  91. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
  92. autogen/agentchat/contrib/rag/__init__.py +10 -0
  93. autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
  94. autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
  95. autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
  96. autogen/agentchat/contrib/rag/query_engine.py +74 -0
  97. autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
  98. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
  99. autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
  100. autogen/agentchat/contrib/swarm_agent.py +1425 -0
  101. autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
  102. autogen/agentchat/contrib/vectordb/__init__.py +5 -0
  103. autogen/agentchat/contrib/vectordb/base.py +232 -0
  104. autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
  105. autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
  106. autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
  107. autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
  108. autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
  109. autogen/agentchat/contrib/vectordb/utils.py +126 -0
  110. autogen/agentchat/contrib/web_surfer.py +303 -0
  111. autogen/agentchat/conversable_agent.py +4023 -0
  112. autogen/agentchat/group/__init__.py +64 -0
  113. autogen/agentchat/group/available_condition.py +91 -0
  114. autogen/agentchat/group/context_condition.py +77 -0
  115. autogen/agentchat/group/context_expression.py +238 -0
  116. autogen/agentchat/group/context_str.py +41 -0
  117. autogen/agentchat/group/context_variables.py +192 -0
  118. autogen/agentchat/group/group_tool_executor.py +202 -0
  119. autogen/agentchat/group/group_utils.py +591 -0
  120. autogen/agentchat/group/handoffs.py +244 -0
  121. autogen/agentchat/group/llm_condition.py +93 -0
  122. autogen/agentchat/group/multi_agent_chat.py +237 -0
  123. autogen/agentchat/group/on_condition.py +58 -0
  124. autogen/agentchat/group/on_context_condition.py +54 -0
  125. autogen/agentchat/group/patterns/__init__.py +18 -0
  126. autogen/agentchat/group/patterns/auto.py +159 -0
  127. autogen/agentchat/group/patterns/manual.py +176 -0
  128. autogen/agentchat/group/patterns/pattern.py +288 -0
  129. autogen/agentchat/group/patterns/random.py +106 -0
  130. autogen/agentchat/group/patterns/round_robin.py +117 -0
  131. autogen/agentchat/group/reply_result.py +26 -0
  132. autogen/agentchat/group/speaker_selection_result.py +41 -0
  133. autogen/agentchat/group/targets/__init__.py +4 -0
  134. autogen/agentchat/group/targets/group_chat_target.py +132 -0
  135. autogen/agentchat/group/targets/group_manager_target.py +151 -0
  136. autogen/agentchat/group/targets/transition_target.py +413 -0
  137. autogen/agentchat/group/targets/transition_utils.py +6 -0
  138. autogen/agentchat/groupchat.py +1694 -0
  139. autogen/agentchat/realtime/__init__.py +3 -0
  140. autogen/agentchat/realtime/experimental/__init__.py +20 -0
  141. autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
  142. autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
  143. autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
  144. autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
  145. autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
  146. autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
  147. autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
  148. autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
  149. autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
  150. autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
  151. autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
  152. autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
  153. autogen/agentchat/realtime/experimental/function_observer.py +85 -0
  154. autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
  155. autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
  156. autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
  157. autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
  158. autogen/agentchat/realtime/experimental/websockets.py +21 -0
  159. autogen/agentchat/realtime_agent/__init__.py +21 -0
  160. autogen/agentchat/user_proxy_agent.py +111 -0
  161. autogen/agentchat/utils.py +206 -0
  162. autogen/agents/__init__.py +3 -0
  163. autogen/agents/contrib/__init__.py +10 -0
  164. autogen/agents/contrib/time/__init__.py +8 -0
  165. autogen/agents/contrib/time/time_reply_agent.py +73 -0
  166. autogen/agents/contrib/time/time_tool_agent.py +51 -0
  167. autogen/agents/experimental/__init__.py +27 -0
  168. autogen/agents/experimental/deep_research/__init__.py +7 -0
  169. autogen/agents/experimental/deep_research/deep_research.py +52 -0
  170. autogen/agents/experimental/discord/__init__.py +7 -0
  171. autogen/agents/experimental/discord/discord.py +66 -0
  172. autogen/agents/experimental/document_agent/__init__.py +19 -0
  173. autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
  174. autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
  175. autogen/agents/experimental/document_agent/document_agent.py +461 -0
  176. autogen/agents/experimental/document_agent/document_conditions.py +50 -0
  177. autogen/agents/experimental/document_agent/document_utils.py +380 -0
  178. autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
  179. autogen/agents/experimental/document_agent/parser_utils.py +130 -0
  180. autogen/agents/experimental/document_agent/url_utils.py +426 -0
  181. autogen/agents/experimental/reasoning/__init__.py +7 -0
  182. autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
  183. autogen/agents/experimental/slack/__init__.py +7 -0
  184. autogen/agents/experimental/slack/slack.py +73 -0
  185. autogen/agents/experimental/telegram/__init__.py +7 -0
  186. autogen/agents/experimental/telegram/telegram.py +77 -0
  187. autogen/agents/experimental/websurfer/__init__.py +7 -0
  188. autogen/agents/experimental/websurfer/websurfer.py +62 -0
  189. autogen/agents/experimental/wikipedia/__init__.py +7 -0
  190. autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
  191. autogen/browser_utils.py +309 -0
  192. autogen/cache/__init__.py +10 -0
  193. autogen/cache/abstract_cache_base.py +75 -0
  194. autogen/cache/cache.py +203 -0
  195. autogen/cache/cache_factory.py +88 -0
  196. autogen/cache/cosmos_db_cache.py +144 -0
  197. autogen/cache/disk_cache.py +102 -0
  198. autogen/cache/in_memory_cache.py +58 -0
  199. autogen/cache/redis_cache.py +123 -0
  200. autogen/code_utils.py +596 -0
  201. autogen/coding/__init__.py +22 -0
  202. autogen/coding/base.py +119 -0
  203. autogen/coding/docker_commandline_code_executor.py +268 -0
  204. autogen/coding/factory.py +47 -0
  205. autogen/coding/func_with_reqs.py +202 -0
  206. autogen/coding/jupyter/__init__.py +23 -0
  207. autogen/coding/jupyter/base.py +36 -0
  208. autogen/coding/jupyter/docker_jupyter_server.py +167 -0
  209. autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
  210. autogen/coding/jupyter/import_utils.py +82 -0
  211. autogen/coding/jupyter/jupyter_client.py +231 -0
  212. autogen/coding/jupyter/jupyter_code_executor.py +160 -0
  213. autogen/coding/jupyter/local_jupyter_server.py +172 -0
  214. autogen/coding/local_commandline_code_executor.py +405 -0
  215. autogen/coding/markdown_code_extractor.py +45 -0
  216. autogen/coding/utils.py +56 -0
  217. autogen/doc_utils.py +34 -0
  218. autogen/events/__init__.py +7 -0
  219. autogen/events/agent_events.py +1013 -0
  220. autogen/events/base_event.py +99 -0
  221. autogen/events/client_events.py +167 -0
  222. autogen/events/helpers.py +36 -0
  223. autogen/events/print_event.py +46 -0
  224. autogen/exception_utils.py +73 -0
  225. autogen/extensions/__init__.py +5 -0
  226. autogen/fast_depends/__init__.py +16 -0
  227. autogen/fast_depends/_compat.py +80 -0
  228. autogen/fast_depends/core/__init__.py +14 -0
  229. autogen/fast_depends/core/build.py +225 -0
  230. autogen/fast_depends/core/model.py +576 -0
  231. autogen/fast_depends/dependencies/__init__.py +15 -0
  232. autogen/fast_depends/dependencies/model.py +29 -0
  233. autogen/fast_depends/dependencies/provider.py +39 -0
  234. autogen/fast_depends/library/__init__.py +10 -0
  235. autogen/fast_depends/library/model.py +46 -0
  236. autogen/fast_depends/py.typed +6 -0
  237. autogen/fast_depends/schema.py +66 -0
  238. autogen/fast_depends/use.py +280 -0
  239. autogen/fast_depends/utils.py +187 -0
  240. autogen/formatting_utils.py +83 -0
  241. autogen/function_utils.py +13 -0
  242. autogen/graph_utils.py +178 -0
  243. autogen/import_utils.py +526 -0
  244. autogen/interop/__init__.py +22 -0
  245. autogen/interop/crewai/__init__.py +7 -0
  246. autogen/interop/crewai/crewai.py +88 -0
  247. autogen/interop/interoperability.py +71 -0
  248. autogen/interop/interoperable.py +46 -0
  249. autogen/interop/langchain/__init__.py +8 -0
  250. autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
  251. autogen/interop/langchain/langchain_tool.py +82 -0
  252. autogen/interop/litellm/__init__.py +7 -0
  253. autogen/interop/litellm/litellm_config_factory.py +179 -0
  254. autogen/interop/pydantic_ai/__init__.py +7 -0
  255. autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
  256. autogen/interop/registry.py +69 -0
  257. autogen/io/__init__.py +15 -0
  258. autogen/io/base.py +151 -0
  259. autogen/io/console.py +56 -0
  260. autogen/io/processors/__init__.py +12 -0
  261. autogen/io/processors/base.py +21 -0
  262. autogen/io/processors/console_event_processor.py +56 -0
  263. autogen/io/run_response.py +293 -0
  264. autogen/io/thread_io_stream.py +63 -0
  265. autogen/io/websockets.py +213 -0
  266. autogen/json_utils.py +43 -0
  267. autogen/llm_config.py +382 -0
  268. autogen/logger/__init__.py +11 -0
  269. autogen/logger/base_logger.py +128 -0
  270. autogen/logger/file_logger.py +261 -0
  271. autogen/logger/logger_factory.py +42 -0
  272. autogen/logger/logger_utils.py +57 -0
  273. autogen/logger/sqlite_logger.py +523 -0
  274. autogen/math_utils.py +339 -0
  275. autogen/mcp/__init__.py +7 -0
  276. autogen/mcp/__main__.py +78 -0
  277. autogen/mcp/mcp_client.py +208 -0
  278. autogen/mcp/mcp_proxy/__init__.py +19 -0
  279. autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +63 -0
  280. autogen/mcp/mcp_proxy/mcp_proxy.py +581 -0
  281. autogen/mcp/mcp_proxy/operation_grouping.py +158 -0
  282. autogen/mcp/mcp_proxy/operation_renaming.py +114 -0
  283. autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
  284. autogen/mcp/mcp_proxy/security.py +400 -0
  285. autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
  286. autogen/messages/__init__.py +7 -0
  287. autogen/messages/agent_messages.py +948 -0
  288. autogen/messages/base_message.py +107 -0
  289. autogen/messages/client_messages.py +171 -0
  290. autogen/messages/print_message.py +49 -0
  291. autogen/oai/__init__.py +53 -0
  292. autogen/oai/anthropic.py +714 -0
  293. autogen/oai/bedrock.py +628 -0
  294. autogen/oai/cerebras.py +299 -0
  295. autogen/oai/client.py +1444 -0
  296. autogen/oai/client_utils.py +169 -0
  297. autogen/oai/cohere.py +479 -0
  298. autogen/oai/gemini.py +998 -0
  299. autogen/oai/gemini_types.py +155 -0
  300. autogen/oai/groq.py +305 -0
  301. autogen/oai/mistral.py +303 -0
  302. autogen/oai/oai_models/__init__.py +11 -0
  303. autogen/oai/oai_models/_models.py +16 -0
  304. autogen/oai/oai_models/chat_completion.py +87 -0
  305. autogen/oai/oai_models/chat_completion_audio.py +32 -0
  306. autogen/oai/oai_models/chat_completion_message.py +86 -0
  307. autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
  308. autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
  309. autogen/oai/oai_models/completion_usage.py +60 -0
  310. autogen/oai/ollama.py +643 -0
  311. autogen/oai/openai_utils.py +881 -0
  312. autogen/oai/together.py +370 -0
  313. autogen/retrieve_utils.py +491 -0
  314. autogen/runtime_logging.py +160 -0
  315. autogen/token_count_utils.py +267 -0
  316. autogen/tools/__init__.py +20 -0
  317. autogen/tools/contrib/__init__.py +9 -0
  318. autogen/tools/contrib/time/__init__.py +7 -0
  319. autogen/tools/contrib/time/time.py +41 -0
  320. autogen/tools/dependency_injection.py +254 -0
  321. autogen/tools/experimental/__init__.py +48 -0
  322. autogen/tools/experimental/browser_use/__init__.py +7 -0
  323. autogen/tools/experimental/browser_use/browser_use.py +161 -0
  324. autogen/tools/experimental/crawl4ai/__init__.py +7 -0
  325. autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
  326. autogen/tools/experimental/deep_research/__init__.py +7 -0
  327. autogen/tools/experimental/deep_research/deep_research.py +328 -0
  328. autogen/tools/experimental/duckduckgo/__init__.py +7 -0
  329. autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
  330. autogen/tools/experimental/google/__init__.py +14 -0
  331. autogen/tools/experimental/google/authentication/__init__.py +11 -0
  332. autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
  333. autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
  334. autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
  335. autogen/tools/experimental/google/drive/__init__.py +9 -0
  336. autogen/tools/experimental/google/drive/drive_functions.py +124 -0
  337. autogen/tools/experimental/google/drive/toolkit.py +88 -0
  338. autogen/tools/experimental/google/model.py +17 -0
  339. autogen/tools/experimental/google/toolkit_protocol.py +19 -0
  340. autogen/tools/experimental/google_search/__init__.py +8 -0
  341. autogen/tools/experimental/google_search/google_search.py +93 -0
  342. autogen/tools/experimental/google_search/youtube_search.py +181 -0
  343. autogen/tools/experimental/messageplatform/__init__.py +17 -0
  344. autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
  345. autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
  346. autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
  347. autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
  348. autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
  349. autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
  350. autogen/tools/experimental/perplexity/__init__.py +7 -0
  351. autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
  352. autogen/tools/experimental/reliable/__init__.py +10 -0
  353. autogen/tools/experimental/reliable/reliable.py +1316 -0
  354. autogen/tools/experimental/tavily/__init__.py +7 -0
  355. autogen/tools/experimental/tavily/tavily_search.py +183 -0
  356. autogen/tools/experimental/web_search_preview/__init__.py +7 -0
  357. autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
  358. autogen/tools/experimental/wikipedia/__init__.py +7 -0
  359. autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
  360. autogen/tools/function_utils.py +411 -0
  361. autogen/tools/tool.py +187 -0
  362. autogen/tools/toolkit.py +86 -0
  363. autogen/types.py +29 -0
  364. autogen/version.py +7 -0
  365. templates/client_template/main.jinja2 +69 -0
  366. templates/config_template/config.jinja2 +7 -0
  367. templates/main.jinja2 +61 -0
  368. ag2-0.9.1a1.dist-info/RECORD +0 -6
  369. ag2-0.9.1a1.dist-info/top_level.txt +0 -1
  370. {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/LICENSE +0 -0
  371. {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/NOTICE.md +0 -0
autogen/agentchat/contrib/captainagent/agent_builder.py
@@ -0,0 +1,790 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# SPDX-License-Identifier: MIT
+import hashlib
+import importlib
+import json
+import logging
+import re
+import subprocess as sp
+import time
+from typing import Any, Optional, Union
+
+from termcolor import colored
+
+from .... import AssistantAgent, ConversableAgent, OpenAIWrapper, UserProxyAgent
+from ....code_utils import CODE_BLOCK_PATTERN
+from ....doc_utils import export_module
+from ....llm_config import LLMConfig
+
+__all__ = ["AgentBuilder"]
+
+logger = logging.getLogger(__name__)
+
+
+def _config_check(config: dict):
+    # check config loading
+    assert config.get("coding") is not None, 'Missing "coding" in your config.'
+    assert config.get("default_llm_config") is not None, 'Missing "default_llm_config" in your config.'
+    assert config.get("code_execution_config") is not None, 'Missing "code_execution_config" in your config.'
+
+    for agent_config in config["agent_configs"]:
+        assert agent_config.get("name", None) is not None, 'Missing agent "name" in your agent_configs.'
+        assert agent_config.get("system_message", None) is not None, (
+            'Missing agent "system_message" in your agent_configs.'
+        )
+        assert agent_config.get("description", None) is not None, 'Missing agent "description" in your agent_configs.'
+
+
+def _retrieve_json(text):
+    match = re.findall(CODE_BLOCK_PATTERN, text, flags=re.DOTALL)
+    if not match:
+        return text
+    code_blocks = []
+    for _, code in match:
+        code_blocks.append(code)
+    return code_blocks[0]
+
+
+@export_module("autogen.agentchat.contrib.captainagent")
+class AgentBuilder:
+    """AgentBuilder can help user build an automatic task solving process powered by multi-agent system.
+    Specifically, our building pipeline includes initialize and build.
+    """
+
+    online_server_name = "online"
+
+    DEFAULT_PROXY_AUTO_REPLY = 'There is no code from the last 1 message for me to execute. Group chat manager should let other participants to continue the conversation. If the group chat manager want to end the conversation, you should let other participant reply me only with "TERMINATE"'
+
+    GROUP_CHAT_DESCRIPTION = """ # Group chat instruction
+You are now working in a group chat with different expert and a group chat manager.
+You should refer to the previous message from other participant members or yourself, follow their topic and reply to them.
+
+**Your role is**: {name}
+Group chat members: {members}{user_proxy_desc}
+
+When the task is complete and the result has been carefully verified, after obtaining agreement from the other members, you can end the conversation by replying only with "TERMINATE".
+
+# Your profile
+{sys_msg}
+"""
+
+    DEFAULT_DESCRIPTION = """## Your role
+[Complete this part with expert's name and skill description]
+
+## Task and skill instructions
+- [Complete this part with task description]
+- [Complete this part with skill description]
+- [(Optional) Complete this part with other information]
+"""
+
+    CODING_AND_TASK_SKILL_INSTRUCTION = """## Useful instructions for task-solving
+- Solve the task step by step if you need to.
+- When you find an answer, verify the answer carefully. Include verifiable evidence with possible test case in your response if possible.
+- All your reply should be based on the provided facts.
+
+## How to verify?
+**You have to keep believing that everyone else's answers are wrong until they provide clear enough evidence.**
+- Verifying with step-by-step backward reasoning.
+- Write test cases according to the general task.
+
+## How to use code?
+- Suggest python code (in a python coding block) or shell script (in a sh coding block) for the Computer_terminal to execute.
+- If missing python packages, you can install the package by suggesting a `pip install` code in the ```sh ... ``` block.
+- When using code, you must indicate the script type in the coding block.
+- Do not the coding block which requires users to modify.
+- Do not suggest a coding block if it's not intended to be executed by the Computer_terminal.
+- The Computer_terminal cannot modify your code.
+- **Use 'print' function for the output when relevant**.
+- Check the execution result returned by the Computer_terminal.
+- Do not ask Computer_terminal to copy and paste the result.
+- If the result indicates there is an error, fix the error and output the code again. """
+
+    CODING_PROMPT = """Does the following task need programming (i.e., access external API or tool by coding) to solve,
+or coding may help the following task become easier?
+
+TASK: {task}
+
+Answer only YES or NO.
+"""
+
+    AGENT_NAME_PROMPT = """# Your task
+Suggest no more than {max_agents} experts with their name according to the following user requirement.
+
+## User requirement
+{task}
+
+# Task requirement
+- Expert's name should follow the format: [skill]_Expert.
+- Only reply the names of the experts, separated by ",".
+- If coding skills are required, they should be limited to Python and Shell.
+For example: Python_Expert, Math_Expert, ... """
+
+    AGENT_SYS_MSG_PROMPT = """# Your goal
+- According to the task and expert name, write a high-quality description for the expert by filling the given template.
+- Ensure that your description are clear and unambiguous, and include all necessary information.
+
+# Task
+{task}
+
+# Expert name
+{position}
+
+# Template
+{default_sys_msg}
+"""
+
+    AGENT_DESCRIPTION_PROMPT = """# Your goal
+Summarize the following expert's description in a sentence.
+
+# Expert name
+{position}
+
+# Expert's description
+{sys_msg}
+"""
+
+    AGENT_SEARCHING_PROMPT = """# Your goal
+Considering the following task, what experts should be involved to the task?
+
+# TASK
+{task}
+
+# EXPERT LIST
+{agent_list}
+
+# Requirement
+- You should consider if the experts' name and profile match the task.
+- Considering the effort, you should select less then {max_agents} experts; less is better.
+- Separate expert names by commas and use "_" instead of space. For example, Product_manager,Programmer
+- Only return the list of expert names.
+"""
+
+    AGENT_SELECTION_PROMPT = """# Your goal
+Match roles in the role set to each expert in expert set.
+
+# Skill set
+{skills}
+
+# Expert pool (formatting with name: description)
+{expert_pool}
+
+# Answer format
+```json
+{{
+    "skill_1 description": "expert_name: expert_description", // if there exists an expert that suitable for skill_1
+    "skill_2 description": "None", // if there is no experts that suitable for skill_2
+    ...
+}}
+```
+"""
+
+    def __init__(
+        self,
+        config_file_or_env: Optional[str] = "OAI_CONFIG_LIST",
+        config_file_location: Optional[str] = "",
+        llm_config: Optional[Union[LLMConfig, dict[str, Any]]] = None,
+        builder_model: Optional[Union[str, list]] = [],
+        agent_model: Optional[Union[str, list]] = [],
+        builder_model_tags: Optional[list] = [],
+        agent_model_tags: Optional[list] = [],
+        max_agents: Optional[int] = 5,
+    ):
+        """(These APIs are experimental and may change in the future.)
+
+        Args:
+            config_file_or_env (Optional[str], optional): Path to the config file or name of the environment
+                variable containing the OpenAI API configurations. Defaults to "OAI_CONFIG_LIST".
+            config_file_location (Optional[str], optional): Location of the config file if not in the
+                current directory. Defaults to "".
+            llm_config (Optional[Union[LLMConfig, dict[str, Any]]], optional): Specific configs for LLM
+            builder_model (Optional[Union[str, list]], optional): Model identifier(s) to use as the
+                builder/manager model that coordinates agent creation. Can be a string or list of strings.
+                Filters the config list to match these models. Defaults to [].
+            agent_model (Optional[Union[str, list]], optional): Model identifier(s) to use for the
+                generated participant agents. Can be a string or list of strings. Defaults to [].
+            builder_model_tags (Optional[list], optional): Tags to filter which models from the config
+                can be used as builder models. Defaults to [].
+            agent_model_tags (Optional[list], optional): Tags to filter which models from the config
+                can be used as agent models. Defaults to [].
+            max_agents (Optional[int], optional): Maximum number of agents to create for each task.
+                Defaults to 5.
+        """
+        builder_model = builder_model if isinstance(builder_model, list) else [builder_model]
+        builder_filter_dict = {}
+        if len(builder_model) != 0:
+            builder_filter_dict.update({"model": builder_model})
+        if len(builder_model_tags) != 0:
+            builder_filter_dict.update({"tags": builder_model_tags})
+
+        llm_config = (
+            LLMConfig.from_json(env=config_file_or_env, file_location=config_file_location).where(**builder_filter_dict)
+            if llm_config is None
+            else llm_config
+        )
+        builder_config_list = llm_config.config_list
+
+        if len(builder_config_list) == 0:
+            raise RuntimeError(
+                f"Fail to initialize build manager: {builder_model}{builder_model_tags} does not exist in {config_file_or_env}. "
+                f'If you want to change this model, please specify the "builder_model" in the constructor.'
+            )
+        self.builder_model = OpenAIWrapper(config_list=builder_config_list)
+
+        self.agent_model = agent_model if isinstance(agent_model, list) else [agent_model]
+        self.agent_model_tags = agent_model_tags
+        self.config_file_or_env = config_file_or_env
+        self.config_file_location = config_file_location
+        self.llm_config = llm_config
+
+        self.building_task: str = None
+        self.agent_configs: list[dict[str, Any]] = []
+        self.open_ports: list[str] = []
+        self.agent_procs: dict[str, tuple[sp.Popen, str]] = {}
+        self.agent_procs_assign: dict[str, tuple[ConversableAgent, str]] = {}
+        self.cached_configs: dict = {}
+
+        self.max_agents = max_agents
+
+    def set_builder_model(self, model: str):
+        self.builder_model = model
+
+    def set_agent_model(self, model: str):
+        self.agent_model = model
+
+    def _create_agent(
+        self,
+        agent_config: dict[str, Any],
+        member_name: list[str],
+        llm_config: Union[LLMConfig, dict[str, Any]],
+        use_oai_assistant: Optional[bool] = False,
+    ) -> AssistantAgent:
+        """Create a group chat participant agent.
+
+        If the agent rely on an open-source model, this function will automatically set up an endpoint for that agent.
+        The API address of that endpoint will be "localhost:{free port}".
+
+        Args:
+            agent_config: agent's config. It should include the following information:
+                1. model_name: backbone model of an agent, e.g., gpt-4-1106-preview, meta/Llama-2-70b-chat
+                2. agent_name: use to identify an agent in the group chat.
+                3. system_message: including persona, task solving instruction, etc.
+                4. description: brief description of an agent that help group chat manager to pick the speaker.
+            member_name: a list of agent names in the group chat.
+            llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...).
+            use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
+
+        Returns:
+            agent: a set-up agent.
+        """
+        model_name_or_hf_repo = agent_config.get("model", [])
+        model_name_or_hf_repo = (
+            model_name_or_hf_repo if isinstance(model_name_or_hf_repo, list) else [model_name_or_hf_repo]
+        )
+        model_tags = agent_config.get("tags", [])
+        agent_name = agent_config["name"]
+        system_message = agent_config["system_message"]
+        description = agent_config["description"]
+
+        # Path to the customize **ConversableAgent** class.
+        agent_path = agent_config.get("agent_path")
+        filter_dict = {}
+        if len(model_name_or_hf_repo) > 0:
+            filter_dict.update({"model": model_name_or_hf_repo})
+        if len(model_tags) > 0:
+            filter_dict.update({"tags": model_tags})
+        config_list = (
+            LLMConfig.from_json(env=self.config_file_or_env, file_location=self.config_file_location)
+            .where(**filter_dict)
+            .config_list
+            if self.llm_config is None
+            else self.llm_config.config_list
+        )
+        if len(config_list) == 0:
+            raise RuntimeError(
+                f"Fail to initialize agent {agent_name}: {model_name_or_hf_repo}{model_tags} does not exist in {self.config_file_or_env}.\n"
+                f'If you would like to change this model, please specify the "agent_model" in the constructor.\n'
+                f"If you load configs from json, make sure the model in agent_configs is in the {self.config_file_or_env}."
+            )
+        server_id = self.online_server_name
+        current_config = llm_config.copy()
+        current_config.update({"config_list": config_list})
+        if use_oai_assistant:
+            from ..gpt_assistant_agent import GPTAssistantAgent
+
+            agent = GPTAssistantAgent(
+                name=agent_name,
+                llm_config={**current_config, "assistant_id": None},
+                instructions=system_message,
+                overwrite_instructions=False,
+            )
+        else:
+            user_proxy_desc = ""
+            if self.cached_configs["coding"] is True:
+                user_proxy_desc = (
+                    "\nThe group also include a Computer_terminal to help you run the python and shell code."
+                )
+
+            model_class = AssistantAgent
+            if agent_path:
+                module_path, model_class_name = agent_path.replace("/", ".").rsplit(".", 1)
+                module = importlib.import_module(module_path)
+                model_class = getattr(module, model_class_name)
+                if not issubclass(model_class, ConversableAgent):
+                    logger.error(f"{model_class} is not a ConversableAgent. Use AssistantAgent as default")
+                    model_class = AssistantAgent
+
+            additional_config = {
+                k: v
+                for k, v in agent_config.items()
+                if k not in ["model", "name", "system_message", "description", "agent_path", "tags"]
+            }
+            agent = model_class(
+                name=agent_name, llm_config=current_config.copy(), description=description, **additional_config
+            )
+            if system_message == "":
+                system_message = agent.system_message
+            else:
+                system_message = f"{system_message}\n\n{self.CODING_AND_TASK_SKILL_INSTRUCTION}"
+
+            enhanced_sys_msg = self.GROUP_CHAT_DESCRIPTION.format(
+                name=agent_name, members=member_name, user_proxy_desc=user_proxy_desc, sys_msg=system_message
+            )
+            agent.update_system_message(enhanced_sys_msg)
+        self.agent_procs_assign[agent_name] = (agent, server_id)
+        return agent
+
+    def clear_agent(self, agent_name: str, recycle_endpoint: Optional[bool] = True):
+        """Clear a specific agent by name.
+
+        Args:
+            agent_name: the name of agent.
+            recycle_endpoint: trigger for recycle the endpoint server. If true, the endpoint will be recycled
+                when there is no agent depending on.
+        """
+        _, server_id = self.agent_procs_assign[agent_name]
+        del self.agent_procs_assign[agent_name]
+        if recycle_endpoint:
+            if server_id == self.online_server_name:
+                return
+            else:
+                for _, iter_sid in self.agent_procs_assign.values():
+                    if server_id == iter_sid:
+                        return
+                self.agent_procs[server_id][0].terminate()
+                self.open_ports.append(server_id.split("_")[-1])
+        print(colored(f"Agent {agent_name} has been cleared.", "yellow"), flush=True)
+
+    def clear_all_agents(self, recycle_endpoint: Optional[bool] = True):
+        """Clear all cached agents."""
+        for agent_name in [agent_name for agent_name in self.agent_procs_assign]:
+            self.clear_agent(agent_name, recycle_endpoint)
+        print(colored("All agents have been cleared.", "yellow"), flush=True)
+
+    def build(
+        self,
+        building_task: str,
+        default_llm_config: Union[LLMConfig, dict[str, Any]],
+        coding: Optional[bool] = None,
+        code_execution_config: Optional[dict[str, Any]] = None,
+        use_oai_assistant: Optional[bool] = False,
+        user_proxy: Optional[ConversableAgent] = None,
+        max_agents: Optional[int] = None,
+        **kwargs: Any,
+    ) -> tuple[list[ConversableAgent], dict[str, Any]]:
+        """Auto build agents based on the building task.
+
+        Args:
+            building_task: instruction that helps build manager (gpt-4) to decide what agent should be built.
+            default_llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...).
+            coding: use to identify if the user proxy (a code interpreter) should be added.
+            code_execution_config: specific configs for user proxy (e.g., last_n_messages, work_dir, ...).
+            use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
+            user_proxy: user proxy's class that can be used to replace the default user proxy.
+            max_agents (Optional[int], default=None): Maximum number of agents to create for the task. If None, uses the value from self.max_agents.
+            **kwargs (Any): Additional arguments to pass to _build_agents.
+                - agent_configs: Optional list of predefined agent configurations to use.
+
+        Returns:
+            agent_list: a list of agents.
+            cached_configs: cached configs.
+        """
+        if code_execution_config is None:
+            code_execution_config = {
+                "last_n_messages": 1,
+                "work_dir": "groupchat",
+                "use_docker": False,
+                "timeout": 10,
+            }
+
+        if max_agents is None:
+            max_agents = self.max_agents
+
+        agent_configs = kwargs.get("agent_configs", [])
+        self.building_task = building_task
+
+        print(colored("==> Generating agents...", "green"), flush=True)
+        resp_agent_name = (
+            self.builder_model.create(
+                messages=[
+                    {
+                        "role": "user",
+                        "content": self.AGENT_NAME_PROMPT.format(task=building_task, max_agents=max_agents),
+                    }
+                ]
+            )
+            .choices[0]
+            .message.content
+        )
+        agent_name_list = [agent_name.strip().replace(" ", "_") for agent_name in resp_agent_name.split(",")]
+        print(f"{agent_name_list} are generated.", flush=True)
+
+        print(colored("==> Generating system message...", "green"), flush=True)
+        agent_sys_msg_list = []
+        for name in agent_name_list:
+            print(f"Preparing system message for {name}", flush=True)
+            resp_agent_sys_msg = (
+                self.builder_model.create(
+                    messages=[
+                        {
+                            "role": "user",
+                            "content": self.AGENT_SYS_MSG_PROMPT.format(
+                                task=building_task,
+                                position=name,
+                                default_sys_msg=self.DEFAULT_DESCRIPTION,
+                            ),
+                        }
+                    ]
+                )
+                .choices[0]
+                .message.content
+            )
+            agent_sys_msg_list.append(resp_agent_sys_msg)
+
+        print(colored("==> Generating description...", "green"), flush=True)
+        agent_description_list = []
+        for name, sys_msg in list(zip(agent_name_list, agent_sys_msg_list)):
+            print(f"Preparing description for {name}", flush=True)
+            resp_agent_description = (
+                self.builder_model.create(
+                    messages=[
+                        {
+                            "role": "user",
+                            "content": self.AGENT_DESCRIPTION_PROMPT.format(position=name, sys_msg=sys_msg),
+                        }
+                    ]
+                )
+                .choices[0]
+                .message.content
+            )
+            agent_description_list.append(resp_agent_description)
+
+        for name, sys_msg, description in list(zip(agent_name_list, agent_sys_msg_list, agent_description_list)):
+            agent_configs.append({
+                "name": name,
+                "model": self.agent_model,
+                "tags": self.agent_model_tags,
+                "system_message": sys_msg,
+                "description": description,
+            })
+
+        if coding is None:
+            resp = (
+                self.builder_model.create(
+                    messages=[{"role": "user", "content": self.CODING_PROMPT.format(task=building_task)}]
+                )
+                .choices[0]
+                .message.content
+            )
+            coding = resp == "YES"
+
+        self.cached_configs.update({
+            "building_task": building_task,
+            "agent_configs": agent_configs,
+            "coding": coding,
+            "default_llm_config": default_llm_config,
+            "code_execution_config": code_execution_config,
+        })
+        _config_check(self.cached_configs)
+        return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)
+
+    def build_from_library(
+        self,
+        building_task: str,
+        library_path_or_json: str,
+        default_llm_config: Union[LLMConfig, dict[str, Any]],
+        top_k: int = 3,
+        coding: Optional[bool] = None,
+        code_execution_config: Optional[dict[str, Any]] = None,
+        use_oai_assistant: Optional[bool] = False,
+        embedding_model: Optional[str] = "all-mpnet-base-v2",
+        user_proxy: Optional[ConversableAgent] = None,
+        **kwargs: Any,
+    ) -> tuple[list[ConversableAgent], dict[str, Any]]:
+        """Build agents from a library.
+        The library is a list of agent configs, which contains the name and system_message for each agent.
+        We use a build manager to decide what agent in that library should be involved to the task.
+
+        Args:
+            building_task: instruction that helps build manager (gpt-4) to decide what agent should be built.
+            library_path_or_json: path or JSON string config of agent library.
+            default_llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...).
+            top_k: number of results to return.
+            coding: use to identify if the user proxy (a code interpreter) should be added.
+            code_execution_config: specific configs for user proxy (e.g., last_n_messages, work_dir, ...).
+            use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
+            embedding_model: a Sentence-Transformers model use for embedding similarity to select agents from library.
+                As reference, chromadb use "all-mpnet-base-v2" as default.
+            user_proxy: user proxy's class that can be used to replace the default user proxy.
+            **kwargs: Additional arguments to pass to _build_agents.
+
+        Returns:
+            agent_list: a list of agents.
+            cached_configs: cached configs.
+        """
+        import chromadb
+        from chromadb.utils import embedding_functions
+
+        if code_execution_config is None:
+            code_execution_config = {
+                "last_n_messages": 1,
+                "work_dir": "groupchat",
+                "use_docker": False,
+                "timeout": 120,
+            }
+
+        try:
+            agent_library = json.loads(library_path_or_json)
+        except json.decoder.JSONDecodeError:
+            with open(library_path_or_json) as f:
+                agent_library = json.load(f)
+        except Exception as e:
+            raise e
+
+        print(colored("==> Looking for suitable agents in the library...", "green"), flush=True)
+        skills = building_task.replace(":", " ").split("\n")
+        # skills = [line.split("-", 1)[1].strip() if line.startswith("-") else line for line in lines]
+        if len(skills) == 0:
+            skills = [building_task]
+
+        chroma_client = chromadb.Client()
+        collection = chroma_client.create_collection(
+            name="agent_list",
+            embedding_function=embedding_functions.SentenceTransformerEmbeddingFunction(model_name=embedding_model),
+        )
+        collection.add(
+            documents=[agent["description"] for agent in agent_library],
+            metadatas=[{"source": "agent_profile"} for _ in range(len(agent_library))],
+            ids=[f"agent_{i}" for i in range(len(agent_library))],
+        )
+        agent_desc_list = set()
+        for skill in skills:
+            recall = set(collection.query(query_texts=[skill], n_results=top_k)["documents"][0])
+            agent_desc_list = agent_desc_list.union(recall)
+
+        agent_config_list = []
+        for description in list(agent_desc_list):
+            for agent in agent_library:
+                if agent["description"] == description:
+                    agent_config_list.append(agent.copy())
+                    break
+        chroma_client.delete_collection(collection.name)
+
+        # double recall from the searching result
+        expert_pool = [f"{agent['name']}: {agent['description']}" for agent in agent_config_list]
+        while True:
+            skill_agent_pair_json = (
+                self.builder_model.create(
+                    messages=[
+                        {
+                            "role": "user",
+                            "content": self.AGENT_SELECTION_PROMPT.format(
+                                skills=building_task, expert_pool=expert_pool, max_agents=self.max_agents
+                            ),
+                        }
+                    ]
+                )
+                .choices[0]
+                .message.content
+            )
+            try:
+                skill_agent_pair_json = _retrieve_json(skill_agent_pair_json)
+                skill_agent_pair = json.loads(skill_agent_pair_json)
+                break
+            except Exception as e:
+                print(e, flush=True)
+                time.sleep(5)
+                continue
+
+        recalled_agent_config_list = []
+        recalled_name_desc = []
+        for skill, agent_profile in skill_agent_pair.items():
+            # If no suitable agent, generate an agent
+            if agent_profile == "None":
+                _, agent_config_temp = self.build(
+                    building_task=skill,
+                    default_llm_config=default_llm_config.copy(),
+                    coding=False,
+                    use_oai_assistant=use_oai_assistant,
+                    max_agents=1,
+                )
+                self.clear_agent(agent_config_temp["agent_configs"][0]["name"])
+                recalled_agent_config_list.append(agent_config_temp["agent_configs"][0])
+            else:
+                if agent_profile in recalled_name_desc:
+                    # prevent identical agents
+                    continue
+                recalled_name_desc.append(agent_profile)
+                name = agent_profile.split(":")[0].strip()
+                desc = agent_profile.split(":")[1].strip()
+                for agent in agent_config_list:
+                    if name == agent["name"] and desc == agent["description"]:
+                        recalled_agent_config_list.append(agent.copy())
+
+        print(f"{[agent['name'] for agent in recalled_agent_config_list]} are selected.", flush=True)
+
+        if coding is None:
+            resp = (
+                self.builder_model.create(
+                    messages=[{"role": "user", "content": self.CODING_PROMPT.format(task=building_task)}]
+                )
+                .choices[0]
+                .message.content
+            )
+            coding = resp == "YES"
+
+        self.cached_configs.update({
+            "building_task": building_task,
+            "agent_configs": recalled_agent_config_list,
+            "coding": coding,
+            "default_llm_config": default_llm_config,
+            "code_execution_config": code_execution_config,
+        })
+        _config_check(self.cached_configs)
+
+        return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)
+
+    def _build_agents(
+        self, use_oai_assistant: Optional[bool] = False, user_proxy: Optional[ConversableAgent] = None, **kwargs
+    ) -> tuple[list[ConversableAgent], dict[str, Any]]:
+        """Build agents with generated configs.
+
+        Args:
+            use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
+            user_proxy: user proxy's class that can be used to replace the default user proxy.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            agent_list: a list of agents.
+            cached_configs: cached configs.
+        """
+        agent_configs = self.cached_configs["agent_configs"]
+        default_llm_config = self.cached_configs["default_llm_config"]
+        coding = self.cached_configs["coding"]
+        code_execution_config = self.cached_configs["code_execution_config"]
+
+        print(colored("==> Creating agents...", "green"), flush=True)
+        for config in agent_configs:
+            print(f"Creating agent {config['name']}...", flush=True)
+            self._create_agent(
+                agent_config=config.copy(),
+                member_name=[agent["name"] for agent in agent_configs],
+                llm_config=default_llm_config,
+                use_oai_assistant=use_oai_assistant,
+            )
+        agent_list = [agent_config[0] for agent_config in self.agent_procs_assign.values()]
+
+        if coding is True:
+            print("Adding user console proxy...", flush=True)
+            if user_proxy is None:
+                user_proxy = UserProxyAgent(
+                    name="Computer_terminal",
+                    is_termination_msg=lambda x: x == "TERMINATE" or x == "TERMINATE.",
+                    code_execution_config=code_execution_config,
+                    human_input_mode="NEVER",
+                    default_auto_reply=self.DEFAULT_PROXY_AUTO_REPLY,
+                )
+            agent_list = agent_list + [user_proxy]
+
+        return agent_list, self.cached_configs.copy()
+
+    def save(self, filepath: Optional[str] = None) -> str:
+        """Save building configs. If the filepath is not specific, this function will create a filename by encrypt the
+        building_task string by md5 with "save_config_" prefix, and save config to the local path.
+
+        Args:
+            filepath: save path.
+
+        Return:
+            filepath: path save.
+        """
+        if filepath is None:
+            filepath = f"./save_config_{hashlib.md5(self.building_task.encode('utf-8')).hexdigest()}.json"
+        with open(filepath, "w") as save_file:
+            json.dump(self.cached_configs, save_file, indent=4)
+        print(colored(f"Building config saved to {filepath}", "green"), flush=True)
+
+        return filepath
+
+    def load(
+        self,
+        filepath: Optional[str] = None,
+        config_json: Optional[str] = None,
+        use_oai_assistant: Optional[bool] = False,
+        **kwargs: Any,
+    ) -> tuple[list[ConversableAgent], dict[str, Any]]:
+        """Load building configs and call the build function to complete building without calling online LLMs' api.
+
+        Args:
+            filepath: filepath or JSON string for the save config.
+            config_json: JSON string for the save config.
+            use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
+            **kwargs (Any): Additional arguments to pass to _build_agents:
+                - code_execution_config (Optional[dict[str, Any]]): If provided, overrides the
+                    code execution configuration from the loaded config.
+
+        Returns:
+            agent_list: a list of agents.
+            cached_configs: cached configs.
+        """
+        # load json string.
+        if config_json is not None:
+            print(colored("Loading config from JSON...", "green"), flush=True)
+            cached_configs = json.loads(config_json)
+
+        # load from path.
+        if filepath is not None:
+            print(colored(f"Loading config from {filepath}", "green"), flush=True)
+            with open(filepath) as f:
+                cached_configs = json.load(f)
+
+        _config_check(cached_configs)
+
+        agent_configs = cached_configs["agent_configs"]
+        default_llm_config = cached_configs["default_llm_config"]
+        coding = cached_configs["coding"]
+
+        if kwargs.get("code_execution_config") is not None:
+            # for test
+            self.cached_configs.update({
+                "building_task": cached_configs["building_task"],
+                "agent_configs": agent_configs,
+                "coding": coding,
+                "default_llm_config": default_llm_config,
+                "code_execution_config": kwargs["code_execution_config"],
+            })
+            del kwargs["code_execution_config"]
+            return self._build_agents(use_oai_assistant, **kwargs)
+        else:
+            code_execution_config = cached_configs["code_execution_config"]
+            self.cached_configs.update({
+                "building_task": cached_configs["building_task"],
+                "agent_configs": agent_configs,
+                "coding": coding,
+                "default_llm_config": default_llm_config,
+                "code_execution_config": code_execution_config,
+            })
+            return self._build_agents(use_oai_assistant, **kwargs)
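
For orientation, a minimal usage sketch of the AgentBuilder class added in this release (not part of the published diff). It assumes a valid "OAI_CONFIG_LIST" file or environment variable, network access to the configured LLM provider, and illustrative placeholder values (the model names, task text, and save path below are assumptions, not taken from the release).

# Minimal sketch, assuming an OAI_CONFIG_LIST with entries for the models named below.
from autogen.agentchat.contrib.captainagent import AgentBuilder

builder = AgentBuilder(
    config_file_or_env="OAI_CONFIG_LIST",  # default config source shown in the diff
    builder_model=["gpt-4o"],              # assumed model name; filters the build-manager config
    agent_model=["gpt-4o"],                # assumed model name; used for generated participant agents
    max_agents=3,
)

# build() asks the builder model to propose agents for the task and returns them
# together with the cached configs; coding=True appends the Computer_terminal proxy.
agent_list, cached_configs = builder.build(
    building_task="Analyze a CSV of sales data and report the top three trends.",  # placeholder task
    default_llm_config={"temperature": 0.2},
    coding=True,
)

builder.save("./save_config_example.json")  # persist configs; builder.load(filepath=...) rebuilds without LLM calls
builder.clear_all_agents()

The returned agent_list can then be placed into a group chat (e.g. autogen's GroupChat, also shipped in this wheel) in the usual way.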