ag2 0.9.1a1__py3-none-any.whl → 0.9.1.post0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. The information is provided for informational purposes only.

Potentially problematic release: this version of ag2 might be problematic.

Files changed (357)
  1. {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info}/METADATA +264 -73
  2. ag2-0.9.1.post0.dist-info/RECORD +392 -0
  3. {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info}/WHEEL +1 -2
  4. autogen/__init__.py +89 -0
  5. autogen/_website/__init__.py +3 -0
  6. autogen/_website/generate_api_references.py +427 -0
  7. autogen/_website/generate_mkdocs.py +1174 -0
  8. autogen/_website/notebook_processor.py +476 -0
  9. autogen/_website/process_notebooks.py +656 -0
  10. autogen/_website/utils.py +412 -0
  11. autogen/agentchat/__init__.py +44 -0
  12. autogen/agentchat/agent.py +182 -0
  13. autogen/agentchat/assistant_agent.py +85 -0
  14. autogen/agentchat/chat.py +309 -0
  15. autogen/agentchat/contrib/__init__.py +5 -0
  16. autogen/agentchat/contrib/agent_eval/README.md +7 -0
  17. autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
  18. autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
  19. autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
  20. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
  21. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
  22. autogen/agentchat/contrib/agent_eval/task.py +42 -0
  23. autogen/agentchat/contrib/agent_optimizer.py +429 -0
  24. autogen/agentchat/contrib/capabilities/__init__.py +5 -0
  25. autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
  26. autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
  27. autogen/agentchat/contrib/capabilities/teachability.py +393 -0
  28. autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
  29. autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
  30. autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
  31. autogen/agentchat/contrib/capabilities/transforms.py +566 -0
  32. autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
  33. autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
  34. autogen/agentchat/contrib/captainagent/__init__.py +9 -0
  35. autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
  36. autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
  37. autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
  38. autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
  39. autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
  40. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
  41. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
  42. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
  43. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
  44. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
  45. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
  46. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
  47. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
  48. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
  49. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
  50. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
  51. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
  52. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
  53. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
  54. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
  55. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
  56. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
  57. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
  58. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
  59. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
  60. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
  61. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
  62. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
  63. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
  64. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
  65. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
  66. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
  67. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
  68. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
  69. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
  70. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
  71. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
  72. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
  73. autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
  74. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
  75. autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
  76. autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
  77. autogen/agentchat/contrib/graph_rag/document.py +29 -0
  78. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
  79. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
  80. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
  81. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
  82. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
  83. autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
  84. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
  85. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
  86. autogen/agentchat/contrib/img_utils.py +397 -0
  87. autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
  88. autogen/agentchat/contrib/llava_agent.py +187 -0
  89. autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
  90. autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
  91. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
  92. autogen/agentchat/contrib/rag/__init__.py +10 -0
  93. autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
  94. autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
  95. autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
  96. autogen/agentchat/contrib/rag/query_engine.py +74 -0
  97. autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
  98. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
  99. autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
  100. autogen/agentchat/contrib/swarm_agent.py +1425 -0
  101. autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
  102. autogen/agentchat/contrib/vectordb/__init__.py +5 -0
  103. autogen/agentchat/contrib/vectordb/base.py +232 -0
  104. autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
  105. autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
  106. autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
  107. autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
  108. autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
  109. autogen/agentchat/contrib/vectordb/utils.py +126 -0
  110. autogen/agentchat/contrib/web_surfer.py +303 -0
  111. autogen/agentchat/conversable_agent.py +4020 -0
  112. autogen/agentchat/group/__init__.py +64 -0
  113. autogen/agentchat/group/available_condition.py +91 -0
  114. autogen/agentchat/group/context_condition.py +77 -0
  115. autogen/agentchat/group/context_expression.py +238 -0
  116. autogen/agentchat/group/context_str.py +41 -0
  117. autogen/agentchat/group/context_variables.py +192 -0
  118. autogen/agentchat/group/group_tool_executor.py +202 -0
  119. autogen/agentchat/group/group_utils.py +591 -0
  120. autogen/agentchat/group/handoffs.py +244 -0
  121. autogen/agentchat/group/llm_condition.py +93 -0
  122. autogen/agentchat/group/multi_agent_chat.py +237 -0
  123. autogen/agentchat/group/on_condition.py +58 -0
  124. autogen/agentchat/group/on_context_condition.py +54 -0
  125. autogen/agentchat/group/patterns/__init__.py +18 -0
  126. autogen/agentchat/group/patterns/auto.py +159 -0
  127. autogen/agentchat/group/patterns/manual.py +176 -0
  128. autogen/agentchat/group/patterns/pattern.py +288 -0
  129. autogen/agentchat/group/patterns/random.py +106 -0
  130. autogen/agentchat/group/patterns/round_robin.py +117 -0
  131. autogen/agentchat/group/reply_result.py +26 -0
  132. autogen/agentchat/group/speaker_selection_result.py +41 -0
  133. autogen/agentchat/group/targets/__init__.py +4 -0
  134. autogen/agentchat/group/targets/group_chat_target.py +132 -0
  135. autogen/agentchat/group/targets/group_manager_target.py +151 -0
  136. autogen/agentchat/group/targets/transition_target.py +413 -0
  137. autogen/agentchat/group/targets/transition_utils.py +6 -0
  138. autogen/agentchat/groupchat.py +1694 -0
  139. autogen/agentchat/realtime/__init__.py +3 -0
  140. autogen/agentchat/realtime/experimental/__init__.py +20 -0
  141. autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
  142. autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
  143. autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
  144. autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
  145. autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
  146. autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
  147. autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
  148. autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
  149. autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
  150. autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
  151. autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
  152. autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
  153. autogen/agentchat/realtime/experimental/function_observer.py +85 -0
  154. autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
  155. autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
  156. autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
  157. autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
  158. autogen/agentchat/realtime/experimental/websockets.py +21 -0
  159. autogen/agentchat/realtime_agent/__init__.py +21 -0
  160. autogen/agentchat/user_proxy_agent.py +111 -0
  161. autogen/agentchat/utils.py +206 -0
  162. autogen/agents/__init__.py +3 -0
  163. autogen/agents/contrib/__init__.py +10 -0
  164. autogen/agents/contrib/time/__init__.py +8 -0
  165. autogen/agents/contrib/time/time_reply_agent.py +73 -0
  166. autogen/agents/contrib/time/time_tool_agent.py +51 -0
  167. autogen/agents/experimental/__init__.py +27 -0
  168. autogen/agents/experimental/deep_research/__init__.py +7 -0
  169. autogen/agents/experimental/deep_research/deep_research.py +52 -0
  170. autogen/agents/experimental/discord/__init__.py +7 -0
  171. autogen/agents/experimental/discord/discord.py +66 -0
  172. autogen/agents/experimental/document_agent/__init__.py +19 -0
  173. autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
  174. autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
  175. autogen/agents/experimental/document_agent/document_agent.py +461 -0
  176. autogen/agents/experimental/document_agent/document_conditions.py +50 -0
  177. autogen/agents/experimental/document_agent/document_utils.py +380 -0
  178. autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
  179. autogen/agents/experimental/document_agent/parser_utils.py +130 -0
  180. autogen/agents/experimental/document_agent/url_utils.py +426 -0
  181. autogen/agents/experimental/reasoning/__init__.py +7 -0
  182. autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
  183. autogen/agents/experimental/slack/__init__.py +7 -0
  184. autogen/agents/experimental/slack/slack.py +73 -0
  185. autogen/agents/experimental/telegram/__init__.py +7 -0
  186. autogen/agents/experimental/telegram/telegram.py +77 -0
  187. autogen/agents/experimental/websurfer/__init__.py +7 -0
  188. autogen/agents/experimental/websurfer/websurfer.py +62 -0
  189. autogen/agents/experimental/wikipedia/__init__.py +7 -0
  190. autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
  191. autogen/browser_utils.py +309 -0
  192. autogen/cache/__init__.py +10 -0
  193. autogen/cache/abstract_cache_base.py +75 -0
  194. autogen/cache/cache.py +203 -0
  195. autogen/cache/cache_factory.py +88 -0
  196. autogen/cache/cosmos_db_cache.py +144 -0
  197. autogen/cache/disk_cache.py +102 -0
  198. autogen/cache/in_memory_cache.py +58 -0
  199. autogen/cache/redis_cache.py +123 -0
  200. autogen/code_utils.py +596 -0
  201. autogen/coding/__init__.py +22 -0
  202. autogen/coding/base.py +119 -0
  203. autogen/coding/docker_commandline_code_executor.py +268 -0
  204. autogen/coding/factory.py +47 -0
  205. autogen/coding/func_with_reqs.py +202 -0
  206. autogen/coding/jupyter/__init__.py +23 -0
  207. autogen/coding/jupyter/base.py +36 -0
  208. autogen/coding/jupyter/docker_jupyter_server.py +167 -0
  209. autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
  210. autogen/coding/jupyter/import_utils.py +82 -0
  211. autogen/coding/jupyter/jupyter_client.py +231 -0
  212. autogen/coding/jupyter/jupyter_code_executor.py +160 -0
  213. autogen/coding/jupyter/local_jupyter_server.py +172 -0
  214. autogen/coding/local_commandline_code_executor.py +405 -0
  215. autogen/coding/markdown_code_extractor.py +45 -0
  216. autogen/coding/utils.py +56 -0
  217. autogen/doc_utils.py +34 -0
  218. autogen/events/__init__.py +7 -0
  219. autogen/events/agent_events.py +1010 -0
  220. autogen/events/base_event.py +99 -0
  221. autogen/events/client_events.py +167 -0
  222. autogen/events/helpers.py +36 -0
  223. autogen/events/print_event.py +46 -0
  224. autogen/exception_utils.py +73 -0
  225. autogen/extensions/__init__.py +5 -0
  226. autogen/fast_depends/__init__.py +16 -0
  227. autogen/fast_depends/_compat.py +80 -0
  228. autogen/fast_depends/core/__init__.py +14 -0
  229. autogen/fast_depends/core/build.py +225 -0
  230. autogen/fast_depends/core/model.py +576 -0
  231. autogen/fast_depends/dependencies/__init__.py +15 -0
  232. autogen/fast_depends/dependencies/model.py +29 -0
  233. autogen/fast_depends/dependencies/provider.py +39 -0
  234. autogen/fast_depends/library/__init__.py +10 -0
  235. autogen/fast_depends/library/model.py +46 -0
  236. autogen/fast_depends/py.typed +6 -0
  237. autogen/fast_depends/schema.py +66 -0
  238. autogen/fast_depends/use.py +280 -0
  239. autogen/fast_depends/utils.py +187 -0
  240. autogen/formatting_utils.py +83 -0
  241. autogen/function_utils.py +13 -0
  242. autogen/graph_utils.py +178 -0
  243. autogen/import_utils.py +526 -0
  244. autogen/interop/__init__.py +22 -0
  245. autogen/interop/crewai/__init__.py +7 -0
  246. autogen/interop/crewai/crewai.py +88 -0
  247. autogen/interop/interoperability.py +71 -0
  248. autogen/interop/interoperable.py +46 -0
  249. autogen/interop/langchain/__init__.py +8 -0
  250. autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
  251. autogen/interop/langchain/langchain_tool.py +82 -0
  252. autogen/interop/litellm/__init__.py +7 -0
  253. autogen/interop/litellm/litellm_config_factory.py +113 -0
  254. autogen/interop/pydantic_ai/__init__.py +7 -0
  255. autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
  256. autogen/interop/registry.py +69 -0
  257. autogen/io/__init__.py +15 -0
  258. autogen/io/base.py +151 -0
  259. autogen/io/console.py +56 -0
  260. autogen/io/processors/__init__.py +12 -0
  261. autogen/io/processors/base.py +21 -0
  262. autogen/io/processors/console_event_processor.py +56 -0
  263. autogen/io/run_response.py +293 -0
  264. autogen/io/thread_io_stream.py +63 -0
  265. autogen/io/websockets.py +213 -0
  266. autogen/json_utils.py +43 -0
  267. autogen/llm_config.py +379 -0
  268. autogen/logger/__init__.py +11 -0
  269. autogen/logger/base_logger.py +128 -0
  270. autogen/logger/file_logger.py +261 -0
  271. autogen/logger/logger_factory.py +42 -0
  272. autogen/logger/logger_utils.py +57 -0
  273. autogen/logger/sqlite_logger.py +523 -0
  274. autogen/math_utils.py +339 -0
  275. autogen/mcp/__init__.py +7 -0
  276. autogen/mcp/mcp_client.py +208 -0
  277. autogen/messages/__init__.py +7 -0
  278. autogen/messages/agent_messages.py +948 -0
  279. autogen/messages/base_message.py +107 -0
  280. autogen/messages/client_messages.py +171 -0
  281. autogen/messages/print_message.py +49 -0
  282. autogen/oai/__init__.py +53 -0
  283. autogen/oai/anthropic.py +714 -0
  284. autogen/oai/bedrock.py +628 -0
  285. autogen/oai/cerebras.py +299 -0
  286. autogen/oai/client.py +1435 -0
  287. autogen/oai/client_utils.py +169 -0
  288. autogen/oai/cohere.py +479 -0
  289. autogen/oai/gemini.py +990 -0
  290. autogen/oai/gemini_types.py +129 -0
  291. autogen/oai/groq.py +305 -0
  292. autogen/oai/mistral.py +303 -0
  293. autogen/oai/oai_models/__init__.py +11 -0
  294. autogen/oai/oai_models/_models.py +16 -0
  295. autogen/oai/oai_models/chat_completion.py +87 -0
  296. autogen/oai/oai_models/chat_completion_audio.py +32 -0
  297. autogen/oai/oai_models/chat_completion_message.py +86 -0
  298. autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
  299. autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
  300. autogen/oai/oai_models/completion_usage.py +60 -0
  301. autogen/oai/ollama.py +643 -0
  302. autogen/oai/openai_utils.py +881 -0
  303. autogen/oai/together.py +370 -0
  304. autogen/retrieve_utils.py +491 -0
  305. autogen/runtime_logging.py +160 -0
  306. autogen/token_count_utils.py +267 -0
  307. autogen/tools/__init__.py +20 -0
  308. autogen/tools/contrib/__init__.py +9 -0
  309. autogen/tools/contrib/time/__init__.py +7 -0
  310. autogen/tools/contrib/time/time.py +41 -0
  311. autogen/tools/dependency_injection.py +254 -0
  312. autogen/tools/experimental/__init__.py +43 -0
  313. autogen/tools/experimental/browser_use/__init__.py +7 -0
  314. autogen/tools/experimental/browser_use/browser_use.py +161 -0
  315. autogen/tools/experimental/crawl4ai/__init__.py +7 -0
  316. autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
  317. autogen/tools/experimental/deep_research/__init__.py +7 -0
  318. autogen/tools/experimental/deep_research/deep_research.py +328 -0
  319. autogen/tools/experimental/duckduckgo/__init__.py +7 -0
  320. autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
  321. autogen/tools/experimental/google/__init__.py +14 -0
  322. autogen/tools/experimental/google/authentication/__init__.py +11 -0
  323. autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
  324. autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
  325. autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
  326. autogen/tools/experimental/google/drive/__init__.py +9 -0
  327. autogen/tools/experimental/google/drive/drive_functions.py +124 -0
  328. autogen/tools/experimental/google/drive/toolkit.py +88 -0
  329. autogen/tools/experimental/google/model.py +17 -0
  330. autogen/tools/experimental/google/toolkit_protocol.py +19 -0
  331. autogen/tools/experimental/google_search/__init__.py +8 -0
  332. autogen/tools/experimental/google_search/google_search.py +93 -0
  333. autogen/tools/experimental/google_search/youtube_search.py +181 -0
  334. autogen/tools/experimental/messageplatform/__init__.py +17 -0
  335. autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
  336. autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
  337. autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
  338. autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
  339. autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
  340. autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
  341. autogen/tools/experimental/perplexity/__init__.py +7 -0
  342. autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
  343. autogen/tools/experimental/tavily/__init__.py +7 -0
  344. autogen/tools/experimental/tavily/tavily_search.py +183 -0
  345. autogen/tools/experimental/web_search_preview/__init__.py +7 -0
  346. autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
  347. autogen/tools/experimental/wikipedia/__init__.py +7 -0
  348. autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
  349. autogen/tools/function_utils.py +411 -0
  350. autogen/tools/tool.py +187 -0
  351. autogen/tools/toolkit.py +86 -0
  352. autogen/types.py +29 -0
  353. autogen/version.py +7 -0
  354. ag2-0.9.1a1.dist-info/RECORD +0 -6
  355. ag2-0.9.1a1.dist-info/top_level.txt +0 -1
  356. {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/LICENSE +0 -0
  357. {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/NOTICE.md +0 -0
autogen/tools/experimental/browser_use/browser_use.py
@@ -0,0 +1,161 @@
+ # Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from typing import Annotated, Any, Optional, Union
+
+ from pydantic import BaseModel, field_validator
+
+ from ....doc_utils import export_module
+ from ....import_utils import optional_import_block, require_optional_import
+ from ....llm_config import LLMConfig
+ from ... import Depends, Tool
+ from ...dependency_injection import on
+
+ with optional_import_block():
+     from browser_use import Agent, Controller
+     from browser_use.browser.browser import Browser, BrowserConfig
+
+     from ....interop.langchain.langchain_chat_model_factory import LangChainChatModelFactory
+
+
+ __all__ = ["BrowserUseResult", "BrowserUseTool", "ExtractedContent"]
+
+
+ @export_module("autogen.tools.experimental.browser_use")
+ class ExtractedContent(BaseModel):
+     """Extracted content from the browser.
+
+     Attributes:
+         content: The extracted content.
+         url: The URL of the extracted content
+     """
+
+     content: str
+     url: Optional[str]
+
+     @field_validator("url")
+     @classmethod
+     def check_url(cls, v: str) -> Optional[str]:
+         """Check if the URL is about:blank and return None if it is.
+
+         Args:
+             v: The URL to check.
+         """
+         if v == "about:blank":
+             return None
+         return v
+
+
+ @export_module("autogen.tools.experimental.browser_use")
+ class BrowserUseResult(BaseModel):
+     """The result of using the browser to perform a task.
+
+     Attributes:
+         extracted_content: List of extracted content.
+         final_result: The final result.
+     """
+
+     extracted_content: list[ExtractedContent]
+     final_result: Optional[str]
+
+
+ @require_optional_import(
+     [
+         "langchain_anthropic",
+         "langchain_google_genai",
+         "langchain_ollama",
+         "langchain_openai",
+         "langchain_core",
+         "browser_use",
+     ],
+     "browser-use",
+ )
+ @export_module("autogen.tools.experimental")
+ class BrowserUseTool(Tool):
+     """BrowserUseTool is a tool that uses the browser to perform a task."""
+
+     def __init__(  # type: ignore[no-any-unimported]
+         self,
+         *,
+         llm_config: Union[LLMConfig, dict[str, Any]],
+         browser: Optional["Browser"] = None,
+         agent_kwargs: Optional[dict[str, Any]] = None,
+         browser_config: Optional[dict[str, Any]] = None,
+     ):
+         """Use the browser to perform a task.
+
+         Args:
+             llm_config: The LLM configuration.
+             browser: The browser to use. If defined, browser_config must be None
+             agent_kwargs: Additional keyword arguments to pass to the Agent
+             browser_config: The browser configuration to use. If defined, browser must be None
+         """
+         if agent_kwargs is None:
+             agent_kwargs = {}
+
+         if browser_config is None:
+             browser_config = {}
+
+         if browser is not None and browser_config:
+             raise ValueError(
+                 f"Cannot provide both browser and additional keyword parameters: {browser=}, {browser_config=}"
+             )
+
+         async def browser_use(  # type: ignore[no-any-unimported]
+             task: Annotated[str, "The task to perform."],
+             llm_config: Annotated[Union[LLMConfig, dict[str, Any]], Depends(on(llm_config))],
+             browser: Annotated[Optional[Browser], Depends(on(browser))],
+             agent_kwargs: Annotated[dict[str, Any], Depends(on(agent_kwargs))],
+             browser_config: Annotated[dict[str, Any], Depends(on(browser_config))],
+         ) -> BrowserUseResult:
+             agent_kwargs = agent_kwargs.copy()
+             browser_config = browser_config.copy()
+             if browser is None:
+                 # set default value for headless
+                 headless = browser_config.pop("headless", True)
+
+                 browser_config = BrowserConfig(headless=headless, **browser_config)
+                 browser = Browser(config=browser_config)
+
+             # set default value for generate_gif
+             if "generate_gif" not in agent_kwargs:
+                 agent_kwargs["generate_gif"] = False
+
+             llm = LangChainChatModelFactory.create_base_chat_model(llm_config)
+
+             max_steps = agent_kwargs.pop("max_steps", 100)
+
+             agent = Agent(
+                 task=task,
+                 llm=llm,
+                 browser=browser,
+                 controller=BrowserUseTool._get_controller(llm_config),
+                 **agent_kwargs,
+             )
+
+             result = await agent.run(max_steps=max_steps)
+
+             extracted_content = [
+                 ExtractedContent(content=content, url=url)
+                 for content, url in zip(result.extracted_content(), result.urls())
+             ]
+             return BrowserUseResult(
+                 extracted_content=extracted_content,
+                 final_result=result.final_result(),
+             )
+
+         super().__init__(
+             name="browser_use",
+             description="Use the browser to perform a task.",
+             func_or_tool=browser_use,
+         )
+
+     @staticmethod
+     def _get_controller(llm_config: Union[LLMConfig, dict[str, Any]]) -> Any:
+         response_format = (
+             llm_config["config_list"][0].get("response_format", None)
+             if "config_list" in llm_config
+             else llm_config.get("response_format")
+         )
+         return Controller(output_model=response_format)
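For orientation, here is a minimal usage sketch of the BrowserUseTool added above. It is not part of the diff: the model name and API key are placeholders, and the register_for_llm/register_for_execution helpers are assumed from AG2's Tool base class rather than shown in this hunk.

from autogen import ConversableAgent
from autogen.tools.experimental import BrowserUseTool

# Placeholder config; the "config_list" layout matches what _get_controller() reads above.
llm_config = {"config_list": [{"model": "gpt-4o", "api_key": "..."}]}

tool = BrowserUseTool(
    llm_config=llm_config,
    browser_config={"headless": True},  # forwarded to browser_use.BrowserConfig
    agent_kwargs={"max_steps": 20},     # popped inside browser_use() before Agent(...)
)

assistant = ConversableAgent(name="assistant", llm_config=llm_config)
user_proxy = ConversableAgent(name="user_proxy", human_input_mode="NEVER")

# Assumed Tool helpers: expose the tool to the LLM and let user_proxy execute it.
tool.register_for_llm(assistant)
tool.register_for_execution(user_proxy)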
autogen/tools/experimental/crawl4ai/__init__.py
@@ -0,0 +1,7 @@
+ # Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from .crawl4ai import Crawl4AITool
+
+ __all__ = ["Crawl4AITool"]
autogen/tools/experimental/crawl4ai/crawl4ai.py
@@ -0,0 +1,153 @@
+ # Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from typing import Annotated, Any, Optional, Union
+
+ from pydantic import BaseModel
+
+ from ....doc_utils import export_module
+ from ....import_utils import optional_import_block, require_optional_import
+ from ....interop import LiteLLmConfigFactory
+ from ....llm_config import LLMConfig
+ from ... import Tool
+ from ...dependency_injection import Depends, on
+
+ with optional_import_block():
+     from crawl4ai import AsyncWebCrawler, BrowserConfig, CacheMode, CrawlerRunConfig
+     from crawl4ai.extraction_strategy import LLMExtractionStrategy
+
+ __all__ = ["Crawl4AITool"]
+
+
+ @require_optional_import(["crawl4ai"], "crawl4ai")
+ @export_module("autogen.tools.experimental")
+ class Crawl4AITool(Tool):
+     """
+     Crawl a website and extract information using the crawl4ai library.
+     """
+
+     def __init__(
+         self,
+         llm_config: Optional[Union[LLMConfig, dict[str, Any]]] = None,
+         extraction_model: Optional[type[BaseModel]] = None,
+         llm_strategy_kwargs: Optional[dict[str, Any]] = None,
+     ) -> None:
+         """
+         Initialize the Crawl4AITool.
+
+         Args:
+             llm_config: The config dictionary for the LLM model. If None, the tool will run without LLM.
+             extraction_model: The Pydantic model to use for extraction. If None, the tool will use the default schema.
+             llm_strategy_kwargs: The keyword arguments to pass to the LLM extraction strategy.
+         """
+         Crawl4AITool._validate_llm_strategy_kwargs(llm_strategy_kwargs, llm_config_provided=(llm_config is not None))
+
+         async def crawl4ai_helper(  # type: ignore[no-any-unimported]
+             url: str,
+             browser_cfg: Optional["BrowserConfig"] = None,
+             crawl_config: Optional["CrawlerRunConfig"] = None,
+         ) -> Any:
+             async with AsyncWebCrawler(config=browser_cfg) as crawler:
+                 result = await crawler.arun(
+                     url=url,
+                     config=crawl_config,
+                 )
+
+             if crawl_config is None:
+                 response = result.markdown
+             else:
+                 response = result.extracted_content if result.success else result.error_message
+
+             return response
+
+         async def crawl4ai_without_llm(
+             url: Annotated[str, "The url to crawl and extract information from."],
+         ) -> Any:
+             return await crawl4ai_helper(url=url)
+
+         async def crawl4ai_with_llm(
+             url: Annotated[str, "The url to crawl and extract information from."],
+             instruction: Annotated[str, "The instruction to provide on how and what to extract."],
+             llm_config: Annotated[Any, Depends(on(llm_config))],
+             llm_strategy_kwargs: Annotated[Optional[dict[str, Any]], Depends(on(llm_strategy_kwargs))],
+             extraction_model: Annotated[Optional[type[BaseModel]], Depends(on(extraction_model))],
+         ) -> Any:
+             browser_cfg = BrowserConfig(headless=True)
+             crawl_config = Crawl4AITool._get_crawl_config(
+                 llm_config=llm_config,
+                 instruction=instruction,
+                 extraction_model=extraction_model,
+                 llm_strategy_kwargs=llm_strategy_kwargs,
+             )
+
+             return await crawl4ai_helper(url=url, browser_cfg=browser_cfg, crawl_config=crawl_config)
+
+         super().__init__(
+             name="crawl4ai",
+             description="Crawl a website and extract information.",
+             func_or_tool=crawl4ai_without_llm if llm_config is None else crawl4ai_with_llm,
+         )
+
+     @staticmethod
+     def _validate_llm_strategy_kwargs(llm_strategy_kwargs: Optional[dict[str, Any]], llm_config_provided: bool) -> None:
+         if not llm_strategy_kwargs:
+             return
+
+         if not llm_config_provided:
+             raise ValueError("llm_strategy_kwargs can only be provided if llm_config is also provided.")
+
+         check_parameters_error_msg = "".join(
+             f"'{key}' should not be provided in llm_strategy_kwargs. It is automatically set based on llm_config.\n"
+             for key in ["provider", "api_token"]
+             if key in llm_strategy_kwargs
+         )
+
+         check_parameters_error_msg += "".join(
+             "'schema' should not be provided in llm_strategy_kwargs. It is automatically set based on extraction_model type.\n"
+             if "schema" in llm_strategy_kwargs
+             else ""
+         )
+
+         check_parameters_error_msg += "".join(
+             "'instruction' should not be provided in llm_strategy_kwargs. It is provided at the time of calling the tool.\n"
+             if "instruction" in llm_strategy_kwargs
+             else ""
+         )
+
+         if check_parameters_error_msg:
+             raise ValueError(check_parameters_error_msg)
+
+     @staticmethod
+     def _get_crawl_config(  # type: ignore[no-any-unimported]
+         llm_config: Union[LLMConfig, dict[str, Any]],
+         instruction: str,
+         llm_strategy_kwargs: Optional[dict[str, Any]] = None,
+         extraction_model: Optional[type[BaseModel]] = None,
+     ) -> "CrawlerRunConfig":
+         lite_llm_config = LiteLLmConfigFactory.create_lite_llm_config(llm_config)
+
+         if llm_strategy_kwargs is None:
+             llm_strategy_kwargs = {}
+
+         schema = (
+             extraction_model.model_json_schema()
+             if (extraction_model and issubclass(extraction_model, BaseModel))
+             else None
+         )
+
+         extraction_type = llm_strategy_kwargs.pop("extraction_type", "schema" if schema else "block")
+
+         # 1. Define the LLM extraction strategy
+         llm_strategy = LLMExtractionStrategy(
+             **lite_llm_config,
+             schema=schema,
+             extraction_type=extraction_type,
+             instruction=instruction,
+             **llm_strategy_kwargs,
+         )
+
+         # 2. Build the crawler config
+         crawl_config = CrawlerRunConfig(extraction_strategy=llm_strategy, cache_mode=CacheMode.BYPASS)
+
+         return crawl_config
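As a point of reference, a small sketch of how the Crawl4AITool above might be constructed follows. It is not taken from the diff: the Product model, model name, and API key are hypothetical placeholders.

from pydantic import BaseModel

from autogen.tools.experimental import Crawl4AITool

# Without llm_config the tool registers crawl4ai_without_llm and returns raw markdown.
markdown_tool = Crawl4AITool()

# Hypothetical extraction model; its JSON schema is passed to LLMExtractionStrategy
# by _get_crawl_config() above (the "schema" extraction_type).
class Product(BaseModel):
    name: str
    price: str

llm_config = {"config_list": [{"model": "gpt-4o", "api_key": "..."}]}  # placeholder values
extraction_tool = Crawl4AITool(llm_config=llm_config, extraction_model=Product)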
autogen/tools/experimental/deep_research/__init__.py
@@ -0,0 +1,7 @@
+ # Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from .deep_research import DeepResearchTool
+
+ __all__ = ["DeepResearchTool"]
autogen/tools/experimental/deep_research/deep_research.py
@@ -0,0 +1,328 @@
+ # Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ import copy
+ from typing import Annotated, Any, Callable, Union
+
+ from pydantic import BaseModel, Field
+
+ from ....agentchat import ConversableAgent
+ from ....doc_utils import export_module
+ from ....llm_config import LLMConfig
+ from ... import Depends, Tool
+ from ...dependency_injection import on
+
+ __all__ = ["DeepResearchTool"]
+
+
+ class Subquestion(BaseModel):
+     question: Annotated[str, Field(description="The original question.")]
+
+     def format(self) -> str:
+         return f"Question: {self.question}\n"
+
+
+ class SubquestionAnswer(Subquestion):
+     answer: Annotated[str, Field(description="The answer to the question.")]
+
+     def format(self) -> str:
+         return f"Question: {self.question}\n{self.answer}\n"
+
+
+ class Task(BaseModel):
+     question: Annotated[str, Field(description="The original question.")]
+     subquestions: Annotated[list[Subquestion], Field(description="The subquestions that need to be answered.")]
+
+     def format(self) -> str:
+         return f"Task: {self.question}\n\n" + "\n".join(
+             "Subquestion " + str(i + 1) + ":\n" + subquestion.format()
+             for i, subquestion in enumerate(self.subquestions)
+         )
+
+
+ class CompletedTask(BaseModel):
+     question: Annotated[str, Field(description="The original question.")]
+     subquestions: Annotated[list[SubquestionAnswer], Field(description="The subquestions and their answers")]
+
+     def format(self) -> str:
+         return f"Task: {self.question}\n\n" + "\n".join(
+             "Subquestion " + str(i + 1) + ":\n" + subquestion.format()
+             for i, subquestion in enumerate(self.subquestions)
+         )
+
+
+ class InformationCrumb(BaseModel):
+     source_url: str
+     source_title: str
+     source_summary: str
+     relevant_info: str
+
+
+ class GatheredInformation(BaseModel):
+     information: list[InformationCrumb]
+
+     def format(self) -> str:
+         return "Here is the gathered information: \n" + "\n".join(
+             f"URL: {info.source_url}\nTitle: {info.source_title}\nSummary: {info.source_summary}\nRelevant Information: {info.relevant_info}\n\n"
+             for info in self.information
+         )
+
+
+ @export_module("autogen.tools.experimental")
+ class DeepResearchTool(Tool):
+     """A tool that delegates a web research task to the subteams of agents."""
+
+     ANSWER_CONFIRMED_PREFIX = "Answer confirmed:"
+
+     def __init__(
+         self,
+         llm_config: Union[LLMConfig, dict[str, Any]],
+         max_web_steps: int = 30,
+     ):
+         """Initialize the DeepResearchTool.
+
+         Args:
+             llm_config (LLMConfig, dict[str, Any]): The LLM configuration.
+             max_web_steps (int, optional): The maximum number of web steps. Defaults to 30.
+         """
+         self.llm_config = llm_config
+
+         self.summarizer_agent = ConversableAgent(
+             name="SummarizerAgent",
+             system_message=(
+                 "You are an agent with a task of answering the question provided by the user."
+                 "First you need to split the question into subquestions by calling the 'split_question_and_answer_subquestions' method."
+                 "Then you need to sintesize the answers the original question by combining the answers to the subquestions."
+             ),
+             is_termination_msg=lambda x: x.get("content", "")
+             and x.get("content", "").startswith(self.ANSWER_CONFIRMED_PREFIX),
+             llm_config=llm_config,
+             human_input_mode="NEVER",
+         )
+
+         self.critic_agent = ConversableAgent(
+             name="CriticAgent",
+             system_message=(
+                 "You are a critic agent responsible for evaluating the answer provided by the summarizer agent.\n"
+                 "Your task is to assess the quality of the answer based on its coherence, relevance, and completeness.\n"
+                 "Provide constructive feedback on how the answer can be improved.\n"
+                 "If the answer is satisfactory, call the 'confirm_answer' method to end the task.\n"
+             ),
+             is_termination_msg=lambda x: x.get("content", "")
+             and x.get("content", "").startswith(self.ANSWER_CONFIRMED_PREFIX),
+             llm_config=llm_config,
+             human_input_mode="NEVER",
+         )
+
+         def delegate_research_task(
+             task: Annotated[str, "The task to perform a research on."],
+             llm_config: Annotated[Union[LLMConfig, dict[str, Any]], Depends(on(llm_config))],
+             max_web_steps: Annotated[int, Depends(on(max_web_steps))],
+         ) -> str:
+             """Delegate a research task to the agent.
+
+             Args:
+                 task (str): The task to perform a research on.
+                 llm_config (LLMConfig, dict[str, Any]): The LLM configuration.
+                 max_web_steps (int): The maximum number of web steps.
+
+             Returns:
+                 str: The answer to the research task.
+             """
+
+             @self.summarizer_agent.register_for_execution()
+             @self.critic_agent.register_for_llm(description="Call this method to confirm the final answer.")
+             def confirm_summary(answer: str, reasoning: str) -> str:
+                 return f"{self.ANSWER_CONFIRMED_PREFIX}" + answer + "\nReasoning: " + reasoning
+
+             split_question_and_answer_subquestions = DeepResearchTool._get_split_question_and_answer_subquestions(
+                 llm_config=llm_config,
+                 max_web_steps=max_web_steps,
+             )
+
+             self.summarizer_agent.register_for_llm(description="Split the question into subquestions and get answers.")(
+                 split_question_and_answer_subquestions
+             )
+             self.critic_agent.register_for_execution()(split_question_and_answer_subquestions)
+
+             result = self.critic_agent.initiate_chat(
+                 self.summarizer_agent,
+                 message="Please answer the following question: " + task,
+                 # This outer chat should preserve the history of the conversation
+                 clear_history=False,
+             )
+
+             return result.summary
+
+         super().__init__(
+             name=delegate_research_task.__name__,
+             description="Delegate a research task to the deep research agent.",
+             func_or_tool=delegate_research_task,
+         )
+
+     SUBQUESTIONS_ANSWER_PREFIX = "Subquestions answered:"
+
+     @staticmethod
+     def _get_split_question_and_answer_subquestions(
+         llm_config: Union[LLMConfig, dict[str, Any]], max_web_steps: int
+     ) -> Callable[..., Any]:
+         def split_question_and_answer_subquestions(
+             question: Annotated[str, "The question to split and answer."],
+             llm_config: Annotated[Union[LLMConfig, dict[str, Any]], Depends(on(llm_config))],
+             max_web_steps: Annotated[int, Depends(on(max_web_steps))],
+         ) -> str:
+             decomposition_agent = ConversableAgent(
+                 name="DecompositionAgent",
+                 system_message=(
+                     "You are an expert at breaking down complex questions into smaller, focused subquestions.\n"
+                     "Your task is to take any question provided and divide it into clear, actionable subquestions that can be individually answered.\n"
+                     "Ensure the subquestions are logical, non-redundant, and cover all key aspects of the original question.\n"
+                     "Avoid providing answers or interpretations—focus solely on decomposition.\n"
+                     "Do not include banal, general knowledge questions\n"
+                     "Do not include questions that go into unnecessary detail that is not relevant to the original question\n"
+                     "Do not include question that require knowledge of the original or other subquestions to answer\n"
+                     "Some rule of thumb is to have only one subquestion for easy questions, 3 for medium questions, and 5 for hard questions.\n"
+                 ),
+                 llm_config=llm_config,
+                 is_termination_msg=lambda x: x.get("content", "")
+                 and x.get("content", "").startswith(DeepResearchTool.SUBQUESTIONS_ANSWER_PREFIX),
+                 human_input_mode="NEVER",
+             )
+
+             example_task = Task(
+                 question="What is the capital of France?",
+                 subquestions=[Subquestion(question="What is the capital of France?")],
+             )
+             decomposition_critic = ConversableAgent(
+                 name="DecompositionCritic",
+                 system_message=(
+                     "You are a critic agent responsible for evaluating the subquestions provided by the initial analysis agent.\n"
+                     "You need to confirm whether the subquestions are clear, actionable, and cover all key aspects of the original question.\n"
+                     "Do not accept redundant or unnecessary subquestions, focus solely on the minimal viable subset of subqestions necessary to answer the original question. \n"
+                     "Do not accept banal, general knowledge questions\n"
+                     "Do not accept questions that go into unnecessary detail that is not relevant to the original question\n"
+                     "Remove questions that can be answered with combining knowledge from other questions\n"
+                     "After you are satisfied with the subquestions, call the 'generate_subquestions' method to answer each subquestion.\n"
+                     "This is an example of an argument that can be passed to the 'generate_subquestions' method:\n"
+                     f"{{'task': {example_task.model_dump()}}}\n"
+                     "Some rule of thumb is to have only one subquestion for easy questions, 3 for medium questions, and 5 for hard questions.\n"
+                 ),
+                 llm_config=llm_config,
+                 is_termination_msg=lambda x: x.get("content", "")
+                 and x.get("content", "").startswith(DeepResearchTool.SUBQUESTIONS_ANSWER_PREFIX),
+                 human_input_mode="NEVER",
+             )
+
+             generate_subquestions = DeepResearchTool._get_generate_subquestions(
+                 llm_config=llm_config, max_web_steps=max_web_steps
+             )
+             decomposition_agent.register_for_execution()(generate_subquestions)
+             decomposition_critic.register_for_llm(description="Generate subquestions for a task.")(
+                 generate_subquestions
+             )
+
+             result = decomposition_critic.initiate_chat(
+                 decomposition_agent,
+                 message="Analyse and gather subqestions for the following question: " + question,
+             )
+
+             return result.summary
+
+         return split_question_and_answer_subquestions
+
+     @staticmethod
+     def _get_generate_subquestions(
+         llm_config: Union[LLMConfig, dict[str, Any]],
+         max_web_steps: int,
+     ) -> Callable[..., str]:
+         """Get the generate_subquestions method.
+
+         Args:
+             llm_config (Union[LLMConfig, dict[str, Any]]): The LLM configuration.
+             max_web_steps (int): The maximum number of web steps.
+
+         Returns:
+             Callable[..., str]: The generate_subquestions method.
+         """
+
+         def generate_subquestions(
+             task: Task,
+             llm_config: Annotated[Union[LLMConfig, dict[str, Any]], Depends(on(llm_config))],
+             max_web_steps: Annotated[int, Depends(on(max_web_steps))],
+         ) -> str:
+             if not task.subquestions:
+                 task.subquestions = [Subquestion(question=task.question)]
+
+             subquestions_answers: list[SubquestionAnswer] = []
+             for subquestion in task.subquestions:
+                 answer = DeepResearchTool._answer_question(
+                     subquestion.question, llm_config=llm_config, max_web_steps=max_web_steps
+                 )
+                 subquestions_answers.append(SubquestionAnswer(question=subquestion.question, answer=answer))
+
+             completed_task = CompletedTask(question=task.question, subquestions=subquestions_answers)
+
+             return f"{DeepResearchTool.SUBQUESTIONS_ANSWER_PREFIX} \n" + completed_task.format()
+
+         return generate_subquestions
+
+     @staticmethod
+     def _answer_question(
+         question: str,
+         llm_config: Union[LLMConfig, dict[str, Any]],
+         max_web_steps: int,
+     ) -> str:
+         from ....agents.experimental.websurfer import WebSurferAgent
+
+         websurfer_config = copy.deepcopy(llm_config)
+
+         websurfer_config["config_list"][0]["response_format"] = GatheredInformation
+
+         def is_termination_msg(x: dict[str, Any]) -> bool:
+             content = x.get("content", "")
+             return (content is not None) and content.startswith(DeepResearchTool.ANSWER_CONFIRMED_PREFIX)
+
+         websurfer_agent = WebSurferAgent(
+             llm_config=llm_config,
+             web_tool_llm_config=websurfer_config,
+             name="WebSurferAgent",
+             system_message=(
+                 "You are a web surfer agent responsible for gathering information from the web to provide information for answering a question\n"
+                 "You will be asked to find information related to the question and provide a summary of the information gathered.\n"
+                 "The summary should include the URL, title, summary, and relevant information for each piece of information gathered.\n"
+             ),
+             is_termination_msg=is_termination_msg,
+             human_input_mode="NEVER",
+             web_tool_kwargs={
+                 "agent_kwargs": {"max_steps": max_web_steps},
+             },
+         )
+
+         websurfer_critic = ConversableAgent(
+             name="WebSurferCritic",
+             system_message=(
+                 "You are a critic agent responsible for evaluating the answer provided by the web surfer agent.\n"
+                 "You need to confirm whether the information provided by the websurfer is correct and sufficient to answer the question.\n"
+                 "You can ask the web surfer to provide more information or provide and confirm the answer.\n"
+             ),
+             llm_config=llm_config,
+             is_termination_msg=is_termination_msg,
+             human_input_mode="NEVER",
+         )
+
+         @websurfer_agent.register_for_execution()
+         @websurfer_critic.register_for_llm(
+             description="Call this method when you agree that the original question can be answered with the gathered information and provide the answer."
+         )
+         def confirm_answer(answer: str) -> str:
+             return f"{DeepResearchTool.ANSWER_CONFIRMED_PREFIX} " + answer
+
+         websurfer_critic.register_for_execution()(websurfer_agent.tool)
+
+         result = websurfer_critic.initiate_chat(
+             websurfer_agent,
+             message="Please find the answer to this question: " + question,
+         )
+
+         return result.summary
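Again for orientation only, a hedged sketch of wiring up the DeepResearchTool above. The model name and API key are placeholders, and register_for_llm/register_for_execution are assumed from AG2's Tool base class rather than shown in this hunk.

from autogen import ConversableAgent
from autogen.tools.experimental import DeepResearchTool

llm_config = {"config_list": [{"model": "gpt-4o", "api_key": "..."}]}  # placeholder values

tool = DeepResearchTool(llm_config=llm_config, max_web_steps=10)

assistant = ConversableAgent(name="assistant", llm_config=llm_config)
user_proxy = ConversableAgent(name="user_proxy", human_input_mode="NEVER")

# Assumed helpers: the assistant's LLM can call delegate_research_task; user_proxy executes it.
tool.register_for_llm(assistant)
tool.register_for_execution(user_proxy)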
autogen/tools/experimental/duckduckgo/__init__.py
@@ -0,0 +1,7 @@
+ # Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from .duckduckgo_search import DuckDuckGoSearchTool
+
+ __all__ = ["DuckDuckGoSearchTool"]