ag2 0.10.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (423)
  1. ag2-0.10.2.dist-info/METADATA +819 -0
  2. ag2-0.10.2.dist-info/RECORD +423 -0
  3. ag2-0.10.2.dist-info/WHEEL +4 -0
  4. ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
  5. ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
  6. autogen/__init__.py +88 -0
  7. autogen/_website/__init__.py +3 -0
  8. autogen/_website/generate_api_references.py +426 -0
  9. autogen/_website/generate_mkdocs.py +1216 -0
  10. autogen/_website/notebook_processor.py +475 -0
  11. autogen/_website/process_notebooks.py +656 -0
  12. autogen/_website/utils.py +413 -0
  13. autogen/a2a/__init__.py +36 -0
  14. autogen/a2a/agent_executor.py +86 -0
  15. autogen/a2a/client.py +357 -0
  16. autogen/a2a/errors.py +18 -0
  17. autogen/a2a/httpx_client_factory.py +79 -0
  18. autogen/a2a/server.py +221 -0
  19. autogen/a2a/utils.py +207 -0
  20. autogen/agentchat/__init__.py +47 -0
  21. autogen/agentchat/agent.py +180 -0
  22. autogen/agentchat/assistant_agent.py +86 -0
  23. autogen/agentchat/chat.py +325 -0
  24. autogen/agentchat/contrib/__init__.py +5 -0
  25. autogen/agentchat/contrib/agent_eval/README.md +7 -0
  26. autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
  27. autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
  28. autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
  29. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
  30. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
  31. autogen/agentchat/contrib/agent_eval/task.py +42 -0
  32. autogen/agentchat/contrib/agent_optimizer.py +432 -0
  33. autogen/agentchat/contrib/capabilities/__init__.py +5 -0
  34. autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
  35. autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
  36. autogen/agentchat/contrib/capabilities/teachability.py +393 -0
  37. autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
  38. autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
  39. autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
  40. autogen/agentchat/contrib/capabilities/transforms.py +578 -0
  41. autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
  42. autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
  43. autogen/agentchat/contrib/captainagent/__init__.py +9 -0
  44. autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
  45. autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
  46. autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
  47. autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
  48. autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
  49. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
  50. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
  51. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
  52. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
  53. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
  54. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
  55. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
  56. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
  57. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
  58. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
  59. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
  60. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
  61. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
  62. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
  63. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
  64. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
  65. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
  66. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
  67. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
  68. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
  69. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
  70. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
  71. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
  72. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
  73. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
  74. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
  75. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
  76. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
  77. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
  78. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
  79. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
  80. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
  81. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
  82. autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
  83. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
  84. autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
  85. autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
  86. autogen/agentchat/contrib/graph_rag/document.py +29 -0
  87. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
  88. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
  89. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
  90. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
  91. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
  92. autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
  93. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
  94. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
  95. autogen/agentchat/contrib/img_utils.py +397 -0
  96. autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
  97. autogen/agentchat/contrib/llava_agent.py +189 -0
  98. autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
  99. autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
  100. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
  101. autogen/agentchat/contrib/rag/__init__.py +10 -0
  102. autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
  103. autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
  104. autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
  105. autogen/agentchat/contrib/rag/query_engine.py +76 -0
  106. autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
  107. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
  108. autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
  109. autogen/agentchat/contrib/swarm_agent.py +1404 -0
  110. autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
  111. autogen/agentchat/contrib/vectordb/__init__.py +5 -0
  112. autogen/agentchat/contrib/vectordb/base.py +224 -0
  113. autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
  114. autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
  115. autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
  116. autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
  117. autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
  118. autogen/agentchat/contrib/vectordb/utils.py +126 -0
  119. autogen/agentchat/contrib/web_surfer.py +304 -0
  120. autogen/agentchat/conversable_agent.py +4307 -0
  121. autogen/agentchat/group/__init__.py +67 -0
  122. autogen/agentchat/group/available_condition.py +91 -0
  123. autogen/agentchat/group/context_condition.py +77 -0
  124. autogen/agentchat/group/context_expression.py +238 -0
  125. autogen/agentchat/group/context_str.py +39 -0
  126. autogen/agentchat/group/context_variables.py +182 -0
  127. autogen/agentchat/group/events/transition_events.py +111 -0
  128. autogen/agentchat/group/group_tool_executor.py +324 -0
  129. autogen/agentchat/group/group_utils.py +659 -0
  130. autogen/agentchat/group/guardrails.py +179 -0
  131. autogen/agentchat/group/handoffs.py +303 -0
  132. autogen/agentchat/group/llm_condition.py +93 -0
  133. autogen/agentchat/group/multi_agent_chat.py +291 -0
  134. autogen/agentchat/group/on_condition.py +55 -0
  135. autogen/agentchat/group/on_context_condition.py +51 -0
  136. autogen/agentchat/group/patterns/__init__.py +18 -0
  137. autogen/agentchat/group/patterns/auto.py +160 -0
  138. autogen/agentchat/group/patterns/manual.py +177 -0
  139. autogen/agentchat/group/patterns/pattern.py +295 -0
  140. autogen/agentchat/group/patterns/random.py +106 -0
  141. autogen/agentchat/group/patterns/round_robin.py +117 -0
  142. autogen/agentchat/group/reply_result.py +24 -0
  143. autogen/agentchat/group/safeguards/__init__.py +21 -0
  144. autogen/agentchat/group/safeguards/api.py +241 -0
  145. autogen/agentchat/group/safeguards/enforcer.py +1158 -0
  146. autogen/agentchat/group/safeguards/events.py +140 -0
  147. autogen/agentchat/group/safeguards/validator.py +435 -0
  148. autogen/agentchat/group/speaker_selection_result.py +41 -0
  149. autogen/agentchat/group/targets/__init__.py +4 -0
  150. autogen/agentchat/group/targets/function_target.py +245 -0
  151. autogen/agentchat/group/targets/group_chat_target.py +133 -0
  152. autogen/agentchat/group/targets/group_manager_target.py +151 -0
  153. autogen/agentchat/group/targets/transition_target.py +424 -0
  154. autogen/agentchat/group/targets/transition_utils.py +6 -0
  155. autogen/agentchat/groupchat.py +1832 -0
  156. autogen/agentchat/realtime/__init__.py +3 -0
  157. autogen/agentchat/realtime/experimental/__init__.py +20 -0
  158. autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
  159. autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
  160. autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
  161. autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
  162. autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
  163. autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
  164. autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
  165. autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
  166. autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
  167. autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
  168. autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
  169. autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
  170. autogen/agentchat/realtime/experimental/function_observer.py +84 -0
  171. autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
  172. autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
  173. autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
  174. autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
  175. autogen/agentchat/realtime/experimental/websockets.py +21 -0
  176. autogen/agentchat/realtime_agent/__init__.py +21 -0
  177. autogen/agentchat/user_proxy_agent.py +114 -0
  178. autogen/agentchat/utils.py +206 -0
  179. autogen/agents/__init__.py +3 -0
  180. autogen/agents/contrib/__init__.py +10 -0
  181. autogen/agents/contrib/time/__init__.py +8 -0
  182. autogen/agents/contrib/time/time_reply_agent.py +74 -0
  183. autogen/agents/contrib/time/time_tool_agent.py +52 -0
  184. autogen/agents/experimental/__init__.py +27 -0
  185. autogen/agents/experimental/deep_research/__init__.py +7 -0
  186. autogen/agents/experimental/deep_research/deep_research.py +52 -0
  187. autogen/agents/experimental/discord/__init__.py +7 -0
  188. autogen/agents/experimental/discord/discord.py +66 -0
  189. autogen/agents/experimental/document_agent/__init__.py +19 -0
  190. autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
  191. autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
  192. autogen/agents/experimental/document_agent/document_agent.py +643 -0
  193. autogen/agents/experimental/document_agent/document_conditions.py +50 -0
  194. autogen/agents/experimental/document_agent/document_utils.py +376 -0
  195. autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
  196. autogen/agents/experimental/document_agent/parser_utils.py +134 -0
  197. autogen/agents/experimental/document_agent/url_utils.py +417 -0
  198. autogen/agents/experimental/reasoning/__init__.py +7 -0
  199. autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
  200. autogen/agents/experimental/slack/__init__.py +7 -0
  201. autogen/agents/experimental/slack/slack.py +73 -0
  202. autogen/agents/experimental/telegram/__init__.py +7 -0
  203. autogen/agents/experimental/telegram/telegram.py +76 -0
  204. autogen/agents/experimental/websurfer/__init__.py +7 -0
  205. autogen/agents/experimental/websurfer/websurfer.py +70 -0
  206. autogen/agents/experimental/wikipedia/__init__.py +7 -0
  207. autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
  208. autogen/browser_utils.py +309 -0
  209. autogen/cache/__init__.py +10 -0
  210. autogen/cache/abstract_cache_base.py +71 -0
  211. autogen/cache/cache.py +203 -0
  212. autogen/cache/cache_factory.py +88 -0
  213. autogen/cache/cosmos_db_cache.py +144 -0
  214. autogen/cache/disk_cache.py +97 -0
  215. autogen/cache/in_memory_cache.py +54 -0
  216. autogen/cache/redis_cache.py +119 -0
  217. autogen/code_utils.py +598 -0
  218. autogen/coding/__init__.py +30 -0
  219. autogen/coding/base.py +120 -0
  220. autogen/coding/docker_commandline_code_executor.py +283 -0
  221. autogen/coding/factory.py +56 -0
  222. autogen/coding/func_with_reqs.py +203 -0
  223. autogen/coding/jupyter/__init__.py +23 -0
  224. autogen/coding/jupyter/base.py +36 -0
  225. autogen/coding/jupyter/docker_jupyter_server.py +160 -0
  226. autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
  227. autogen/coding/jupyter/import_utils.py +82 -0
  228. autogen/coding/jupyter/jupyter_client.py +224 -0
  229. autogen/coding/jupyter/jupyter_code_executor.py +154 -0
  230. autogen/coding/jupyter/local_jupyter_server.py +164 -0
  231. autogen/coding/local_commandline_code_executor.py +341 -0
  232. autogen/coding/markdown_code_extractor.py +44 -0
  233. autogen/coding/utils.py +55 -0
  234. autogen/coding/yepcode_code_executor.py +197 -0
  235. autogen/doc_utils.py +35 -0
  236. autogen/environments/__init__.py +10 -0
  237. autogen/environments/docker_python_environment.py +365 -0
  238. autogen/environments/python_environment.py +125 -0
  239. autogen/environments/system_python_environment.py +85 -0
  240. autogen/environments/venv_python_environment.py +220 -0
  241. autogen/environments/working_directory.py +74 -0
  242. autogen/events/__init__.py +7 -0
  243. autogen/events/agent_events.py +1016 -0
  244. autogen/events/base_event.py +100 -0
  245. autogen/events/client_events.py +168 -0
  246. autogen/events/helpers.py +44 -0
  247. autogen/events/print_event.py +45 -0
  248. autogen/exception_utils.py +73 -0
  249. autogen/extensions/__init__.py +5 -0
  250. autogen/fast_depends/__init__.py +16 -0
  251. autogen/fast_depends/_compat.py +75 -0
  252. autogen/fast_depends/core/__init__.py +14 -0
  253. autogen/fast_depends/core/build.py +206 -0
  254. autogen/fast_depends/core/model.py +527 -0
  255. autogen/fast_depends/dependencies/__init__.py +15 -0
  256. autogen/fast_depends/dependencies/model.py +30 -0
  257. autogen/fast_depends/dependencies/provider.py +40 -0
  258. autogen/fast_depends/library/__init__.py +10 -0
  259. autogen/fast_depends/library/model.py +46 -0
  260. autogen/fast_depends/py.typed +6 -0
  261. autogen/fast_depends/schema.py +66 -0
  262. autogen/fast_depends/use.py +272 -0
  263. autogen/fast_depends/utils.py +177 -0
  264. autogen/formatting_utils.py +83 -0
  265. autogen/function_utils.py +13 -0
  266. autogen/graph_utils.py +173 -0
  267. autogen/import_utils.py +539 -0
  268. autogen/interop/__init__.py +22 -0
  269. autogen/interop/crewai/__init__.py +7 -0
  270. autogen/interop/crewai/crewai.py +88 -0
  271. autogen/interop/interoperability.py +71 -0
  272. autogen/interop/interoperable.py +46 -0
  273. autogen/interop/langchain/__init__.py +8 -0
  274. autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
  275. autogen/interop/langchain/langchain_tool.py +78 -0
  276. autogen/interop/litellm/__init__.py +7 -0
  277. autogen/interop/litellm/litellm_config_factory.py +178 -0
  278. autogen/interop/pydantic_ai/__init__.py +7 -0
  279. autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
  280. autogen/interop/registry.py +70 -0
  281. autogen/io/__init__.py +15 -0
  282. autogen/io/base.py +151 -0
  283. autogen/io/console.py +56 -0
  284. autogen/io/processors/__init__.py +12 -0
  285. autogen/io/processors/base.py +21 -0
  286. autogen/io/processors/console_event_processor.py +61 -0
  287. autogen/io/run_response.py +294 -0
  288. autogen/io/thread_io_stream.py +63 -0
  289. autogen/io/websockets.py +214 -0
  290. autogen/json_utils.py +42 -0
  291. autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
  292. autogen/llm_clients/__init__.py +77 -0
  293. autogen/llm_clients/client_v2.py +122 -0
  294. autogen/llm_clients/models/__init__.py +55 -0
  295. autogen/llm_clients/models/content_blocks.py +389 -0
  296. autogen/llm_clients/models/unified_message.py +145 -0
  297. autogen/llm_clients/models/unified_response.py +83 -0
  298. autogen/llm_clients/openai_completions_client.py +444 -0
  299. autogen/llm_config/__init__.py +11 -0
  300. autogen/llm_config/client.py +59 -0
  301. autogen/llm_config/config.py +461 -0
  302. autogen/llm_config/entry.py +169 -0
  303. autogen/llm_config/types.py +37 -0
  304. autogen/llm_config/utils.py +223 -0
  305. autogen/logger/__init__.py +11 -0
  306. autogen/logger/base_logger.py +129 -0
  307. autogen/logger/file_logger.py +262 -0
  308. autogen/logger/logger_factory.py +42 -0
  309. autogen/logger/logger_utils.py +57 -0
  310. autogen/logger/sqlite_logger.py +524 -0
  311. autogen/math_utils.py +338 -0
  312. autogen/mcp/__init__.py +7 -0
  313. autogen/mcp/__main__.py +78 -0
  314. autogen/mcp/helpers.py +45 -0
  315. autogen/mcp/mcp_client.py +349 -0
  316. autogen/mcp/mcp_proxy/__init__.py +19 -0
  317. autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
  318. autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
  319. autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
  320. autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
  321. autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
  322. autogen/mcp/mcp_proxy/security.py +399 -0
  323. autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
  324. autogen/messages/__init__.py +7 -0
  325. autogen/messages/agent_messages.py +946 -0
  326. autogen/messages/base_message.py +108 -0
  327. autogen/messages/client_messages.py +172 -0
  328. autogen/messages/print_message.py +48 -0
  329. autogen/oai/__init__.py +61 -0
  330. autogen/oai/anthropic.py +1516 -0
  331. autogen/oai/bedrock.py +800 -0
  332. autogen/oai/cerebras.py +302 -0
  333. autogen/oai/client.py +1658 -0
  334. autogen/oai/client_utils.py +196 -0
  335. autogen/oai/cohere.py +494 -0
  336. autogen/oai/gemini.py +1045 -0
  337. autogen/oai/gemini_types.py +156 -0
  338. autogen/oai/groq.py +319 -0
  339. autogen/oai/mistral.py +311 -0
  340. autogen/oai/oai_models/__init__.py +23 -0
  341. autogen/oai/oai_models/_models.py +16 -0
  342. autogen/oai/oai_models/chat_completion.py +86 -0
  343. autogen/oai/oai_models/chat_completion_audio.py +32 -0
  344. autogen/oai/oai_models/chat_completion_message.py +97 -0
  345. autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
  346. autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
  347. autogen/oai/oai_models/completion_usage.py +59 -0
  348. autogen/oai/ollama.py +657 -0
  349. autogen/oai/openai_responses.py +451 -0
  350. autogen/oai/openai_utils.py +897 -0
  351. autogen/oai/together.py +387 -0
  352. autogen/remote/__init__.py +18 -0
  353. autogen/remote/agent.py +199 -0
  354. autogen/remote/agent_service.py +197 -0
  355. autogen/remote/errors.py +17 -0
  356. autogen/remote/httpx_client_factory.py +131 -0
  357. autogen/remote/protocol.py +37 -0
  358. autogen/remote/retry.py +102 -0
  359. autogen/remote/runtime.py +96 -0
  360. autogen/retrieve_utils.py +490 -0
  361. autogen/runtime_logging.py +161 -0
  362. autogen/testing/__init__.py +12 -0
  363. autogen/testing/messages.py +45 -0
  364. autogen/testing/test_agent.py +111 -0
  365. autogen/token_count_utils.py +280 -0
  366. autogen/tools/__init__.py +20 -0
  367. autogen/tools/contrib/__init__.py +9 -0
  368. autogen/tools/contrib/time/__init__.py +7 -0
  369. autogen/tools/contrib/time/time.py +40 -0
  370. autogen/tools/dependency_injection.py +249 -0
  371. autogen/tools/experimental/__init__.py +54 -0
  372. autogen/tools/experimental/browser_use/__init__.py +7 -0
  373. autogen/tools/experimental/browser_use/browser_use.py +154 -0
  374. autogen/tools/experimental/code_execution/__init__.py +7 -0
  375. autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
  376. autogen/tools/experimental/crawl4ai/__init__.py +7 -0
  377. autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
  378. autogen/tools/experimental/deep_research/__init__.py +7 -0
  379. autogen/tools/experimental/deep_research/deep_research.py +329 -0
  380. autogen/tools/experimental/duckduckgo/__init__.py +7 -0
  381. autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
  382. autogen/tools/experimental/firecrawl/__init__.py +7 -0
  383. autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
  384. autogen/tools/experimental/google/__init__.py +14 -0
  385. autogen/tools/experimental/google/authentication/__init__.py +11 -0
  386. autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
  387. autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
  388. autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
  389. autogen/tools/experimental/google/drive/__init__.py +9 -0
  390. autogen/tools/experimental/google/drive/drive_functions.py +124 -0
  391. autogen/tools/experimental/google/drive/toolkit.py +88 -0
  392. autogen/tools/experimental/google/model.py +17 -0
  393. autogen/tools/experimental/google/toolkit_protocol.py +19 -0
  394. autogen/tools/experimental/google_search/__init__.py +8 -0
  395. autogen/tools/experimental/google_search/google_search.py +93 -0
  396. autogen/tools/experimental/google_search/youtube_search.py +181 -0
  397. autogen/tools/experimental/messageplatform/__init__.py +17 -0
  398. autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
  399. autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
  400. autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
  401. autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
  402. autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
  403. autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
  404. autogen/tools/experimental/perplexity/__init__.py +7 -0
  405. autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
  406. autogen/tools/experimental/reliable/__init__.py +10 -0
  407. autogen/tools/experimental/reliable/reliable.py +1311 -0
  408. autogen/tools/experimental/searxng/__init__.py +7 -0
  409. autogen/tools/experimental/searxng/searxng_search.py +142 -0
  410. autogen/tools/experimental/tavily/__init__.py +7 -0
  411. autogen/tools/experimental/tavily/tavily_search.py +176 -0
  412. autogen/tools/experimental/web_search_preview/__init__.py +7 -0
  413. autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
  414. autogen/tools/experimental/wikipedia/__init__.py +7 -0
  415. autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
  416. autogen/tools/function_utils.py +412 -0
  417. autogen/tools/tool.py +188 -0
  418. autogen/tools/toolkit.py +86 -0
  419. autogen/types.py +29 -0
  420. autogen/version.py +7 -0
  421. templates/client_template/main.jinja2 +72 -0
  422. templates/config_template/config.jinja2 +7 -0
  423. templates/main.jinja2 +61 -0
autogen/oai/bedrock.py ADDED
@@ -0,0 +1,800 @@
1
+ # Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+ #
5
+ # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
+ # SPDX-License-Identifier: MIT
7
+ """Create a compatible client for the Amazon Bedrock Converse API.
8
+
9
+ Example usage:
10
+ Install the `boto3` package by running `pip install --upgrade boto3`.
11
+ - https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
12
+
13
+ ```python
14
+ import autogen
15
+
16
+ config_list = [
17
+ {
18
+ "api_type": "bedrock",
19
+ "model": "meta.llama3-1-8b-instruct-v1:0",
20
+ "aws_region": "us-west-2",
21
+ "aws_access_key": "",
22
+ "aws_secret_key": "",
23
+ "price": [0.003, 0.015],
24
+ }
25
+ ]
26
+
27
+ assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
28
+ ```
29
+ """
30
+
31
+ from __future__ import annotations
32
+
33
+ import base64
34
+ import json
35
+ import os
36
+ import re
37
+ import time
38
+ import warnings
39
+ from typing import Any, Literal
40
+
41
+ import requests
42
+ from pydantic import BaseModel, Field, SecretStr, field_serializer
43
+ from typing_extensions import Required, Unpack
44
+
45
+ from ..import_utils import optional_import_block, require_optional_import
46
+ from ..llm_config.entry import LLMConfigEntry, LLMConfigEntryDict
47
+ from .client_utils import validate_parameter
48
+ from .oai_models import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall, Choice, CompletionUsage
49
+
50
+ with optional_import_block():
51
+ import boto3
52
+ from botocore.config import Config
53
+
54
+
55
class BedrockEntryDict(LLMConfigEntryDict, total=False):
    """Typed keyword arguments accepted when configuring a Bedrock LLM entry.

    Only ``aws_region`` is mandatory; every other key is optional and, where
    applicable, falls back to environment variables at client construction.
    """

    api_type: Literal["bedrock"]
    aws_region: Required[str]
    # AWS credential/session settings (optional; env-var fallbacks exist).
    aws_access_key: SecretStr | None
    aws_secret_key: SecretStr | None
    aws_session_token: SecretStr | None
    aws_profile_name: str | None
    # Sampling and caching knobs.
    top_k: int | None
    k: int | None
    seed: int | None
    cache_seed: int | None
    # Behavioural switches.
    supports_system_prompts: bool
    price: list[float] | None
    timeout: int | None
70
+
71
class BedrockLLMConfigEntry(LLMConfigEntry):
    """LLM config entry for Amazon Bedrock (``api_type == "bedrock"``)."""

    api_type: Literal["bedrock"] = "bedrock"

    # Bedrock-specific options.
    aws_region: str
    aws_access_key: SecretStr | None = None
    aws_secret_key: SecretStr | None = None
    aws_session_token: SecretStr | None = None
    aws_profile_name: str | None = None
    top_k: int | None = None
    k: int | None = None
    seed: int | None = None
    cache_seed: int | None = None
    supports_system_prompts: bool = True
    # Validated to exactly two entries when provided.
    price: list[float] | None = Field(default=None, min_length=2, max_length=2)
    timeout: int | None = None

    @field_serializer("aws_access_key", "aws_secret_key", "aws_session_token", when_used="unless-none")
    def serialize_aws_secrets(self, v: SecretStr) -> str:
        """Serialise AWS secret fields to their plain string values."""
        return v.get_secret_value()

    def create_client(self):
        """Not supported on this entry; the Bedrock client is built elsewhere."""
        raise NotImplementedError("BedrockLLMConfigEntry.create_client must be implemented.")
95
+
96
+ @require_optional_import("boto3", "bedrock")
97
+ class BedrockClient:
98
+ """Client for Amazon's Bedrock Converse API."""
99
+
100
+ RESPONSE_USAGE_KEYS: list[str] = ["prompt_tokens", "completion_tokens", "total_tokens", "cost", "model"]
101
+
102
+ _retries = 5
103
+
104
+ def __init__(self, **kwargs: Unpack[BedrockEntryDict]):
105
+ """Initialises BedrockClient for Amazon's Bedrock Converse API"""
106
+ self._aws_access_key = kwargs.get("aws_access_key") or os.getenv("AWS_ACCESS_KEY")
107
+ self._aws_secret_key = kwargs.get("aws_secret_key") or os.getenv("AWS_SECRET_KEY")
108
+ self._aws_session_token = kwargs.get("aws_session_token") or os.getenv("AWS_SESSION_TOKEN")
109
+ self._aws_region = kwargs.get("aws_region") or os.getenv("AWS_REGION")
110
+ self._aws_profile_name = kwargs.get("aws_profile_name")
111
+ self._timeout = kwargs.get("timeout")
112
+
113
+ if self._aws_region is None:
114
+ raise ValueError("Region is required to use the Amazon Bedrock API.")
115
+
116
+ if self._timeout is None:
117
+ self._timeout = 60
118
+
119
+ # Initialize Bedrock client, session, and runtime
120
+ bedrock_config = Config(
121
+ region_name=self._aws_region,
122
+ signature_version="v4",
123
+ retries={"max_attempts": self._retries, "mode": "standard"},
124
+ read_timeout=self._timeout,
125
+ )
126
+
127
+ session = boto3.Session(
128
+ aws_access_key_id=self._aws_access_key,
129
+ aws_secret_access_key=self._aws_secret_key,
130
+ aws_session_token=self._aws_session_token,
131
+ profile_name=self._aws_profile_name,
132
+ )
133
+
134
+ # if "response_format" in kwargs and kwargs["response_format"] is not None:
135
+ # warnings.warn("response_format is not supported for Bedrock, it will be ignored.", UserWarning)
136
+ self._response_format: BaseModel | dict[str, Any] | None = kwargs.get("response_format")
137
+ # if haven't got any access_key or secret_key in environment variable or via arguments then
138
+ if (
139
+ self._aws_access_key is None
140
+ or self._aws_access_key == ""
141
+ or self._aws_secret_key is None
142
+ or self._aws_secret_key == ""
143
+ ):
144
+ # attempts to get client from attached role of managed service (lambda, ec2, ecs, etc.)
145
+ self.bedrock_runtime = boto3.client(service_name="bedrock-runtime", config=bedrock_config)
146
+ else:
147
+ session = boto3.Session(
148
+ aws_access_key_id=self._aws_access_key,
149
+ aws_secret_access_key=self._aws_secret_key,
150
+ aws_session_token=self._aws_session_token,
151
+ profile_name=self._aws_profile_name,
152
+ )
153
+ self.bedrock_runtime = session.client(service_name="bedrock-runtime", config=bedrock_config)
154
+
155
+ def _get_response_format_schema(self, response_format: BaseModel | dict[str, Any]) -> dict[str, Any]:
156
+ """Extract and normalize JSON schema from response_format.
157
+
158
+ Args:
159
+ response_format: Either a Pydantic BaseModel subclass or a dict containing JSON schema.
160
+
161
+ Returns:
162
+ Normalized JSON schema dict.
163
+ """
164
+ schema = response_format.copy() if isinstance(response_format, dict) else response_format.model_json_schema()
165
+
166
+ # Ensure root is an object type
167
+ if "type" not in schema:
168
+ schema["type"] = "object"
169
+ elif schema.get("type") != "object":
170
+ # Wrap in object if not already
171
+ schema = {"type": "object", "properties": {"data": schema}, "required": ["data"]}
172
+
173
+ # Ensure properties and required exist
174
+ if "properties" not in schema:
175
+ schema["properties"] = {}
176
+ if "required" not in schema:
177
+ schema["required"] = []
178
+
179
+ return schema
180
+
181
+ def _create_structured_output_tool(self, response_format: BaseModel | dict[str, Any]) -> dict[str, Any]:
182
+ """Convert response_format into a Bedrock tool definition for structured outputs.
183
+
184
+ Args:
185
+ response_format: Either a Pydantic BaseModel subclass or a dict containing JSON schema.
186
+
187
+ Returns:
188
+ Tool definition compatible with format_tools().
189
+ """
190
+ schema = self._get_response_format_schema(response_format)
191
+
192
+ # Create tool definition matching the format expected by format_tools
193
+ return {
194
+ "type": "function",
195
+ "function": {
196
+ "name": "__structured_output",
197
+ "description": "Generate structured output matching the specified schema",
198
+ "parameters": schema,
199
+ },
200
+ }
201
+
202
+ def _merge_tools_with_structured_output(
203
+ self, user_tools: list[dict[str, Any]], structured_output_tool: dict[str, Any]
204
+ ) -> dict[Literal["tools"], list[dict[str, Any]]]:
205
+ """Merge user tools with structured output tool.
206
+
207
+ Args:
208
+ user_tools: List of user-defined tool definitions (can be empty).
209
+ structured_output_tool: The structured output tool from _create_structured_output_tool().
210
+
211
+ Returns:
212
+ Dict with "tools" key containing all tools in Bedrock format.
213
+ """
214
+ all_tools = list(user_tools) if user_tools else []
215
+ all_tools.append(structured_output_tool)
216
+ return format_tools(all_tools)
217
+
218
+ def _extract_structured_output_from_tool_call(
219
+ self, tool_calls: list[ChatCompletionMessageToolCall]
220
+ ) -> dict[str, Any] | None:
221
+ """Extract structured output data from tool call response.
222
+
223
+ Args:
224
+ tool_calls: List of tool calls from Bedrock response.
225
+
226
+ Returns:
227
+ Parsed JSON dict from __structured_output tool call, or None if not found.
228
+ """
229
+ for tool_call in tool_calls:
230
+ if tool_call.function.name == "__structured_output":
231
+ try:
232
+ return json.loads(tool_call.function.arguments)
233
+ except json.JSONDecodeError as e:
234
+ raise ValueError(f"Failed to parse structured output from tool call: {e!s}") from e
235
+ return None
236
+
237
+ def _validate_and_format_structured_output(self, structured_data: dict[str, Any]) -> str:
238
+ """Validate structured data against schema and format for response message.
239
+
240
+ Args:
241
+ structured_data: Parsed dict from tool call.
242
+
243
+ Returns:
244
+ Formatted string representation of structured output.
245
+ """
246
+ if not self._response_format:
247
+ return json.dumps(structured_data)
248
+
249
+ try:
250
+ # Validate against schema
251
+ if isinstance(self._response_format, dict):
252
+ # For dict schemas, just return JSON
253
+ validated_data = structured_data
254
+ else:
255
+ # Pydantic model - validate
256
+ validated_data = self._response_format.model_validate(structured_data)
257
+
258
+ # Format the response
259
+ from .client_utils import FormatterProtocol
260
+
261
+ if isinstance(validated_data, FormatterProtocol):
262
+ return validated_data.format()
263
+ elif hasattr(validated_data, "model_dump_json"):
264
+ # Pydantic model
265
+ return validated_data.model_dump_json()
266
+ else:
267
+ return json.dumps(structured_data)
268
+
269
+ except Exception as e:
270
+ raise ValueError(f"Failed to validate structured output against schema: {e!s}") from e
271
+
272
+ def message_retrieval(self, response):
273
+ """Retrieve the messages from the response."""
274
+ return [choice.message for choice in response.choices]
275
+
276
+ def parse_custom_params(self, params: dict[str, Any]):
277
+ """Parses custom parameters for logic in this client class"""
278
+ # Should we separate system messages into its own request parameter, default is True
279
+ # This is required because not all models support a system prompt (e.g. Mistral Instruct).
280
+ self._supports_system_prompts = params.get("supports_system_prompts", True)
281
+
282
+ def parse_params(self, params: BedrockEntryDict | dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
283
+ """Loads the valid parameters required to invoke Bedrock Converse
284
+ Returns a tuple of (base_params, additional_params)
285
+ """
286
+ # Amazon Bedrock base model IDs are here:
287
+ # https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
288
+ self._model_id = params.get("model")
289
+ assert self._model_id, "Please provide the 'model` in the config_list to use Amazon Bedrock"
290
+
291
+ # Parameters vary based on the model used.
292
+ # As we won't cater for all models and parameters, it's the developer's
293
+ # responsibility to implement the parameters and they will only be
294
+ # included if the developer has it in the config.
295
+ #
296
+ # Important:
297
+ # No defaults will be used (as they can vary per model)
298
+ # No ranges will be used (as they can vary)
299
+ # We will cover all the main parameters but there may be others
300
+ # that need to be added later
301
+ #
302
+ # Here are some pages that show the parameters available for different models
303
+ # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-titan-text.html
304
+ # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-text-completion.html
305
+ # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-cohere-command-r-plus.html
306
+ # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html
307
+ # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-mistral-chat-completion.html
308
+
309
+ # Here are the possible "base" parameters and their suitable types
310
+ base_params = {}
311
+
312
+ if "temperature" in params:
313
+ base_params["temperature"] = validate_parameter(
314
+ params, "temperature", (float, int), False, None, None, None
315
+ )
316
+
317
+ if "top_p" in params:
318
+ base_params["topP"] = validate_parameter(params, "top_p", (float, int), False, None, None, None)
319
+
320
+ if "max_tokens" in params:
321
+ base_params["maxTokens"] = validate_parameter(params, "max_tokens", (int,), False, None, None, None)
322
+
323
+ # Here are the possible "model-specific" parameters and their suitable types, known as additional parameters
324
+ additional_params = {}
325
+
326
+ for param_name, suitable_types in (
327
+ ("top_k", (int,)),
328
+ ("k", (int,)),
329
+ ("seed", (int,)),
330
+ ):
331
+ if param_name in params:
332
+ additional_params[param_name] = validate_parameter(
333
+ params, param_name, suitable_types, False, None, None, None
334
+ )
335
+
336
+ # For this release we will not support streaming as many models do not support streaming with tool use
337
+ if params.get("stream", False):
338
+ warnings.warn(
339
+ "Streaming is not currently supported, streaming will be disabled.",
340
+ UserWarning,
341
+ )
342
+
343
+ return base_params, additional_params
344
+
345
    def create(self, params) -> ChatCompletion:
        """Run Amazon Bedrock inference and return AG2 response

        Builds a Bedrock Converse request from OpenAI-style params (messages, tools,
        inference settings, optional response_format), invokes the runtime, and maps
        the result back to an OpenAI-compatible ChatCompletion.
        """
        # Set custom client class settings
        self.parse_custom_params(params)

        # Parse the inference parameters
        base_params, additional_params = self.parse_params(params)

        # Handle response_format for structured outputs
        has_response_format = params.get("response_format") is not None
        if has_response_format:
            self._response_format = params["response_format"]
            # Structured output is implemented as a synthetic "__structured_output" tool.
            structured_output_tool = self._create_structured_output_tool(params["response_format"])

            # Merge with user tools if any
            user_tools = params.get("tools", [])
            tool_config = self._merge_tools_with_structured_output(user_tools, structured_output_tool)

            # Force the structured output tool
            tool_config["toolChoice"] = {"tool": {"name": "__structured_output"}}
            has_tools = len(tool_config["tools"]) > 0
        else:
            has_tools = "tools" in params
            tool_config = format_tools(params["tools"] if has_tools else [])
            # Re-derive from the converted config in case conversion dropped everything.
            has_tools = len(tool_config["tools"]) > 0

        # Convert to Bedrock's alternating user/assistant message format.
        messages = oai_messages_to_bedrock_messages(
            params["messages"], has_tools or has_response_format, self._supports_system_prompts
        )

        if self._supports_system_prompts:
            system_messages = extract_system_messages(params["messages"])

        request_args = {"messages": messages, "modelId": self._model_id}

        # Base and additional args
        if len(base_params) > 0:
            request_args["inferenceConfig"] = base_params

        if len(additional_params) > 0:
            request_args["additionalModelRequestFields"] = additional_params

        if self._supports_system_prompts:
            request_args["system"] = system_messages

        # Bedrock rejects an empty toolConfig, so only attach it when tools exist.
        if len(tool_config["tools"]) > 0:
            request_args["toolConfig"] = tool_config

        response = self.bedrock_runtime.converse(**request_args)
        if response is None:
            raise RuntimeError(f"Failed to get response from Bedrock after retrying {self._retries} times.")

        finish_reason = convert_stop_reason_to_finish_reason(response["stopReason"])
        response_message = response["output"]["message"]

        tool_calls = format_tool_calls(response_message["content"]) if finish_reason == "tool_calls" else None

        # Extract structured output if response_format was used
        text = ""
        if has_response_format and finish_reason == "tool_calls" and tool_calls:
            structured_data = self._extract_structured_output_from_tool_call(tool_calls)
            if structured_data:
                text = self._validate_and_format_structured_output(structured_data)
            else:
                # Fallback: extract text content if tool call extraction failed
                for content in response_message["content"]:
                    if "text" in content:
                        text = content["text"]
                        break
        else:
            # Normal text extraction
            for content in response_message["content"]:
                if "text" in content:
                    text = content["text"]
                    # NOTE: other types of output may be dealt with here
                    break

        message = ChatCompletionMessage(role="assistant", content=text, tool_calls=tool_calls)

        # Map Bedrock token accounting onto OpenAI's usage structure.
        response_usage = response["usage"]
        usage = CompletionUsage(
            prompt_tokens=response_usage["inputTokens"],
            completion_tokens=response_usage["outputTokens"],
            total_tokens=response_usage["totalTokens"],
        )

        return ChatCompletion(
            id=response["ResponseMetadata"]["RequestId"],
            choices=[Choice(finish_reason=finish_reason, index=0, message=message)],
            created=int(time.time()),
            model=self._model_id,
            object="chat.completion",
            usage=usage,
        )
439
+
440
+ def cost(self, response: ChatCompletion) -> float:
441
+ """Calculate the cost of the response."""
442
+ return calculate_cost(response.usage.prompt_tokens, response.usage.completion_tokens, response.model)
443
+
444
+ @staticmethod
445
+ def get_usage(response) -> dict:
446
+ """Get the usage of tokens and their cost information."""
447
+ return {
448
+ "prompt_tokens": response.usage.prompt_tokens,
449
+ "completion_tokens": response.usage.completion_tokens,
450
+ "total_tokens": response.usage.total_tokens,
451
+ "cost": response.cost,
452
+ "model": response.model,
453
+ }
454
+
455
+
456
def extract_system_messages(messages: list[dict[str, Any]]) -> list:
    """Extract the system messages from the list of messages.

    Only the first system message found is returned (the loop returns on the
    first match), converted to Bedrock's ``[{"text": ...}]`` content format.

    Args:
        messages (list[dict[str, Any]]): List of messages.

    Returns:
        list: Bedrock-formatted system content (at most one entry); empty list if
        no system message exists.
    """
    for message in messages:
        if message.get("role") == "system":
            content = message.get("content")
            # Content may be a plain string or an OAI-style list of content parts.
            if isinstance(content, str):
                return [{"text": content}]
            return [{"text": content[0]["text"]}]
    return []
477
+
478
+
479
def oai_messages_to_bedrock_messages(
    messages: list[dict[str, Any]], has_tools: bool, supports_system_prompts: bool
) -> list[dict[str, Any]]:
    """Convert messages from OAI format to Bedrock format.

    We correct for any specific role orders and types, etc.
    AWS Bedrock requires messages to alternate between user and assistant roles. This function ensures that the
    messages are in the correct order and format for Bedrock by inserting "Please continue" messages as needed.
    This is the same method as the one in the Autogen Anthropic client.

    Args:
        messages: Messages in OpenAI chat-completion format.
        has_tools: Whether a tools parameter accompanies the request. When False, tool use/result
            messages are converted to plain text, because Bedrock rejects tool messages without
            a tools parameter listing them (can occur e.g. for group chat speaker selection).
        supports_system_prompts: Whether the model accepts a dedicated system prompt. When False,
            system messages are relabelled as user messages instead of being removed.

    Returns:
        Messages in Bedrock Converse format with strict user/assistant alternation,
        always ending with a user message.
    """
    # Take out system messages if the model supports it, otherwise leave them in.
    if supports_system_prompts:
        messages = [x for x in messages if x["role"] != "system"]
    else:
        # Replace role="system" with role="user"
        for msg in messages:
            if msg["role"] == "system":
                msg["role"] = "user"

    processed_messages = []

    # Used to interweave user messages to ensure user/assistant alternating
    user_continue_message = {"content": [{"text": "Please continue."}], "role": "user"}
    assistant_continue_message = {
        "content": [{"text": "Please continue."}],
        "role": "assistant",
    }

    tool_use_messages = 0
    tool_result_messages = 0
    last_tool_use_index = -1
    last_tool_result_index = -1
    for message in messages:
        # New messages will be added here, manage role alternations
        expected_role = "user" if len(processed_messages) % 2 == 0 else "assistant"

        if "tool_calls" in message:
            # Map the tool call options to Bedrock's format
            tool_uses = []
            tool_names = []
            for tool_call in message["tool_calls"]:
                tool_uses.append({
                    "toolUse": {
                        "toolUseId": tool_call["id"],
                        "name": tool_call["function"]["name"],
                        "input": json.loads(tool_call["function"]["arguments"]),
                    }
                })
                if has_tools:
                    tool_use_messages += 1
                tool_names.append(tool_call["function"]["name"])

            if expected_role == "user":
                # Insert an extra user message as we will append an assistant message
                processed_messages.append(user_continue_message)

            if has_tools:
                processed_messages.append({"role": "assistant", "content": tool_uses})
                last_tool_use_index = len(processed_messages) - 1
            else:
                # Not using tools, so put in a plain text message
                processed_messages.append({
                    "role": "assistant",
                    "content": [{"text": f"Some internal function(s) that could be used: [{', '.join(tool_names)}]"}],
                })
        elif "tool_call_id" in message:
            if has_tools:
                # Map the tool usage call to tool_result for Bedrock
                tool_result = {
                    "toolResult": {
                        "toolUseId": message["tool_call_id"],
                        "content": [{"text": message["content"]}],
                    }
                }

                # If the previous message also had a tool_result, add it to that
                # Otherwise append a new message
                if last_tool_result_index == len(processed_messages) - 1:
                    processed_messages[-1]["content"].append(tool_result)
                else:
                    if expected_role == "assistant":
                        # Insert an extra assistant message as we will append a user message
                        processed_messages.append(assistant_continue_message)

                    processed_messages.append({"role": "user", "content": [tool_result]})
                    last_tool_result_index = len(processed_messages) - 1

                tool_result_messages += 1
            else:
                # Not using tools, so put in a plain text message
                processed_messages.append({
                    "role": "user",
                    "content": [{"text": f"Running the function returned: {message['content']}"}],
                })
        elif message["content"] == "":
            # Ignoring empty messages
            pass
        else:
            if expected_role != message["role"] and not (len(processed_messages) == 0 and message["role"] == "system"):
                # Inserting the alternating continue message (ignore if it's the first message and a system message)
                processed_messages.append(
                    user_continue_message if expected_role == "user" else assistant_continue_message
                )

            processed_messages.append({
                "role": message["role"],
                "content": parse_content_parts(message=message),
            })

    # We'll replace the last tool_use if there's no tool_result (occurs if we finish the conversation before running the function)
    if has_tools and tool_use_messages != tool_result_messages:
        processed_messages[last_tool_use_index] = assistant_continue_message

    # name is not a valid field on messages
    for message in processed_messages:
        message.pop("name", None)

    # Note: When using reflection_with_llm we may end up with an "assistant" message as the last message and that may cause a blank response
    # So, if the last role is not user, add a 'user' continue message at the end.
    # Also guard against an empty result (e.g. every incoming message was a filtered system message
    # or had empty content), which previously raised IndexError on processed_messages[-1].
    if not processed_messages or processed_messages[-1]["role"] != "user":
        processed_messages.append(user_continue_message)

    return processed_messages
609
+
610
+
611
def parse_content_parts(
    message: dict[str, Any],
) -> list[dict[str, Any]]:
    """Convert an OAI message's content into Bedrock content blocks.

    A plain-string content becomes a single text block. List content keeps
    "text" and "image_url" parts; any other part type is silently dropped.
    """
    content: str | list[dict[str, Any]] = message.get("content")
    if isinstance(content, str):
        return [{"text": content}]

    blocks = []
    for part in content:
        if "text" in part:
            blocks.append({"text": part.get("text")})
        elif "image_url" in part:
            raw_bytes, mime_type = parse_image(part.get("image_url").get("url"))
            blocks.append({
                "image": {
                    "format": mime_type[6:],  # strip the leading "image/"
                    "source": {"bytes": raw_bytes},
                },
            })
        # Unrecognised part types are skipped.
    return blocks
640
+
641
+
642
def parse_image(image_url: str) -> tuple[bytes, str]:
    """Try to get the raw data from an image url.

    Ref: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ImageSource.html
    returns a tuple of (Image Data, Content Type)
    """
    data_url_pattern = r"^data:(image/[a-z]*);base64,\s*"
    match = re.search(data_url_pattern, image_url)
    if match:
        # Already base64 encoded. Bedrock only supports
        # 'image/jpeg', 'image/png', 'image/gif' or 'image/webp'.
        encoded = re.sub(data_url_pattern, "", image_url)
        return base64.b64decode(encoded), match.group(1)

    # Not a data URL: fetch the image over HTTP.
    response = requests.get(image_url)
    if response.status_code != 200:
        raise RuntimeError("Unable to access the image url")

    content_type = response.headers.get("Content-Type")
    if not content_type.startswith("image"):
        # Fall back to JPEG when the server reports a non-image content type.
        content_type = "image/jpeg"
    return response.content, content_type
668
+
669
+
670
def format_tools(tools: list[dict[str, Any]]) -> dict[Literal["tools"], list[dict[str, Any]]]:
    """Convert OpenAI-style tool definitions to Bedrock Converse toolSpec format.

    Args:
        tools: Tool definitions; only entries with type "function" are converted,
            anything else is skipped.

    Returns:
        Dict with a "tools" key holding the Bedrock toolSpec entries.

    Raises:
        TypeError: If a property's schema is not a dict.
    """
    # JSON-schema keys copied through verbatim for each property.
    schema_keys = (
        "type",
        "enum",
        "default",
        "anyOf",
        "oneOf",
        "allOf",
        "items",
        "const",
        "format",
        "minimum",
        "maximum",
        "minItems",
        "maxItems",
        "minLength",
        "maxLength",
        "pattern",
        "additionalProperties",
    )

    converted_schema = {"tools": []}

    for tool in tools:
        if tool["type"] != "function":
            continue

        function = tool["function"]
        json_schema = {"type": "object", "properties": {}, "required": []}
        converted_tool = {
            "toolSpec": {
                "name": function["name"],
                # Tools may omit a description; Bedrock still requires the field.
                "description": function.get("description", ""),
                "inputSchema": {"json": json_schema},
            }
        }

        for prop_name, prop_details in function["parameters"]["properties"].items():
            if not isinstance(prop_details, dict):
                raise TypeError(f"Property '{prop_name}' schema must be a dict, got {type(prop_details)!r}")

            prop_schema: dict[str, Any] = {"description": prop_details.get("description", "")}

            # Single pass over the allowed keys; previously "enum" and "default"
            # were redundantly copied a second time after this loop.
            for key in schema_keys:
                if key in prop_details:
                    prop_schema[key] = prop_details[key]

            json_schema["properties"][prop_name] = prop_schema

        if "required" in function["parameters"]:
            json_schema["required"] = function["parameters"]["required"]

        converted_schema["tools"].append(converted_tool)

    return converted_schema
728
+
729
+
730
def format_tool_calls(content):
    """Converts Converse API response tool calls to AG2 format.

    Only content blocks carrying a "toolUse" entry are converted;
    everything else is ignored.
    """
    calls = []
    for block in content:
        if "toolUse" not in block:
            continue
        tool = block["toolUse"]
        calls.append(
            ChatCompletionMessageToolCall(
                id=tool["toolUseId"],
                function={
                    "name": tool["name"],
                    "arguments": json.dumps(tool["input"]),
                },
                type="function",
            )
        )
    return calls
748
+
749
+
750
def convert_stop_reason_to_finish_reason(
    stop_reason: str,
) -> Literal["stop", "length", "tool_calls", "content_filter"]:
    """Converts Bedrock finish reasons to our finish reasons, according to OpenAI:

    - stop: if the model hit a natural stop point or a provided stop sequence,
    - length: if the maximum number of tokens specified in the request was reached,
    - content_filter: if content was omitted due to a flag from our content filters,
    - tool_calls: if the model called a tool

    Unknown (non-empty) stop reasons are returned lower-cased with a warning;
    an empty/None stop reason yields None with a warning.
    """
    if not stop_reason:
        warnings.warn(f"Unsupported stop reason: {stop_reason}", UserWarning)
        return None

    finish_reason_mapping = {
        "tool_use": "tool_calls",
        "finished": "stop",
        "end_turn": "stop",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "complete": "stop",
        "content_filtered": "content_filter",
    }
    normalized = stop_reason.lower()
    if normalized not in finish_reason_mapping:
        # Previously this warning could never fire for an actual unknown reason
        # (the mapping's .get default silently passed it through).
        warnings.warn(f"Unsupported stop reason: {stop_reason}", UserWarning)
        return normalized
    return finish_reason_mapping[normalized]
774
+
775
+
776
# NOTE: As this will be quite dynamic, it's expected that the developer will use the "price" parameter in their config
# These may be removed.
# Values are (input price, output price) per 1K tokens, in USD.
PRICES_PER_K_TOKENS = {
    "meta.llama3-8b-instruct-v1:0": (0.0003, 0.0006),
    "meta.llama3-70b-instruct-v1:0": (0.00265, 0.0035),
    "mistral.mistral-7b-instruct-v0:2": (0.00015, 0.0002),
    "mistral.mixtral-8x7b-instruct-v0:1": (0.00045, 0.0007),
    "mistral.mistral-large-2402-v1:0": (0.004, 0.012),
    "mistral.mistral-small-2402-v1:0": (0.001, 0.003),
}


def calculate_cost(input_tokens: int, output_tokens: int, model_id: str) -> float:
    """Calculate the cost of the completion using the Bedrock pricing.

    Unknown models cost 0 and emit a warning suggesting the "price" config field.
    """
    if model_id not in PRICES_PER_K_TOKENS:
        warnings.warn(
            f'Cannot get the costs for {model_id}. The cost will be 0. In your config_list, add field {{"price" : [prompt_price_per_1k, completion_token_price_per_1k]}} for customized pricing.',
            UserWarning,
        )
        return 0

    input_cost_per_k, output_cost_per_k = PRICES_PER_K_TOKENS[model_id]
    # Prices are quoted per 1K tokens.
    input_cost = (input_tokens / 1000) * input_cost_per_k
    output_cost = (output_tokens / 1000) * output_cost_per_k
    return input_cost + output_cost