ag2 0.9.1a1__py3-none-any.whl → 0.9.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (371)
  1. {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/METADATA +272 -75
  2. ag2-0.9.2.dist-info/RECORD +406 -0
  3. {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/WHEEL +1 -2
  4. autogen/__init__.py +89 -0
  5. autogen/_website/__init__.py +3 -0
  6. autogen/_website/generate_api_references.py +427 -0
  7. autogen/_website/generate_mkdocs.py +1174 -0
  8. autogen/_website/notebook_processor.py +476 -0
  9. autogen/_website/process_notebooks.py +656 -0
  10. autogen/_website/utils.py +412 -0
  11. autogen/agentchat/__init__.py +44 -0
  12. autogen/agentchat/agent.py +182 -0
  13. autogen/agentchat/assistant_agent.py +85 -0
  14. autogen/agentchat/chat.py +309 -0
  15. autogen/agentchat/contrib/__init__.py +5 -0
  16. autogen/agentchat/contrib/agent_eval/README.md +7 -0
  17. autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
  18. autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
  19. autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
  20. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
  21. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
  22. autogen/agentchat/contrib/agent_eval/task.py +42 -0
  23. autogen/agentchat/contrib/agent_optimizer.py +429 -0
  24. autogen/agentchat/contrib/capabilities/__init__.py +5 -0
  25. autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
  26. autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
  27. autogen/agentchat/contrib/capabilities/teachability.py +393 -0
  28. autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
  29. autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
  30. autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
  31. autogen/agentchat/contrib/capabilities/transforms.py +566 -0
  32. autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
  33. autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
  34. autogen/agentchat/contrib/captainagent/__init__.py +9 -0
  35. autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
  36. autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
  37. autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
  38. autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
  39. autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
  40. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
  41. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
  42. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
  43. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
  44. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
  45. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
  46. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
  47. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
  48. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
  49. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
  50. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
  51. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
  52. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
  53. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
  54. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
  55. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
  56. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
  57. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
  58. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
  59. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
  60. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
  61. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
  62. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
  63. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
  64. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
  65. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
  66. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
  67. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
  68. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
  69. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
  70. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
  71. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
  72. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
  73. autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
  74. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
  75. autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
  76. autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
  77. autogen/agentchat/contrib/graph_rag/document.py +29 -0
  78. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
  79. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
  80. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
  81. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
  82. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
  83. autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
  84. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
  85. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
  86. autogen/agentchat/contrib/img_utils.py +397 -0
  87. autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
  88. autogen/agentchat/contrib/llava_agent.py +187 -0
  89. autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
  90. autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
  91. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
  92. autogen/agentchat/contrib/rag/__init__.py +10 -0
  93. autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
  94. autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
  95. autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
  96. autogen/agentchat/contrib/rag/query_engine.py +74 -0
  97. autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
  98. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
  99. autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
  100. autogen/agentchat/contrib/swarm_agent.py +1425 -0
  101. autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
  102. autogen/agentchat/contrib/vectordb/__init__.py +5 -0
  103. autogen/agentchat/contrib/vectordb/base.py +232 -0
  104. autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
  105. autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
  106. autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
  107. autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
  108. autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
  109. autogen/agentchat/contrib/vectordb/utils.py +126 -0
  110. autogen/agentchat/contrib/web_surfer.py +303 -0
  111. autogen/agentchat/conversable_agent.py +4023 -0
  112. autogen/agentchat/group/__init__.py +64 -0
  113. autogen/agentchat/group/available_condition.py +91 -0
  114. autogen/agentchat/group/context_condition.py +77 -0
  115. autogen/agentchat/group/context_expression.py +238 -0
  116. autogen/agentchat/group/context_str.py +41 -0
  117. autogen/agentchat/group/context_variables.py +192 -0
  118. autogen/agentchat/group/group_tool_executor.py +202 -0
  119. autogen/agentchat/group/group_utils.py +591 -0
  120. autogen/agentchat/group/handoffs.py +244 -0
  121. autogen/agentchat/group/llm_condition.py +93 -0
  122. autogen/agentchat/group/multi_agent_chat.py +237 -0
  123. autogen/agentchat/group/on_condition.py +58 -0
  124. autogen/agentchat/group/on_context_condition.py +54 -0
  125. autogen/agentchat/group/patterns/__init__.py +18 -0
  126. autogen/agentchat/group/patterns/auto.py +159 -0
  127. autogen/agentchat/group/patterns/manual.py +176 -0
  128. autogen/agentchat/group/patterns/pattern.py +288 -0
  129. autogen/agentchat/group/patterns/random.py +106 -0
  130. autogen/agentchat/group/patterns/round_robin.py +117 -0
  131. autogen/agentchat/group/reply_result.py +26 -0
  132. autogen/agentchat/group/speaker_selection_result.py +41 -0
  133. autogen/agentchat/group/targets/__init__.py +4 -0
  134. autogen/agentchat/group/targets/group_chat_target.py +132 -0
  135. autogen/agentchat/group/targets/group_manager_target.py +151 -0
  136. autogen/agentchat/group/targets/transition_target.py +413 -0
  137. autogen/agentchat/group/targets/transition_utils.py +6 -0
  138. autogen/agentchat/groupchat.py +1694 -0
  139. autogen/agentchat/realtime/__init__.py +3 -0
  140. autogen/agentchat/realtime/experimental/__init__.py +20 -0
  141. autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
  142. autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
  143. autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
  144. autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
  145. autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
  146. autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
  147. autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
  148. autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
  149. autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
  150. autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
  151. autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
  152. autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
  153. autogen/agentchat/realtime/experimental/function_observer.py +85 -0
  154. autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
  155. autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
  156. autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
  157. autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
  158. autogen/agentchat/realtime/experimental/websockets.py +21 -0
  159. autogen/agentchat/realtime_agent/__init__.py +21 -0
  160. autogen/agentchat/user_proxy_agent.py +111 -0
  161. autogen/agentchat/utils.py +206 -0
  162. autogen/agents/__init__.py +3 -0
  163. autogen/agents/contrib/__init__.py +10 -0
  164. autogen/agents/contrib/time/__init__.py +8 -0
  165. autogen/agents/contrib/time/time_reply_agent.py +73 -0
  166. autogen/agents/contrib/time/time_tool_agent.py +51 -0
  167. autogen/agents/experimental/__init__.py +27 -0
  168. autogen/agents/experimental/deep_research/__init__.py +7 -0
  169. autogen/agents/experimental/deep_research/deep_research.py +52 -0
  170. autogen/agents/experimental/discord/__init__.py +7 -0
  171. autogen/agents/experimental/discord/discord.py +66 -0
  172. autogen/agents/experimental/document_agent/__init__.py +19 -0
  173. autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
  174. autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
  175. autogen/agents/experimental/document_agent/document_agent.py +461 -0
  176. autogen/agents/experimental/document_agent/document_conditions.py +50 -0
  177. autogen/agents/experimental/document_agent/document_utils.py +380 -0
  178. autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
  179. autogen/agents/experimental/document_agent/parser_utils.py +130 -0
  180. autogen/agents/experimental/document_agent/url_utils.py +426 -0
  181. autogen/agents/experimental/reasoning/__init__.py +7 -0
  182. autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
  183. autogen/agents/experimental/slack/__init__.py +7 -0
  184. autogen/agents/experimental/slack/slack.py +73 -0
  185. autogen/agents/experimental/telegram/__init__.py +7 -0
  186. autogen/agents/experimental/telegram/telegram.py +77 -0
  187. autogen/agents/experimental/websurfer/__init__.py +7 -0
  188. autogen/agents/experimental/websurfer/websurfer.py +62 -0
  189. autogen/agents/experimental/wikipedia/__init__.py +7 -0
  190. autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
  191. autogen/browser_utils.py +309 -0
  192. autogen/cache/__init__.py +10 -0
  193. autogen/cache/abstract_cache_base.py +75 -0
  194. autogen/cache/cache.py +203 -0
  195. autogen/cache/cache_factory.py +88 -0
  196. autogen/cache/cosmos_db_cache.py +144 -0
  197. autogen/cache/disk_cache.py +102 -0
  198. autogen/cache/in_memory_cache.py +58 -0
  199. autogen/cache/redis_cache.py +123 -0
  200. autogen/code_utils.py +596 -0
  201. autogen/coding/__init__.py +22 -0
  202. autogen/coding/base.py +119 -0
  203. autogen/coding/docker_commandline_code_executor.py +268 -0
  204. autogen/coding/factory.py +47 -0
  205. autogen/coding/func_with_reqs.py +202 -0
  206. autogen/coding/jupyter/__init__.py +23 -0
  207. autogen/coding/jupyter/base.py +36 -0
  208. autogen/coding/jupyter/docker_jupyter_server.py +167 -0
  209. autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
  210. autogen/coding/jupyter/import_utils.py +82 -0
  211. autogen/coding/jupyter/jupyter_client.py +231 -0
  212. autogen/coding/jupyter/jupyter_code_executor.py +160 -0
  213. autogen/coding/jupyter/local_jupyter_server.py +172 -0
  214. autogen/coding/local_commandline_code_executor.py +405 -0
  215. autogen/coding/markdown_code_extractor.py +45 -0
  216. autogen/coding/utils.py +56 -0
  217. autogen/doc_utils.py +34 -0
  218. autogen/events/__init__.py +7 -0
  219. autogen/events/agent_events.py +1013 -0
  220. autogen/events/base_event.py +99 -0
  221. autogen/events/client_events.py +167 -0
  222. autogen/events/helpers.py +36 -0
  223. autogen/events/print_event.py +46 -0
  224. autogen/exception_utils.py +73 -0
  225. autogen/extensions/__init__.py +5 -0
  226. autogen/fast_depends/__init__.py +16 -0
  227. autogen/fast_depends/_compat.py +80 -0
  228. autogen/fast_depends/core/__init__.py +14 -0
  229. autogen/fast_depends/core/build.py +225 -0
  230. autogen/fast_depends/core/model.py +576 -0
  231. autogen/fast_depends/dependencies/__init__.py +15 -0
  232. autogen/fast_depends/dependencies/model.py +29 -0
  233. autogen/fast_depends/dependencies/provider.py +39 -0
  234. autogen/fast_depends/library/__init__.py +10 -0
  235. autogen/fast_depends/library/model.py +46 -0
  236. autogen/fast_depends/py.typed +6 -0
  237. autogen/fast_depends/schema.py +66 -0
  238. autogen/fast_depends/use.py +280 -0
  239. autogen/fast_depends/utils.py +187 -0
  240. autogen/formatting_utils.py +83 -0
  241. autogen/function_utils.py +13 -0
  242. autogen/graph_utils.py +178 -0
  243. autogen/import_utils.py +526 -0
  244. autogen/interop/__init__.py +22 -0
  245. autogen/interop/crewai/__init__.py +7 -0
  246. autogen/interop/crewai/crewai.py +88 -0
  247. autogen/interop/interoperability.py +71 -0
  248. autogen/interop/interoperable.py +46 -0
  249. autogen/interop/langchain/__init__.py +8 -0
  250. autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
  251. autogen/interop/langchain/langchain_tool.py +82 -0
  252. autogen/interop/litellm/__init__.py +7 -0
  253. autogen/interop/litellm/litellm_config_factory.py +179 -0
  254. autogen/interop/pydantic_ai/__init__.py +7 -0
  255. autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
  256. autogen/interop/registry.py +69 -0
  257. autogen/io/__init__.py +15 -0
  258. autogen/io/base.py +151 -0
  259. autogen/io/console.py +56 -0
  260. autogen/io/processors/__init__.py +12 -0
  261. autogen/io/processors/base.py +21 -0
  262. autogen/io/processors/console_event_processor.py +56 -0
  263. autogen/io/run_response.py +293 -0
  264. autogen/io/thread_io_stream.py +63 -0
  265. autogen/io/websockets.py +213 -0
  266. autogen/json_utils.py +43 -0
  267. autogen/llm_config.py +382 -0
  268. autogen/logger/__init__.py +11 -0
  269. autogen/logger/base_logger.py +128 -0
  270. autogen/logger/file_logger.py +261 -0
  271. autogen/logger/logger_factory.py +42 -0
  272. autogen/logger/logger_utils.py +57 -0
  273. autogen/logger/sqlite_logger.py +523 -0
  274. autogen/math_utils.py +339 -0
  275. autogen/mcp/__init__.py +7 -0
  276. autogen/mcp/__main__.py +78 -0
  277. autogen/mcp/mcp_client.py +208 -0
  278. autogen/mcp/mcp_proxy/__init__.py +19 -0
  279. autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +63 -0
  280. autogen/mcp/mcp_proxy/mcp_proxy.py +581 -0
  281. autogen/mcp/mcp_proxy/operation_grouping.py +158 -0
  282. autogen/mcp/mcp_proxy/operation_renaming.py +114 -0
  283. autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
  284. autogen/mcp/mcp_proxy/security.py +400 -0
  285. autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
  286. autogen/messages/__init__.py +7 -0
  287. autogen/messages/agent_messages.py +948 -0
  288. autogen/messages/base_message.py +107 -0
  289. autogen/messages/client_messages.py +171 -0
  290. autogen/messages/print_message.py +49 -0
  291. autogen/oai/__init__.py +53 -0
  292. autogen/oai/anthropic.py +714 -0
  293. autogen/oai/bedrock.py +628 -0
  294. autogen/oai/cerebras.py +299 -0
  295. autogen/oai/client.py +1444 -0
  296. autogen/oai/client_utils.py +169 -0
  297. autogen/oai/cohere.py +479 -0
  298. autogen/oai/gemini.py +998 -0
  299. autogen/oai/gemini_types.py +155 -0
  300. autogen/oai/groq.py +305 -0
  301. autogen/oai/mistral.py +303 -0
  302. autogen/oai/oai_models/__init__.py +11 -0
  303. autogen/oai/oai_models/_models.py +16 -0
  304. autogen/oai/oai_models/chat_completion.py +87 -0
  305. autogen/oai/oai_models/chat_completion_audio.py +32 -0
  306. autogen/oai/oai_models/chat_completion_message.py +86 -0
  307. autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
  308. autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
  309. autogen/oai/oai_models/completion_usage.py +60 -0
  310. autogen/oai/ollama.py +643 -0
  311. autogen/oai/openai_utils.py +881 -0
  312. autogen/oai/together.py +370 -0
  313. autogen/retrieve_utils.py +491 -0
  314. autogen/runtime_logging.py +160 -0
  315. autogen/token_count_utils.py +267 -0
  316. autogen/tools/__init__.py +20 -0
  317. autogen/tools/contrib/__init__.py +9 -0
  318. autogen/tools/contrib/time/__init__.py +7 -0
  319. autogen/tools/contrib/time/time.py +41 -0
  320. autogen/tools/dependency_injection.py +254 -0
  321. autogen/tools/experimental/__init__.py +48 -0
  322. autogen/tools/experimental/browser_use/__init__.py +7 -0
  323. autogen/tools/experimental/browser_use/browser_use.py +161 -0
  324. autogen/tools/experimental/crawl4ai/__init__.py +7 -0
  325. autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
  326. autogen/tools/experimental/deep_research/__init__.py +7 -0
  327. autogen/tools/experimental/deep_research/deep_research.py +328 -0
  328. autogen/tools/experimental/duckduckgo/__init__.py +7 -0
  329. autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
  330. autogen/tools/experimental/google/__init__.py +14 -0
  331. autogen/tools/experimental/google/authentication/__init__.py +11 -0
  332. autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
  333. autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
  334. autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
  335. autogen/tools/experimental/google/drive/__init__.py +9 -0
  336. autogen/tools/experimental/google/drive/drive_functions.py +124 -0
  337. autogen/tools/experimental/google/drive/toolkit.py +88 -0
  338. autogen/tools/experimental/google/model.py +17 -0
  339. autogen/tools/experimental/google/toolkit_protocol.py +19 -0
  340. autogen/tools/experimental/google_search/__init__.py +8 -0
  341. autogen/tools/experimental/google_search/google_search.py +93 -0
  342. autogen/tools/experimental/google_search/youtube_search.py +181 -0
  343. autogen/tools/experimental/messageplatform/__init__.py +17 -0
  344. autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
  345. autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
  346. autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
  347. autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
  348. autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
  349. autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
  350. autogen/tools/experimental/perplexity/__init__.py +7 -0
  351. autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
  352. autogen/tools/experimental/reliable/__init__.py +10 -0
  353. autogen/tools/experimental/reliable/reliable.py +1316 -0
  354. autogen/tools/experimental/tavily/__init__.py +7 -0
  355. autogen/tools/experimental/tavily/tavily_search.py +183 -0
  356. autogen/tools/experimental/web_search_preview/__init__.py +7 -0
  357. autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
  358. autogen/tools/experimental/wikipedia/__init__.py +7 -0
  359. autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
  360. autogen/tools/function_utils.py +411 -0
  361. autogen/tools/tool.py +187 -0
  362. autogen/tools/toolkit.py +86 -0
  363. autogen/types.py +29 -0
  364. autogen/version.py +7 -0
  365. templates/client_template/main.jinja2 +69 -0
  366. templates/config_template/config.jinja2 +7 -0
  367. templates/main.jinja2 +61 -0
  368. ag2-0.9.1a1.dist-info/RECORD +0 -6
  369. ag2-0.9.1a1.dist-info/top_level.txt +0 -1
  370. {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/LICENSE +0 -0
  371. {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/NOTICE.md +0 -0
autogen/oai/anthropic.py
@@ -0,0 +1,714 @@
+ # Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+ #
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+ # SPDX-License-Identifier: MIT
+ """Create an OpenAI-compatible client for the Anthropic API.
+
+ Example usage:
+ Install the `anthropic` package by running `pip install --upgrade anthropic`.
+ - https://docs.anthropic.com/en/docs/quickstart-guide
+
+ ```python
+ import autogen
+
+ config_list = [
+     {
+         "model": "claude-3-sonnet-20240229",
+         "api_key": os.getenv("ANTHROPIC_API_KEY"),
+         "api_type": "anthropic",
+     }
+ ]
+
+ assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
+ ```
+
+ Example usage for Anthropic Bedrock:
+
+ Install the `anthropic` package by running `pip install --upgrade anthropic`.
+ - https://docs.anthropic.com/en/docs/quickstart-guide
+
+ ```python
+ import autogen
+
+ config_list = [
+     {
+         "model": "anthropic.claude-3-5-sonnet-20240620-v1:0",
+         "aws_access_key":<accessKey>,
+         "aws_secret_key":<secretKey>,
+         "aws_session_token":<sessionTok>,
+         "aws_region":"us-east-1",
+         "api_type": "anthropic",
+     }
+ ]
+
+ assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
+ ```
+
+ Example usage for Anthropic VertexAI:
+
+ Install the `anthropic` package by running `pip install anthropic[vertex]`.
+ - https://docs.anthropic.com/en/docs/quickstart-guide
+
+ ```python
+
+ import autogen
+ config_list = [
+     {
+         "model": "claude-3-5-sonnet-20240620-v1:0",
+         "gcp_project_id": "dummy_project_id",
+         "gcp_region": "us-west-2",
+         "gcp_auth_token": "dummy_auth_token",
+         "api_type": "anthropic",
+     }
+ ]
+
+ assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
+ ```
+ """
+
+ from __future__ import annotations
+
+ import inspect
+ import json
+ import os
+ import re
+ import time
+ import warnings
+ from typing import Any, Literal, Optional, Union
+
+ from pydantic import BaseModel, Field
+
+ from ..import_utils import optional_import_block, require_optional_import
+ from ..llm_config import LLMConfigEntry, register_llm_config
+ from .client_utils import FormatterProtocol, validate_parameter
+ from .oai_models import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall, Choice, CompletionUsage
+
+ with optional_import_block():
+     from anthropic import Anthropic, AnthropicBedrock, AnthropicVertex
+     from anthropic import __version__ as anthropic_version
+     from anthropic.types import Message, TextBlock, ToolUseBlock
+
+     TOOL_ENABLED = anthropic_version >= "0.23.1"
+     if TOOL_ENABLED:
+         pass
+
+
+ ANTHROPIC_PRICING_1k = {
+     "claude-3-7-sonnet-20250219": (0.003, 0.015),
+     "claude-3-5-sonnet-20241022": (0.003, 0.015),
+     "claude-3-5-haiku-20241022": (0.0008, 0.004),
+     "claude-3-5-sonnet-20240620": (0.003, 0.015),
+     "claude-3-sonnet-20240229": (0.003, 0.015),
+     "claude-3-opus-20240229": (0.015, 0.075),
+     "claude-3-haiku-20240307": (0.00025, 0.00125),
+     "claude-2.1": (0.008, 0.024),
+     "claude-2.0": (0.008, 0.024),
+     "claude-instant-1.2": (0.008, 0.024),
+ }
+
+
+ @register_llm_config
+ class AnthropicLLMConfigEntry(LLMConfigEntry):
+     api_type: Literal["anthropic"] = "anthropic"
+     timeout: Optional[int] = Field(default=None, ge=1)
+     temperature: float = Field(default=1.0, ge=0.0, le=1.0)
+     top_k: Optional[int] = Field(default=None, ge=1)
+     top_p: Optional[float] = Field(default=None, ge=0.0, le=1.0)
+     stop_sequences: Optional[list[str]] = None
+     stream: bool = False
+     max_tokens: int = Field(default=4096, ge=1)
+     price: Optional[list[float]] = Field(default=None, min_length=2, max_length=2)
+     tool_choice: Optional[dict] = None
+     thinking: Optional[dict] = None
+
+     gcp_project_id: Optional[str] = None
+     gcp_region: Optional[str] = None
+     gcp_auth_token: Optional[str] = None
+
+     def create_client(self):
+         raise NotImplementedError("AnthropicLLMConfigEntry.create_client is not implemented.")
+
+
+ @require_optional_import("anthropic", "anthropic")
+ class AnthropicClient:
+     def __init__(self, **kwargs: Any):
+         """Initialize the Anthropic API client.
+
+         Args:
+             **kwargs: The configuration parameters for the client.
+         """
+         self._api_key = kwargs.get("api_key")
+         self._aws_access_key = kwargs.get("aws_access_key")
+         self._aws_secret_key = kwargs.get("aws_secret_key")
+         self._aws_session_token = kwargs.get("aws_session_token")
+         self._aws_region = kwargs.get("aws_region")
+         self._gcp_project_id = kwargs.get("gcp_project_id")
+         self._gcp_region = kwargs.get("gcp_region")
+         self._gcp_auth_token = kwargs.get("gcp_auth_token")
+         self._base_url = kwargs.get("base_url")
+
+         if not self._api_key:
+             self._api_key = os.getenv("ANTHROPIC_API_KEY")
+
+         if not self._aws_access_key:
+             self._aws_access_key = os.getenv("AWS_ACCESS_KEY")
+
+         if not self._aws_secret_key:
+             self._aws_secret_key = os.getenv("AWS_SECRET_KEY")
+
+         if not self._aws_region:
+             self._aws_region = os.getenv("AWS_REGION")
+
+         if not self._gcp_region:
+             self._gcp_region = os.getenv("GCP_REGION")
+
+         if self._api_key is None:
+             if self._aws_region:
+                 if self._aws_access_key is None or self._aws_secret_key is None:
+                     raise ValueError("API key or AWS credentials are required to use the Anthropic API.")
+             elif self._gcp_region:
+                 if self._gcp_project_id is None or self._gcp_region is None:
+                     raise ValueError("API key or GCP credentials are required to use the Anthropic API.")
+             else:
+                 raise ValueError("API key or AWS credentials or GCP credentials are required to use the Anthropic API.")
+
+         if self._api_key is not None:
+             client_kwargs = {"api_key": self._api_key}
+             if self._base_url:
+                 client_kwargs["base_url"] = self._base_url
+             self._client = Anthropic(**client_kwargs)
+         elif self._gcp_region is not None:
+             kw = {}
+             for i, p in enumerate(inspect.signature(AnthropicVertex).parameters):
+                 if hasattr(self, f"_gcp_{p}"):
+                     kw[p] = getattr(self, f"_gcp_{p}")
+             if self._base_url:
+                 kw["base_url"] = self._base_url
+             self._client = AnthropicVertex(**kw)
+         else:
+             client_kwargs = {
+                 "aws_access_key": self._aws_access_key,
+                 "aws_secret_key": self._aws_secret_key,
+                 "aws_session_token": self._aws_session_token,
+                 "aws_region": self._aws_region,
+             }
+             if self._base_url:
+                 client_kwargs["base_url"] = self._base_url
+             self._client = AnthropicBedrock(**client_kwargs)
+
+         self._last_tooluse_status = {}
+
+         # Store the response format, if provided (for structured outputs)
+         self._response_format: Optional[type[BaseModel]] = None
+
+     def load_config(self, params: dict[str, Any]):
+         """Load the configuration for the Anthropic API client."""
+         anthropic_params = {}
+
+         anthropic_params["model"] = params.get("model")
+         assert anthropic_params["model"], "Please provide a `model` in the config_list to use the Anthropic API."
+
+         anthropic_params["temperature"] = validate_parameter(
+             params, "temperature", (float, int), False, 1.0, (0.0, 1.0), None
+         )
+         anthropic_params["max_tokens"] = validate_parameter(params, "max_tokens", int, False, 4096, (1, None), None)
+         anthropic_params["timeout"] = validate_parameter(params, "timeout", int, True, None, (1, None), None)
+         anthropic_params["top_k"] = validate_parameter(params, "top_k", int, True, None, (1, None), None)
+         anthropic_params["top_p"] = validate_parameter(params, "top_p", (float, int), True, None, (0.0, 1.0), None)
+         anthropic_params["stop_sequences"] = validate_parameter(params, "stop_sequences", list, True, None, None, None)
+         anthropic_params["stream"] = validate_parameter(params, "stream", bool, False, False, None, None)
+         if "thinking" in params:
+             anthropic_params["thinking"] = params["thinking"]
+
+         if anthropic_params["stream"]:
+             warnings.warn(
+                 "Streaming is not currently supported, streaming will be disabled.",
+                 UserWarning,
+             )
+             anthropic_params["stream"] = False
+
+         # Note the Anthropic API supports "tool" for tool_choice but you must specify the tool name so we will ignore that here
+         # Dictionary, see options here: https://docs.anthropic.com/en/docs/build-with-claude/tool-use/overview#controlling-claudes-output
+         # type = auto, any, tool, none | name = the name of the tool if type=tool
+         anthropic_params["tool_choice"] = validate_parameter(params, "tool_choice", dict, True, None, None, None)
+
+         return anthropic_params
+
+     def cost(self, response) -> float:
+         """Calculate the cost of the completion using the Anthropic pricing."""
+         return response.cost
+
+     @property
+     def api_key(self):
+         return self._api_key
+
+     @property
+     def aws_access_key(self):
+         return self._aws_access_key
+
+     @property
+     def aws_secret_key(self):
+         return self._aws_secret_key
+
+     @property
+     def aws_session_token(self):
+         return self._aws_session_token
+
+     @property
+     def aws_region(self):
+         return self._aws_region
+
+     @property
+     def gcp_project_id(self):
+         return self._gcp_project_id
+
+     @property
+     def gcp_region(self):
+         return self._gcp_region
+
+     @property
+     def gcp_auth_token(self):
+         return self._gcp_auth_token
+
+     def create(self, params: dict[str, Any]) -> ChatCompletion:
+         """Creates a completion using the Anthropic API."""
+         if "tools" in params:
+             converted_functions = self.convert_tools_to_functions(params["tools"])
+             params["functions"] = params.get("functions", []) + converted_functions
+
+         # Convert AG2 messages to Anthropic messages
+         anthropic_messages = oai_messages_to_anthropic_messages(params)
+         anthropic_params = self.load_config(params)
+
+         # If response_format exists, we want structured outputs
+         # Anthropic doesn't support response_format, so using Anthropic's "JSON Mode":
+         # https://github.com/anthropics/anthropic-cookbook/blob/main/misc/how_to_enable_json_mode.ipynb
+         if params.get("response_format"):
+             self._response_format = params["response_format"]
+             self._add_response_format_to_system(params)
+
+         # TODO: support stream
+         params = params.copy()
+         if "functions" in params:
+             tools_configs = params.pop("functions")
+             tools_configs = [self.openai_func_to_anthropic(tool) for tool in tools_configs]
+             params["tools"] = tools_configs
+
+         # Anthropic doesn't accept None values, so we need to use keyword argument unpacking instead of setting parameters.
+         # Copy params we need into anthropic_params
+         # Remove any that don't have values
+         anthropic_params["messages"] = anthropic_messages
+         if "system" in params:
+             anthropic_params["system"] = params["system"]
+         if "tools" in params:
+             anthropic_params["tools"] = params["tools"]
+         if anthropic_params["top_k"] is None:
+             del anthropic_params["top_k"]
+         if anthropic_params["top_p"] is None:
+             del anthropic_params["top_p"]
+         if anthropic_params["stop_sequences"] is None:
+             del anthropic_params["stop_sequences"]
+         if anthropic_params["tool_choice"] is None:
+             del anthropic_params["tool_choice"]
+
+         response = self._client.messages.create(**anthropic_params)
+
+         tool_calls = []
+         message_text = ""
+
+         if self._response_format:
+             try:
+                 parsed_response = self._extract_json_response(response)
+                 message_text = _format_json_response(parsed_response)
+             except ValueError as e:
+                 message_text = str(e)
+
+             anthropic_finish = "stop"
+         else:
+             if response is not None:
+                 # If we have tool use as the response, populate completed tool calls for our return OAI response
+                 if response.stop_reason == "tool_use":
+                     anthropic_finish = "tool_calls"
+                     for content in response.content:
+                         if type(content) == ToolUseBlock:
+                             tool_calls.append(
+                                 ChatCompletionMessageToolCall(
+                                     id=content.id,
+                                     function={"name": content.name, "arguments": json.dumps(content.input)},
+                                     type="function",
+                                 )
+                             )
+                 else:
+                     anthropic_finish = "stop"
+                     tool_calls = None
+
+                 # Retrieve any text content from the response
+                 for content in response.content:
+                     if type(content) == TextBlock:
+                         message_text = content.text
+                         break
+
+         # Calculate and save the cost onto the response
+         prompt_tokens = response.usage.input_tokens
+         completion_tokens = response.usage.output_tokens
+
+         # Convert output back to AG2 response format
+         message = ChatCompletionMessage(
+             role="assistant",
+             content=message_text,
+             function_call=None,
+             tool_calls=tool_calls,
+         )
+         choices = [Choice(finish_reason=anthropic_finish, index=0, message=message)]
+
+         response_oai = ChatCompletion(
+             id=response.id,
+             model=anthropic_params["model"],
+             created=int(time.time()),
+             object="chat.completion",
+             choices=choices,
+             usage=CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=prompt_tokens + completion_tokens,
+             ),
+             cost=_calculate_cost(prompt_tokens, completion_tokens, anthropic_params["model"]),
+         )
+
+         return response_oai
+
+     def message_retrieval(self, response) -> list:
+         """Retrieve and return a list of strings or a list of Choice.Message from the response.
+
+         NOTE: if a list of Choice.Message is returned, it currently needs to contain the fields of OpenAI's ChatCompletion Message object,
+         since that is expected for function or tool calling in the rest of the codebase at the moment, unless a custom agent is being used.
+         """
+         return [choice.message for choice in response.choices]
+
+     @staticmethod
+     def openai_func_to_anthropic(openai_func: dict) -> dict:
+         res = openai_func.copy()
+         res["input_schema"] = res.pop("parameters")
+         return res
+
+     @staticmethod
+     def get_usage(response: ChatCompletion) -> dict:
+         """Get the usage of tokens and their cost information."""
+         return {
+             "prompt_tokens": response.usage.prompt_tokens if response.usage is not None else 0,
+             "completion_tokens": response.usage.completion_tokens if response.usage is not None else 0,
+             "total_tokens": response.usage.total_tokens if response.usage is not None else 0,
+             "cost": response.cost if hasattr(response, "cost") else 0.0,
+             "model": response.model,
+         }
+
+     @staticmethod
+     def convert_tools_to_functions(tools: list) -> list:
+         """
+         Convert tool definitions into Anthropic-compatible functions,
+         updating nested $ref paths in property schemas.
+
+         Args:
+             tools (list): List of tool definitions.
+
+         Returns:
+             list: List of functions with updated $ref paths.
+         """
+
+         def update_refs(obj, defs_keys, prop_name):
+             """Recursively update $ref values that start with "#/$defs/"."""
+             if isinstance(obj, dict):
+                 for key, value in obj.items():
+                     if key == "$ref" and isinstance(value, str) and value.startswith("#/$defs/"):
+                         ref_key = value[len("#/$defs/") :]
+                         if ref_key in defs_keys:
+                             obj[key] = f"#/properties/{prop_name}/$defs/{ref_key}"
+                     else:
+                         update_refs(value, defs_keys, prop_name)
+             elif isinstance(obj, list):
+                 for item in obj:
+                     update_refs(item, defs_keys, prop_name)
+
+         functions = []
+         for tool in tools:
+             if tool.get("type") == "function" and "function" in tool:
+                 function = tool["function"]
+                 parameters = function.get("parameters", {})
+                 properties = parameters.get("properties", {})
+                 for prop_name, prop_schema in properties.items():
+                     if "$defs" in prop_schema:
+                         defs_keys = set(prop_schema["$defs"].keys())
+                         update_refs(prop_schema, defs_keys, prop_name)
+                 functions.append(function)
+         return functions
+
+     def _add_response_format_to_system(self, params: dict[str, Any]):
+         """Add prompt that will generate properly formatted JSON for structured outputs to system parameter.
+
+         Based on Anthropic's JSON Mode cookbook, we ask the LLM to put the JSON within <json_response> tags.
+
+         Args:
+             params (dict): The client parameters
+         """
+         if not params.get("system"):
+             return
+
+         # Get the schema of the Pydantic model
+         if isinstance(self._response_format, dict):
+             schema = self._response_format
+         else:
+             schema = self._response_format.model_json_schema()
+
+         # Add instructions for JSON formatting
+         format_content = f"""Please provide your response as a JSON object that matches the following schema:
+ {json.dumps(schema, indent=2)}
+
+ Format your response as valid JSON within <json_response> tags.
+ Do not include any text before or after the tags.
+ Ensure the JSON is properly formatted and matches the schema exactly."""
+
+         # Add formatting to last user message
+         params["system"] += "\n\n" + format_content
+
+     def _extract_json_response(self, response: Message) -> Any:
+         """Extract and validate JSON response from the output for structured outputs.
+
+         Args:
+             response (Message): The response from the API.
+
+         Returns:
+             Any: The parsed JSON response.
+         """
+         if not self._response_format:
+             return response
+
+         # Extract content from response
+         content = response.content[0].text if response.content else ""
+
+         # Try to extract JSON from tags first
+         json_match = re.search(r"<json_response>(.*?)</json_response>", content, re.DOTALL)
+         if json_match:
+             json_str = json_match.group(1).strip()
+         else:
+             # Fallback to finding first JSON object
+             json_start = content.find("{")
+             json_end = content.rfind("}")
+             if json_start == -1 or json_end == -1:
+                 raise ValueError("No valid JSON found in response for Structured Output.")
+             json_str = content[json_start : json_end + 1]
+
+         try:
+             # Parse JSON and validate against the Pydantic model if Pydantic model was provided
+             json_data = json.loads(json_str)
+             if isinstance(self._response_format, dict):
+                 return json_str
+             else:
+                 return self._response_format.model_validate(json_data)
+
+         except Exception as e:
+             raise ValueError(f"Failed to parse response as valid JSON matching the schema for Structured Output: {e!s}")
+
+
+ def _format_json_response(response: Any) -> str:
+     """Formats the JSON response for structured outputs using the format method if it exists."""
+     if isinstance(response, str):
+         return response
+     elif isinstance(response, FormatterProtocol):
+         return response.format()
+     else:
+         return response.model_dump_json()
+
+
+ def process_image_content(content_item: dict[str, Any]) -> dict[str, Any]:
+     """Process an OpenAI image content item into Claude format."""
+     if content_item["type"] != "image_url":
+         return content_item
+
+     url = content_item["image_url"]["url"]
+     try:
+         # Handle data URLs
+         if url.startswith("data:"):
+             data_url_pattern = r"data:image/([a-zA-Z]+);base64,(.+)"
+             match = re.match(data_url_pattern, url)
+             if match:
+                 media_type, base64_data = match.groups()
+                 return {
+                     "type": "image",
+                     "source": {"type": "base64", "media_type": f"image/{media_type}", "data": base64_data},
+                 }
+
+         else:
+             print("Error processing image.")
+             # Return original content if image processing fails
+             return content_item
+
+     except Exception as e:
+         print(f"Error processing image: {e}")
+         # Return original content if image processing fails
+         return content_item
+
+
+ def process_message_content(message: dict[str, Any]) -> Union[str, list[dict[str, Any]]]:
+     """Process message content, handling both string and list formats with images."""
+     content = message.get("content", "")
+
+     # Handle empty content
+     if content == "":
+         return content
+
+     # If content is already a string, return as is
+     if isinstance(content, str):
+         return content
+
+     # Handle list content (mixed text and images)
+     if isinstance(content, list):
+         processed_content = []
+         for item in content:
+             if item["type"] == "text":
+                 processed_content.append({"type": "text", "text": item["text"]})
+             elif item["type"] == "image_url":
+                 processed_content.append(process_image_content(item))
+         return processed_content
+
+     return content
+
+
+ @require_optional_import("anthropic", "anthropic")
+ def oai_messages_to_anthropic_messages(params: dict[str, Any]) -> list[dict[str, Any]]:
+     """Convert messages from OAI format to Anthropic format.
+     We correct for any specific role orders and types, etc.
+     """
+     # Track whether we have tools passed in. If not, tool use / result messages should be converted to text messages.
+     # Anthropic requires a tools parameter with the tools listed, if there are other messages with tool use or tool results.
+     # This can occur when we don't need tool calling, such as for group chat speaker selection.
+     has_tools = "tools" in params
+
+     # Convert messages to Anthropic compliant format
+     processed_messages = []
+
+     # Used to interweave user messages to ensure user/assistant alternating
+     user_continue_message = {"content": "Please continue.", "role": "user"}
+     assistant_continue_message = {"content": "Please continue.", "role": "assistant"}
+
+     tool_use_messages = 0
+     tool_result_messages = 0
+     last_tool_use_index = -1
+     last_tool_result_index = -1
+     for message in params["messages"]:
+         if message["role"] == "system":
+             content = process_message_content(message)
+             if isinstance(content, list):
+                 # For system messages with images, concatenate only the text portions
+                 text_content = " ".join(item.get("text", "") for item in content if item.get("type") == "text")
+                 params["system"] = params.get("system", "") + (" " if "system" in params else "") + text_content
+             else:
+                 params["system"] = params.get("system", "") + ("\n" if "system" in params else "") + content
+         else:
+             # New messages will be added here, manage role alternations
+             expected_role = "user" if len(processed_messages) % 2 == 0 else "assistant"
+
+             if "tool_calls" in message:
+                 # Map the tool call options to Anthropic's ToolUseBlock
+                 tool_uses = []
+                 tool_names = []
+                 for tool_call in message["tool_calls"]:
+                     tool_uses.append(
+                         ToolUseBlock(
+                             type="tool_use",
+                             id=tool_call["id"],
+                             name=tool_call["function"]["name"],
+                             input=json.loads(tool_call["function"]["arguments"]),
+                         )
+                     )
+                     if has_tools:
+                         tool_use_messages += 1
+                     tool_names.append(tool_call["function"]["name"])
+
+                 if expected_role == "user":
+                     # Insert an extra user message as we will append an assistant message
+                     processed_messages.append(user_continue_message)
+
+                 if has_tools:
+                     processed_messages.append({"role": "assistant", "content": tool_uses})
+                     last_tool_use_index = len(processed_messages) - 1
+                 else:
+                     # Not using tools, so put in a plain text message
+                     processed_messages.append({
+                         "role": "assistant",
+                         "content": f"Some internal function(s) that could be used: [{', '.join(tool_names)}]",
+                     })
+             elif "tool_call_id" in message:
+                 if has_tools:
+                     # Map the tool usage call to tool_result for Anthropic
+                     tool_result = {
+                         "type": "tool_result",
+                         "tool_use_id": message["tool_call_id"],
+                         "content": message["content"],
+                     }
+
+                     # If the previous message also had a tool_result, add it to that
+                     # Otherwise append a new message
+                     if last_tool_result_index == len(processed_messages) - 1:
+                         processed_messages[-1]["content"].append(tool_result)
+                     else:
+                         if expected_role == "assistant":
+                             # Insert an extra assistant message as we will append a user message
+                             processed_messages.append(assistant_continue_message)
+
+                         processed_messages.append({"role": "user", "content": [tool_result]})
+                         last_tool_result_index = len(processed_messages) - 1
+
+                     tool_result_messages += 1
+                 else:
+                     # Not using tools, so put in a plain text message
+                     processed_messages.append({
+                         "role": "user",
+                         "content": f"Running the function returned: {message['content']}",
+                     })
+             elif message["content"] == "":
+                 # Ignoring empty messages
+                 pass
+             else:
+                 if expected_role != message["role"]:
+                     # Inserting the alternating continue message
+                     processed_messages.append(
+                         user_continue_message if expected_role == "user" else assistant_continue_message
+                     )
+                 # Process messages for images
+                 processed_content = process_message_content(message)
+                 processed_message = message.copy()
+                 processed_message["content"] = processed_content
+                 processed_messages.append(processed_message)
+
+     # We'll replace the last tool_use if there's no tool_result (occurs if we finish the conversation before running the function)
+     if has_tools and tool_use_messages != tool_result_messages:
+         processed_messages[last_tool_use_index] = assistant_continue_message
+
+     # name is not a valid field on messages
+     for message in processed_messages:
+         if "name" in message:
+             message.pop("name", None)
+
+     # Note: When using reflection_with_llm we may end up with an "assistant" message as the last message and that may cause a blank response
+     # So, if the last role is not user, add a 'user' continue message at the end
+     if processed_messages[-1]["role"] != "user":
+         processed_messages.append(user_continue_message)
+
+     return processed_messages
+
+
+ def _calculate_cost(input_tokens: int, output_tokens: int, model: str) -> float:
+     """Calculate the cost of the completion using the Anthropic pricing."""
+     total = 0.0
+
+     if model in ANTHROPIC_PRICING_1k:
+         input_cost_per_1k, output_cost_per_1k = ANTHROPIC_PRICING_1k[model]
+         input_cost = (input_tokens / 1000) * input_cost_per_1k
+         output_cost = (output_tokens / 1000) * output_cost_per_1k
+         total = input_cost + output_cost
+     else:
+         warnings.warn(f"Cost calculation not available for model {model}", UserWarning)
+
+     return total
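
For orientation, here is a minimal sketch (illustrative only, not part of the released files) of how two module-level helpers added in `autogen/oai/anthropic.py` behave. It assumes ag2 0.9.2 is installed together with the optional `anthropic` dependency; the message history and token counts below are made-up values.

```python
# Illustrative sketch only; assumes ag2 0.9.2 and `pip install anthropic`.
from autogen.oai.anthropic import _calculate_cost, oai_messages_to_anthropic_messages

# OAI-style history: the converter hoists the system message into params["system"]
# and inserts a "Please continue." user turn so the remaining roles alternate
# user/assistant, as the function's own comments describe.
params = {
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "assistant", "content": "Hello! How can I help?"},
        {"role": "user", "content": "What is 2 + 2?"},
    ]
}
anthropic_messages = oai_messages_to_anthropic_messages(params)
print(params["system"])    # "You are a helpful assistant."
print(anthropic_messages)  # starts with {'content': 'Please continue.', 'role': 'user'}

# Pricing lookup via ANTHROPIC_PRICING_1k: 1,000 input + 500 output tokens on
# claude-3-5-sonnet-20241022 -> 1.0 * 0.003 + 0.5 * 0.015 = 0.0105 (USD).
print(_calculate_cost(1000, 500, "claude-3-5-sonnet-20241022"))
```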