ag2 0.10.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (423)
  1. ag2-0.10.2.dist-info/METADATA +819 -0
  2. ag2-0.10.2.dist-info/RECORD +423 -0
  3. ag2-0.10.2.dist-info/WHEEL +4 -0
  4. ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
  5. ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
  6. autogen/__init__.py +88 -0
  7. autogen/_website/__init__.py +3 -0
  8. autogen/_website/generate_api_references.py +426 -0
  9. autogen/_website/generate_mkdocs.py +1216 -0
  10. autogen/_website/notebook_processor.py +475 -0
  11. autogen/_website/process_notebooks.py +656 -0
  12. autogen/_website/utils.py +413 -0
  13. autogen/a2a/__init__.py +36 -0
  14. autogen/a2a/agent_executor.py +86 -0
  15. autogen/a2a/client.py +357 -0
  16. autogen/a2a/errors.py +18 -0
  17. autogen/a2a/httpx_client_factory.py +79 -0
  18. autogen/a2a/server.py +221 -0
  19. autogen/a2a/utils.py +207 -0
  20. autogen/agentchat/__init__.py +47 -0
  21. autogen/agentchat/agent.py +180 -0
  22. autogen/agentchat/assistant_agent.py +86 -0
  23. autogen/agentchat/chat.py +325 -0
  24. autogen/agentchat/contrib/__init__.py +5 -0
  25. autogen/agentchat/contrib/agent_eval/README.md +7 -0
  26. autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
  27. autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
  28. autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
  29. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
  30. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
  31. autogen/agentchat/contrib/agent_eval/task.py +42 -0
  32. autogen/agentchat/contrib/agent_optimizer.py +432 -0
  33. autogen/agentchat/contrib/capabilities/__init__.py +5 -0
  34. autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
  35. autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
  36. autogen/agentchat/contrib/capabilities/teachability.py +393 -0
  37. autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
  38. autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
  39. autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
  40. autogen/agentchat/contrib/capabilities/transforms.py +578 -0
  41. autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
  42. autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
  43. autogen/agentchat/contrib/captainagent/__init__.py +9 -0
  44. autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
  45. autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
  46. autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
  47. autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
  48. autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
  49. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
  50. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
  51. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
  52. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
  53. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
  54. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
  55. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
  56. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
  57. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
  58. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
  59. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
  60. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
  61. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
  62. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
  63. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
  64. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
  65. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
  66. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
  67. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
  68. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
  69. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
  70. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
  71. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
  72. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
  73. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
  74. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
  75. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
  76. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
  77. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
  78. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
  79. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
  80. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
  81. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
  82. autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
  83. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
  84. autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
  85. autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
  86. autogen/agentchat/contrib/graph_rag/document.py +29 -0
  87. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
  88. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
  89. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
  90. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
  91. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
  92. autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
  93. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
  94. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
  95. autogen/agentchat/contrib/img_utils.py +397 -0
  96. autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
  97. autogen/agentchat/contrib/llava_agent.py +189 -0
  98. autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
  99. autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
  100. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
  101. autogen/agentchat/contrib/rag/__init__.py +10 -0
  102. autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
  103. autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
  104. autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
  105. autogen/agentchat/contrib/rag/query_engine.py +76 -0
  106. autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
  107. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
  108. autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
  109. autogen/agentchat/contrib/swarm_agent.py +1404 -0
  110. autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
  111. autogen/agentchat/contrib/vectordb/__init__.py +5 -0
  112. autogen/agentchat/contrib/vectordb/base.py +224 -0
  113. autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
  114. autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
  115. autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
  116. autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
  117. autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
  118. autogen/agentchat/contrib/vectordb/utils.py +126 -0
  119. autogen/agentchat/contrib/web_surfer.py +304 -0
  120. autogen/agentchat/conversable_agent.py +4307 -0
  121. autogen/agentchat/group/__init__.py +67 -0
  122. autogen/agentchat/group/available_condition.py +91 -0
  123. autogen/agentchat/group/context_condition.py +77 -0
  124. autogen/agentchat/group/context_expression.py +238 -0
  125. autogen/agentchat/group/context_str.py +39 -0
  126. autogen/agentchat/group/context_variables.py +182 -0
  127. autogen/agentchat/group/events/transition_events.py +111 -0
  128. autogen/agentchat/group/group_tool_executor.py +324 -0
  129. autogen/agentchat/group/group_utils.py +659 -0
  130. autogen/agentchat/group/guardrails.py +179 -0
  131. autogen/agentchat/group/handoffs.py +303 -0
  132. autogen/agentchat/group/llm_condition.py +93 -0
  133. autogen/agentchat/group/multi_agent_chat.py +291 -0
  134. autogen/agentchat/group/on_condition.py +55 -0
  135. autogen/agentchat/group/on_context_condition.py +51 -0
  136. autogen/agentchat/group/patterns/__init__.py +18 -0
  137. autogen/agentchat/group/patterns/auto.py +160 -0
  138. autogen/agentchat/group/patterns/manual.py +177 -0
  139. autogen/agentchat/group/patterns/pattern.py +295 -0
  140. autogen/agentchat/group/patterns/random.py +106 -0
  141. autogen/agentchat/group/patterns/round_robin.py +117 -0
  142. autogen/agentchat/group/reply_result.py +24 -0
  143. autogen/agentchat/group/safeguards/__init__.py +21 -0
  144. autogen/agentchat/group/safeguards/api.py +241 -0
  145. autogen/agentchat/group/safeguards/enforcer.py +1158 -0
  146. autogen/agentchat/group/safeguards/events.py +140 -0
  147. autogen/agentchat/group/safeguards/validator.py +435 -0
  148. autogen/agentchat/group/speaker_selection_result.py +41 -0
  149. autogen/agentchat/group/targets/__init__.py +4 -0
  150. autogen/agentchat/group/targets/function_target.py +245 -0
  151. autogen/agentchat/group/targets/group_chat_target.py +133 -0
  152. autogen/agentchat/group/targets/group_manager_target.py +151 -0
  153. autogen/agentchat/group/targets/transition_target.py +424 -0
  154. autogen/agentchat/group/targets/transition_utils.py +6 -0
  155. autogen/agentchat/groupchat.py +1832 -0
  156. autogen/agentchat/realtime/__init__.py +3 -0
  157. autogen/agentchat/realtime/experimental/__init__.py +20 -0
  158. autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
  159. autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
  160. autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
  161. autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
  162. autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
  163. autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
  164. autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
  165. autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
  166. autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
  167. autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
  168. autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
  169. autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
  170. autogen/agentchat/realtime/experimental/function_observer.py +84 -0
  171. autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
  172. autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
  173. autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
  174. autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
  175. autogen/agentchat/realtime/experimental/websockets.py +21 -0
  176. autogen/agentchat/realtime_agent/__init__.py +21 -0
  177. autogen/agentchat/user_proxy_agent.py +114 -0
  178. autogen/agentchat/utils.py +206 -0
  179. autogen/agents/__init__.py +3 -0
  180. autogen/agents/contrib/__init__.py +10 -0
  181. autogen/agents/contrib/time/__init__.py +8 -0
  182. autogen/agents/contrib/time/time_reply_agent.py +74 -0
  183. autogen/agents/contrib/time/time_tool_agent.py +52 -0
  184. autogen/agents/experimental/__init__.py +27 -0
  185. autogen/agents/experimental/deep_research/__init__.py +7 -0
  186. autogen/agents/experimental/deep_research/deep_research.py +52 -0
  187. autogen/agents/experimental/discord/__init__.py +7 -0
  188. autogen/agents/experimental/discord/discord.py +66 -0
  189. autogen/agents/experimental/document_agent/__init__.py +19 -0
  190. autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
  191. autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
  192. autogen/agents/experimental/document_agent/document_agent.py +643 -0
  193. autogen/agents/experimental/document_agent/document_conditions.py +50 -0
  194. autogen/agents/experimental/document_agent/document_utils.py +376 -0
  195. autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
  196. autogen/agents/experimental/document_agent/parser_utils.py +134 -0
  197. autogen/agents/experimental/document_agent/url_utils.py +417 -0
  198. autogen/agents/experimental/reasoning/__init__.py +7 -0
  199. autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
  200. autogen/agents/experimental/slack/__init__.py +7 -0
  201. autogen/agents/experimental/slack/slack.py +73 -0
  202. autogen/agents/experimental/telegram/__init__.py +7 -0
  203. autogen/agents/experimental/telegram/telegram.py +76 -0
  204. autogen/agents/experimental/websurfer/__init__.py +7 -0
  205. autogen/agents/experimental/websurfer/websurfer.py +70 -0
  206. autogen/agents/experimental/wikipedia/__init__.py +7 -0
  207. autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
  208. autogen/browser_utils.py +309 -0
  209. autogen/cache/__init__.py +10 -0
  210. autogen/cache/abstract_cache_base.py +71 -0
  211. autogen/cache/cache.py +203 -0
  212. autogen/cache/cache_factory.py +88 -0
  213. autogen/cache/cosmos_db_cache.py +144 -0
  214. autogen/cache/disk_cache.py +97 -0
  215. autogen/cache/in_memory_cache.py +54 -0
  216. autogen/cache/redis_cache.py +119 -0
  217. autogen/code_utils.py +598 -0
  218. autogen/coding/__init__.py +30 -0
  219. autogen/coding/base.py +120 -0
  220. autogen/coding/docker_commandline_code_executor.py +283 -0
  221. autogen/coding/factory.py +56 -0
  222. autogen/coding/func_with_reqs.py +203 -0
  223. autogen/coding/jupyter/__init__.py +23 -0
  224. autogen/coding/jupyter/base.py +36 -0
  225. autogen/coding/jupyter/docker_jupyter_server.py +160 -0
  226. autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
  227. autogen/coding/jupyter/import_utils.py +82 -0
  228. autogen/coding/jupyter/jupyter_client.py +224 -0
  229. autogen/coding/jupyter/jupyter_code_executor.py +154 -0
  230. autogen/coding/jupyter/local_jupyter_server.py +164 -0
  231. autogen/coding/local_commandline_code_executor.py +341 -0
  232. autogen/coding/markdown_code_extractor.py +44 -0
  233. autogen/coding/utils.py +55 -0
  234. autogen/coding/yepcode_code_executor.py +197 -0
  235. autogen/doc_utils.py +35 -0
  236. autogen/environments/__init__.py +10 -0
  237. autogen/environments/docker_python_environment.py +365 -0
  238. autogen/environments/python_environment.py +125 -0
  239. autogen/environments/system_python_environment.py +85 -0
  240. autogen/environments/venv_python_environment.py +220 -0
  241. autogen/environments/working_directory.py +74 -0
  242. autogen/events/__init__.py +7 -0
  243. autogen/events/agent_events.py +1016 -0
  244. autogen/events/base_event.py +100 -0
  245. autogen/events/client_events.py +168 -0
  246. autogen/events/helpers.py +44 -0
  247. autogen/events/print_event.py +45 -0
  248. autogen/exception_utils.py +73 -0
  249. autogen/extensions/__init__.py +5 -0
  250. autogen/fast_depends/__init__.py +16 -0
  251. autogen/fast_depends/_compat.py +75 -0
  252. autogen/fast_depends/core/__init__.py +14 -0
  253. autogen/fast_depends/core/build.py +206 -0
  254. autogen/fast_depends/core/model.py +527 -0
  255. autogen/fast_depends/dependencies/__init__.py +15 -0
  256. autogen/fast_depends/dependencies/model.py +30 -0
  257. autogen/fast_depends/dependencies/provider.py +40 -0
  258. autogen/fast_depends/library/__init__.py +10 -0
  259. autogen/fast_depends/library/model.py +46 -0
  260. autogen/fast_depends/py.typed +6 -0
  261. autogen/fast_depends/schema.py +66 -0
  262. autogen/fast_depends/use.py +272 -0
  263. autogen/fast_depends/utils.py +177 -0
  264. autogen/formatting_utils.py +83 -0
  265. autogen/function_utils.py +13 -0
  266. autogen/graph_utils.py +173 -0
  267. autogen/import_utils.py +539 -0
  268. autogen/interop/__init__.py +22 -0
  269. autogen/interop/crewai/__init__.py +7 -0
  270. autogen/interop/crewai/crewai.py +88 -0
  271. autogen/interop/interoperability.py +71 -0
  272. autogen/interop/interoperable.py +46 -0
  273. autogen/interop/langchain/__init__.py +8 -0
  274. autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
  275. autogen/interop/langchain/langchain_tool.py +78 -0
  276. autogen/interop/litellm/__init__.py +7 -0
  277. autogen/interop/litellm/litellm_config_factory.py +178 -0
  278. autogen/interop/pydantic_ai/__init__.py +7 -0
  279. autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
  280. autogen/interop/registry.py +70 -0
  281. autogen/io/__init__.py +15 -0
  282. autogen/io/base.py +151 -0
  283. autogen/io/console.py +56 -0
  284. autogen/io/processors/__init__.py +12 -0
  285. autogen/io/processors/base.py +21 -0
  286. autogen/io/processors/console_event_processor.py +61 -0
  287. autogen/io/run_response.py +294 -0
  288. autogen/io/thread_io_stream.py +63 -0
  289. autogen/io/websockets.py +214 -0
  290. autogen/json_utils.py +42 -0
  291. autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
  292. autogen/llm_clients/__init__.py +77 -0
  293. autogen/llm_clients/client_v2.py +122 -0
  294. autogen/llm_clients/models/__init__.py +55 -0
  295. autogen/llm_clients/models/content_blocks.py +389 -0
  296. autogen/llm_clients/models/unified_message.py +145 -0
  297. autogen/llm_clients/models/unified_response.py +83 -0
  298. autogen/llm_clients/openai_completions_client.py +444 -0
  299. autogen/llm_config/__init__.py +11 -0
  300. autogen/llm_config/client.py +59 -0
  301. autogen/llm_config/config.py +461 -0
  302. autogen/llm_config/entry.py +169 -0
  303. autogen/llm_config/types.py +37 -0
  304. autogen/llm_config/utils.py +223 -0
  305. autogen/logger/__init__.py +11 -0
  306. autogen/logger/base_logger.py +129 -0
  307. autogen/logger/file_logger.py +262 -0
  308. autogen/logger/logger_factory.py +42 -0
  309. autogen/logger/logger_utils.py +57 -0
  310. autogen/logger/sqlite_logger.py +524 -0
  311. autogen/math_utils.py +338 -0
  312. autogen/mcp/__init__.py +7 -0
  313. autogen/mcp/__main__.py +78 -0
  314. autogen/mcp/helpers.py +45 -0
  315. autogen/mcp/mcp_client.py +349 -0
  316. autogen/mcp/mcp_proxy/__init__.py +19 -0
  317. autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
  318. autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
  319. autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
  320. autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
  321. autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
  322. autogen/mcp/mcp_proxy/security.py +399 -0
  323. autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
  324. autogen/messages/__init__.py +7 -0
  325. autogen/messages/agent_messages.py +946 -0
  326. autogen/messages/base_message.py +108 -0
  327. autogen/messages/client_messages.py +172 -0
  328. autogen/messages/print_message.py +48 -0
  329. autogen/oai/__init__.py +61 -0
  330. autogen/oai/anthropic.py +1516 -0
  331. autogen/oai/bedrock.py +800 -0
  332. autogen/oai/cerebras.py +302 -0
  333. autogen/oai/client.py +1658 -0
  334. autogen/oai/client_utils.py +196 -0
  335. autogen/oai/cohere.py +494 -0
  336. autogen/oai/gemini.py +1045 -0
  337. autogen/oai/gemini_types.py +156 -0
  338. autogen/oai/groq.py +319 -0
  339. autogen/oai/mistral.py +311 -0
  340. autogen/oai/oai_models/__init__.py +23 -0
  341. autogen/oai/oai_models/_models.py +16 -0
  342. autogen/oai/oai_models/chat_completion.py +86 -0
  343. autogen/oai/oai_models/chat_completion_audio.py +32 -0
  344. autogen/oai/oai_models/chat_completion_message.py +97 -0
  345. autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
  346. autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
  347. autogen/oai/oai_models/completion_usage.py +59 -0
  348. autogen/oai/ollama.py +657 -0
  349. autogen/oai/openai_responses.py +451 -0
  350. autogen/oai/openai_utils.py +897 -0
  351. autogen/oai/together.py +387 -0
  352. autogen/remote/__init__.py +18 -0
  353. autogen/remote/agent.py +199 -0
  354. autogen/remote/agent_service.py +197 -0
  355. autogen/remote/errors.py +17 -0
  356. autogen/remote/httpx_client_factory.py +131 -0
  357. autogen/remote/protocol.py +37 -0
  358. autogen/remote/retry.py +102 -0
  359. autogen/remote/runtime.py +96 -0
  360. autogen/retrieve_utils.py +490 -0
  361. autogen/runtime_logging.py +161 -0
  362. autogen/testing/__init__.py +12 -0
  363. autogen/testing/messages.py +45 -0
  364. autogen/testing/test_agent.py +111 -0
  365. autogen/token_count_utils.py +280 -0
  366. autogen/tools/__init__.py +20 -0
  367. autogen/tools/contrib/__init__.py +9 -0
  368. autogen/tools/contrib/time/__init__.py +7 -0
  369. autogen/tools/contrib/time/time.py +40 -0
  370. autogen/tools/dependency_injection.py +249 -0
  371. autogen/tools/experimental/__init__.py +54 -0
  372. autogen/tools/experimental/browser_use/__init__.py +7 -0
  373. autogen/tools/experimental/browser_use/browser_use.py +154 -0
  374. autogen/tools/experimental/code_execution/__init__.py +7 -0
  375. autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
  376. autogen/tools/experimental/crawl4ai/__init__.py +7 -0
  377. autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
  378. autogen/tools/experimental/deep_research/__init__.py +7 -0
  379. autogen/tools/experimental/deep_research/deep_research.py +329 -0
  380. autogen/tools/experimental/duckduckgo/__init__.py +7 -0
  381. autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
  382. autogen/tools/experimental/firecrawl/__init__.py +7 -0
  383. autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
  384. autogen/tools/experimental/google/__init__.py +14 -0
  385. autogen/tools/experimental/google/authentication/__init__.py +11 -0
  386. autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
  387. autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
  388. autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
  389. autogen/tools/experimental/google/drive/__init__.py +9 -0
  390. autogen/tools/experimental/google/drive/drive_functions.py +124 -0
  391. autogen/tools/experimental/google/drive/toolkit.py +88 -0
  392. autogen/tools/experimental/google/model.py +17 -0
  393. autogen/tools/experimental/google/toolkit_protocol.py +19 -0
  394. autogen/tools/experimental/google_search/__init__.py +8 -0
  395. autogen/tools/experimental/google_search/google_search.py +93 -0
  396. autogen/tools/experimental/google_search/youtube_search.py +181 -0
  397. autogen/tools/experimental/messageplatform/__init__.py +17 -0
  398. autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
  399. autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
  400. autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
  401. autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
  402. autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
  403. autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
  404. autogen/tools/experimental/perplexity/__init__.py +7 -0
  405. autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
  406. autogen/tools/experimental/reliable/__init__.py +10 -0
  407. autogen/tools/experimental/reliable/reliable.py +1311 -0
  408. autogen/tools/experimental/searxng/__init__.py +7 -0
  409. autogen/tools/experimental/searxng/searxng_search.py +142 -0
  410. autogen/tools/experimental/tavily/__init__.py +7 -0
  411. autogen/tools/experimental/tavily/tavily_search.py +176 -0
  412. autogen/tools/experimental/web_search_preview/__init__.py +7 -0
  413. autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
  414. autogen/tools/experimental/wikipedia/__init__.py +7 -0
  415. autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
  416. autogen/tools/function_utils.py +412 -0
  417. autogen/tools/tool.py +188 -0
  418. autogen/tools/toolkit.py +86 -0
  419. autogen/types.py +29 -0
  420. autogen/version.py +7 -0
  421. templates/client_template/main.jinja2 +72 -0
  422. templates/config_template/config.jinja2 +7 -0
  423. templates/main.jinja2 +61 -0
@@ -0,0 +1,782 @@
1
+ # Migration Guide: ModelClient V1 to ModelClientV2
2
+
3
+ This guide provides a comprehensive plan for migrating from the legacy ModelClient interface to the new ModelClientV2 interface with rich UnifiedResponse support.
4
+
5
+ ## Table of Contents
6
+ - [Overview](#overview)
7
+ - [Why Migrate?](#why-migrate)
8
+ - [Architecture Comparison](#architecture-comparison)
9
+ - [Migration Strategy](#migration-strategy)
10
+ - [Step-by-Step Migration](#step-by-step-migration)
11
+ - [Backward Compatibility](#backward-compatibility)
12
+ - [Provider-Specific Considerations](#provider-specific-considerations)
13
+ - [Testing Strategy](#testing-strategy)
14
+ - [FAQ](#faq)
15
+
16
+ ## Overview
17
+
18
+ ModelClientV2 introduces a new protocol for LLM clients that returns rich, provider-agnostic responses (UnifiedResponse) while maintaining backward compatibility with the existing ChatCompletion-based interface.
19
+
20
+ ### Key Changes
21
+ - **Rich Response Format**: Returns `UnifiedResponse` with typed content blocks instead of flattened `ChatCompletion`
22
+ - **Direct Content Access**: Use `response.text`, `response.reasoning`, etc. instead of `message_retrieval()`
23
+ - **Forward Compatible**: Handles unknown content types via `GenericContent`
24
+ - **Dual Interface**: Supports both V2 (rich) and V1 (legacy) responses
25
+
26
+ ## Why Migrate?
27
+
28
+ ### Benefits of ModelClientV2
29
+
30
+ 1. **Rich Content Support**: Access reasoning blocks, citations, multimodality, and other provider-specific features
31
+ 2. **Provider Agnostic**: Unified format across OpenAI, Anthropic, Gemini, and other providers
32
+ 3. **Type Safety**: Typed content blocks with enum-based content types
33
+ 4. **Forward Compatibility**: Handles new content types without code changes
34
+ 5. **Better Developer Experience**: Direct property access instead of parsing nested structures
35
+
36
+ ### Example: Before vs After
37
+
38
+ **Before (ModelClient V1):**
39
+ ```python
40
+ # V1 - Flattened ChatCompletion format
41
+ response = client.create(params)
42
+ messages = client.message_retrieval(response)
43
+ content = messages[0] if messages else ""
44
+
45
+ # Reasoning/thinking tokens lost or require provider-specific parsing
46
+ if hasattr(response, 'choices') and hasattr(response.choices[0], 'message'):
47
+ if hasattr(response.choices[0].message, 'reasoning'):
48
+ reasoning = response.choices[0].message.reasoning # Provider-specific
49
+ ```
50
+
51
+ **After (ModelClientV2):**
52
+ ```python
53
+ # V2 - Rich UnifiedResponse format
54
+ response = client.create(params)
55
+ content = response.text # Direct access
56
+ reasoning = response.reasoning # Rich content preserved
57
+ citations = response.get_content_by_type("citation")
58
+
59
+ # Access individual messages with typed content blocks
60
+ for message in response.messages:
61
+ for content_block in message.content:
62
+ if isinstance(content_block, ReasoningContent):
63
+ print(f"Reasoning: {content_block.reasoning}")
64
+ elif isinstance(content_block, TextContent):
65
+ print(f"Text: {content_block.text}")
66
+ ```
67
+
68
+ ## Architecture Comparison
69
+
70
+ ### ModelClient V1 (Legacy)
71
+
72
+ ```python
73
+ class ModelClient(Protocol):
74
+ def create(self, params: dict[str, Any]) -> ModelClientResponseProtocol:
75
+ """Returns ChatCompletion-like response"""
76
+ ...
77
+
78
+ def message_retrieval(self, response) -> list[str]:
79
+ """Extracts text content from response"""
80
+ ...
81
+
82
+ def cost(self, response) -> float: ...
83
+ def get_usage(self, response) -> dict[str, Any]: ...
84
+ ```
85
+
86
+ **Response Format:**
87
+ ```python
88
+ ChatCompletion(
89
+ id="...",
90
+ model="...",
91
+ choices=[
92
+ Choice(
93
+ message=Message(
94
+ role="assistant",
95
+ content="Plain text only" # Rich content flattened
96
+ )
97
+ )
98
+ ]
99
+ )
100
+ ```
101
+
102
+ ### ModelClientV2 (New)
103
+
104
+ ```python
105
+ class ModelClientV2(Protocol):
106
+ def create(self, params: dict[str, Any]) -> UnifiedResponse:
107
+ """Returns rich UnifiedResponse"""
108
+ ...
109
+
110
+ def create_v1_compatible(self, params: dict[str, Any]) -> Any:
111
+ """Backward compatibility method"""
112
+ ...
113
+
114
+ def cost(self, response: UnifiedResponse) -> float: ...
115
+ def get_usage(self, response: UnifiedResponse) -> dict[str, Any]: ...
116
+ # No message_retrieval - use response.text or response.messages directly
117
+ ```
118
+
119
+ **Response Format:**
120
+ ```python
121
+ UnifiedResponse(
122
+ id="...",
123
+ model="...",
124
+ provider="openai",
125
+ messages=[
126
+ UnifiedMessage(
127
+ role="assistant",
128
+ content=[
129
+ TextContent(type="text", text="Main response"),
130
+ ReasoningContent(type="reasoning", reasoning="Let me think..."),
131
+ CitationContent(type="citation", url="...", title="...", snippet="...")
132
+ ]
133
+ )
134
+ ],
135
+ usage={"prompt_tokens": 10, "completion_tokens": 20},
136
+ cost=0.001
137
+ )
138
+ ```
139
+
140
+ ## Migration Strategy
141
+
142
+ ### Phase 1: Implement Dual Interface (Current)
143
+ **Status**: ✅ Completed for OpenAICompletionsClient
144
+
145
+ **Goal**: Add V2 interface while maintaining V1 compatibility
146
+
147
+ ```python
148
+ class OpenAICompletionsClient(ModelClient): # Inherits V1 protocol
149
+ """Implements V2 interface via duck typing"""
150
+
151
+ def create(self, params: dict[str, Any]) -> UnifiedResponse: # V2 method
152
+ """Returns rich UnifiedResponse"""
153
+ ...
154
+
155
+ def message_retrieval(self, response: UnifiedResponse) -> list[str]: # V1 compat
156
+ """Flattens UnifiedResponse to text for legacy code"""
157
+ return [msg.get_text() for msg in response.messages]
158
+
159
+ def create_v1_compatible(self, params: dict[str, Any]) -> dict[str, Any]: # V2 compat
160
+ """Converts UnifiedResponse to ChatCompletion format"""
161
+ response = self.create(params)
162
+ return self._to_chat_completion(response)
163
+ ```
164
+
165
+ ### Phase 2: Update OpenAIWrapper (Next)
166
+ **Status**: 🔄 In Progress
167
+
168
+ **Goal**: Support both V1 and V2 clients in routing layer
169
+
170
+ ```python
171
+ class OpenAIWrapper:
172
+ def create(self, params: dict[str, Any]) -> ModelClientResponseProtocol | UnifiedResponse:
173
+ """Returns appropriate response type based on client"""
174
+ client = self._clients[self._config_list_index]
175
+
176
+ # Detect V2 clients by checking return type
177
+ response = client.create(params)
178
+
179
+ if isinstance(response, UnifiedResponse):
180
+ # V2 client - rich response
181
+ response._client = client # Store client reference
182
+ return response
183
+ else:
184
+ # V1 client - legacy response
185
+ return response
186
+
187
+ def extract_text_or_completion_object(self, response):
188
+ """Handle both V1 and V2 responses"""
189
+ if isinstance(response, UnifiedResponse):
190
+ # V2 - use direct access
191
+ return response.text
192
+ else:
193
+ # V1 - use message_retrieval
194
+ client = self._response_metadata[response.id]["client"]
195
+ return client.message_retrieval(response)
196
+ ```
197
+
198
+ ### Phase 3: Migrate Other Providers (Planned)
199
+ **Status**: 📋 Planned
200
+
201
+ **Priority Order:**
202
+ 1. ✅ OpenAI (Completed - OpenAICompletionsClient)
203
+ 2. 🔄 Gemini (High Priority - complex multimodal support)
204
+ 3. 📋 Anthropic (High Priority - thinking tokens, citations)
205
+ 4. 📋 Bedrock (Medium Priority - supports multiple models)
206
+ 5. 📋 Together.AI, Groq, Mistral (Lower Priority - simpler APIs)
207
+
208
+ ### Phase 4: Update Agent Layer (Future)
209
+ **Status**: 📋 Planned
210
+
211
+ **Goal**: Enable agents to consume rich content directly
212
+
213
+ ```python
214
+ class ConversableAgent:
215
+ def _generate_oai_reply_from_client(self, llm_client, messages, cache, agent):
216
+ response = llm_client.create(params)
217
+
218
+ if isinstance(response, UnifiedResponse):
219
+ # V2 - process rich content
220
+ self._process_reasoning(response.reasoning)
221
+ self._process_citations(response.get_content_by_type("citation"))
222
+ return response.text
223
+ else:
224
+ # V1 - legacy processing
225
+ extracted = self.client.message_retrieval(response)
226
+ return extracted
227
+ ```
228
+
229
+ ### Phase 5: Deprecation (Long-term)
230
+ **Status**: 📋 Planned
231
+
232
+ ## Step-by-Step Migration
233
+
234
+ ### For Client Implementers
235
+
236
+ #### Step 1: Inherit ModelClient (Maintain Compatibility)
237
+ ```python
238
+ from autogen.llm_config.client import ModelClient
239
+
240
+ class MyProviderClient(ModelClient):
241
+ """Inherit V1 protocol for OpenAIWrapper compatibility"""
242
+ pass
243
+ ```
244
+
245
+ #### Step 2: Implement V2 create() Method
246
+ ```python
247
+ from autogen.llm_clients.models import (
248
+ UnifiedResponse, UnifiedMessage, TextContent, ReasoningContent
249
+ )
250
+
251
+ def create(self, params: dict[str, Any]) -> UnifiedResponse: # type: ignore[override]
252
+ """Override with rich return type"""
253
+
254
+ # 1. Call provider API
255
+ raw_response = self._call_provider_api(params)
256
+
257
+ # 2. Transform to UnifiedResponse with rich content blocks
258
+ messages = []
259
+ for choice in raw_response.choices:
260
+ content_blocks = []
261
+
262
+ # Extract text content
263
+ if choice.message.content:
264
+ content_blocks.append(TextContent(
265
+ type="text",
266
+ text=choice.message.content
267
+ ))
268
+
269
+ # Extract reasoning (provider-specific)
270
+ if hasattr(choice.message, 'reasoning') and choice.message.reasoning:
271
+ content_blocks.append(ReasoningContent(
272
+ type="reasoning",
273
+ reasoning=choice.message.reasoning
274
+ ))
275
+
276
+ # Add more content types as needed...
277
+
278
+ messages.append(UnifiedMessage(
279
+ role=choice.message.role,
280
+ content=content_blocks
281
+ ))
282
+
283
+ # 3. Create UnifiedResponse
284
+ return UnifiedResponse(
285
+ id=raw_response.id,
286
+ model=raw_response.model,
287
+ provider="my_provider",
288
+ messages=messages,
289
+ usage={
290
+ "prompt_tokens": raw_response.usage.prompt_tokens,
291
+ "completion_tokens": raw_response.usage.completion_tokens,
292
+ "total_tokens": raw_response.usage.total_tokens
293
+ },
294
+ cost=self._calculate_cost(raw_response)
295
+ )
296
+ ```
297
+
298
+ #### Step 3: Maintain V1 message_retrieval()
299
+ ```python
300
+ def message_retrieval(self, response: UnifiedResponse) -> list[str]: # type: ignore[override]
301
+ """Flatten to text for V1 compatibility"""
302
+ return [msg.get_text() for msg in response.messages]
303
+ ```
304
+
305
+ #### Step 4: Implement create_v1_compatible()
306
+ ```python
307
+ def create_v1_compatible(self, params: dict[str, Any]) -> dict[str, Any]:
308
+ """Convert rich response to legacy format"""
309
+ response = self.create(params)
310
+
311
+ # Convert UnifiedResponse to ChatCompletion-like dict
312
+ return {
313
+ "id": response.id,
314
+ "model": response.model,
315
+ "choices": [
316
+ {
317
+ "index": 0,
318
+ "message": {
319
+ "role": msg.role,
320
+ "content": msg.get_text()
321
+ },
322
+ "finish_reason": "stop"
323
+ }
324
+ for msg in response.messages
325
+ ],
326
+ "usage": response.usage
327
+ }
328
+ ```
329
+
330
+ #### Step 5: Update cost() and get_usage()
331
+ ```python
332
+ def cost(self, response: UnifiedResponse) -> float:
333
+ """Extract cost from UnifiedResponse"""
334
+ return response.cost or 0.0
335
+
336
+ @staticmethod
337
+ def get_usage(response: UnifiedResponse) -> dict[str, Any]:
338
+ """Extract usage from UnifiedResponse"""
339
+ return {
340
+ "prompt_tokens": response.usage.get("prompt_tokens", 0),
341
+ "completion_tokens": response.usage.get("completion_tokens", 0),
342
+ "total_tokens": response.usage.get("total_tokens", 0),
343
+ "cost": response.cost or 0.0,
344
+ "model": response.model
345
+ }
346
+ ```
347
+
348
+ ### For Client Users
349
+
350
+ #### Step 1: Check Response Type
351
+ ```python
352
+ response = llm_client.create(params)
353
+
354
+ if isinstance(response, UnifiedResponse):
355
+ # V2 client - use rich interface
356
+ text = response.text
357
+ reasoning = response.reasoning
358
+ else:
359
+ # V1 client - use legacy interface
360
+ text = llm_client.message_retrieval(response)
361
+ ```
362
+
363
+ #### Step 2: Use Direct Property Access
364
+ ```python
365
+ # Instead of:
366
+ messages = client.message_retrieval(response)
367
+ content = messages[0] if messages else ""
368
+
369
+ # Use:
370
+ content = response.text # Direct access
371
+ ```
372
+
373
+ #### Step 3: Access Rich Content
374
+ ```python
375
+ # Reasoning blocks (OpenAI o1/o3, Anthropic thinking)
376
+ if response.reasoning:
377
+ print(f"Chain of thought: {response.reasoning}")
378
+
379
+ # Citations (web search, RAG)
380
+ citations = response.get_content_by_type("citation")
381
+ for citation in citations:
382
+ print(f"Source: {citation.url} - {citation.title}")
383
+
384
+ # Images (Gemini, DALL-E)
385
+ images = response.get_content_by_type("image")
386
+ for image in images:
387
+ print(f"Generated image: {image.image_url or image.data_uri}")
388
+ ```
389
+
390
+ #### Step 4: Handle Unknown Content Types
391
+ ```python
392
+ # Forward compatibility with GenericContent
393
+ for message in response.messages:
394
+ for content_block in message.content:
395
+ if isinstance(content_block, GenericContent):
396
+ print(f"Unknown type: {content_block.type}")
397
+ # Access fields dynamically
398
+ all_fields = content_block.get_all_fields()
399
+ print(f"Fields: {all_fields}")
400
+ ```
401
+
402
+ ## Backward Compatibility
403
+
404
+ ### OpenAIWrapper Integration
405
+
406
+ **Current State**: OpenAIWrapper supports V2 clients through duck typing
407
+
408
+ ```python
409
+ # OpenAIWrapper calls client.create() - works with both V1 and V2
410
+ response = client.create(params)
411
+
412
+ # For text extraction, OpenAIWrapper detects response type
413
+ if hasattr(response, 'text'): # UnifiedResponse
414
+ text = response.text
415
+ elif hasattr(response, 'message_retrieval_function'): # V1 with stored metadata
416
+ text = response.message_retrieval_function(response)
417
+ ```
418
+
419
+ ### Agent Compatibility
420
+
421
+ **Current State**: Agents work with both V1 and V2 clients
422
+
423
+ ```python
424
+ # ConversableAgent._generate_oai_reply_from_client()
425
+ response = llm_client.create(params)
426
+
427
+ # Extract text - works with both formats
428
+ extracted_response = self.client.extract_text_or_completion_object(response)
429
+ ```
430
+
431
+ **No Breaking Changes**: Existing agents continue to work without modifications
432
+
433
+ ## Provider-Specific Considerations
434
+
435
+ ### OpenAI (Completed ✅)
436
+ **Implementation**: `OpenAICompletionsClient`
437
+
438
+ **Supported Content Types:**
439
+ - `TextContent` - Standard text responses
440
+ - `ReasoningContent` - O1/O3 reasoning tokens
441
+ - `ToolCallContent` - Function/tool calls
442
+ - `ImageContent` - DALL-E generated images (future)
443
+
444
+ **Special Handling:**
445
+ - O1 models: Reasoning tokens extracted to ReasoningContent
446
+ - Streaming: Not yet implemented for V2 (uses V1 compatibility)
447
+ - Azure: Works through same client with different base_url
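+ 
+ For example, once reasoning tokens have been extracted, callers can read them directly from the response. This is a brief usage sketch; `client` is assumed to be an already-configured `OpenAICompletionsClient`, and the model name is illustrative:
+ 
+ ```python
+ response = client.create({"model": "o1", "messages": [{"role": "user", "content": "Prove it."}]})
+ 
+ print(response.text)           # main answer
+ if response.reasoning:         # populated when the model emits reasoning tokens
+     print(response.reasoning)
+ ```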
448
+
449
+ ### Gemini (High Priority 🔄)
450
+ **Complexity**: High - extensive multimodal support
451
+
452
+ **Supported Content Types:**
453
+ - `TextContent` - Text responses
454
+ - `ImageContent` - Generated images (Imagen integration)
455
+ - `AudioContent` - Generated audio (future)
456
+ - `VideoContent` - Video understanding inputs
457
+ - `ToolCallContent` - Function calling
458
+
459
+ **Migration Challenges:**
460
+ - Complex content part structure (text, inline_data, file_data)
461
+ - Multiple generation modes (generateContent, generateImages)
462
+ - Safety ratings and finish reasons
463
+ - Grounding metadata and citations
464
+
465
+ **Recommended Approach:**
466
+ ```python
467
+ class GeminiStatelessClient(ModelClient):
468
+ def create(self, params: dict[str, Any]) -> UnifiedResponse:
469
+ # Detect generation type from params
470
+ if "generation_type" in params and params["generation_type"] == "image":
471
+ return self._create_image_generation(params)
472
+ else:
473
+ return self._create_text_generation(params)
474
+
475
+ def _create_text_generation(self, params) -> UnifiedResponse:
476
+ # Convert OAI messages to Gemini format
477
+ contents = oai_messages_to_gemini_messages(params["messages"])
478
+
479
+ # Call Gemini API
480
+ response = self.client.models.generate_content(
481
+ model=params["model"],
482
+ contents=contents,
483
+ config=self._build_generation_config(params)
484
+ )
485
+
486
+ # Transform to UnifiedResponse
487
+ return self._to_unified_response(response)
488
+
489
+ def _to_unified_response(self, gemini_response) -> UnifiedResponse:
490
+ messages = []
491
+ for candidate in gemini_response.candidates:
492
+ content_blocks = []
493
+
494
+ # Extract content parts (text and inline data)
495
+ for part in candidate.content.parts:
496
+ if part.text:
497
+ content_blocks.append(TextContent(type="text", text=part.text))
498
+ elif part.inline_data:
499
+ # Handle inline images/audio
500
+ mime_type = part.inline_data.mime_type
501
+ if mime_type.startswith("image/"):
502
+ content_blocks.append(ImageContent(
503
+ type="image",
504
+ data_uri=f"data:{mime_type};base64,{part.inline_data.data}"
505
+ ))
506
+
507
+ messages.append(UnifiedMessage(
508
+ role=self._normalize_role(candidate.content.role),
509
+ content=content_blocks
510
+ ))
511
+
512
+ return UnifiedResponse(
513
+ id=f"gemini-{uuid.uuid4()}",
514
+ model=gemini_response.model_name,
515
+ provider="gemini",
516
+ messages=messages,
517
+ usage={
518
+ "prompt_tokens": gemini_response.usage_metadata.prompt_token_count,
519
+ "completion_tokens": gemini_response.usage_metadata.candidates_token_count,
520
+ "total_tokens": gemini_response.usage_metadata.total_token_count
521
+ },
522
+ cost=self._calculate_cost(gemini_response)
523
+ )
524
+ ```
525
+
526
+ ### Anthropic (High Priority 📋)
527
+ **Complexity**: Medium - thinking tokens and citations
528
+
529
+ **Supported Content Types:**
530
+ - `TextContent` - Standard responses
531
+ - `ReasoningContent` - Extended thinking mode
532
+ - `CitationContent` - Grounded responses with sources
533
+ - `ToolCallContent` - Tool use
534
+
535
+ **Migration Approach:**
536
+ ```python
537
+ class AnthropicClient(ModelClient):
538
+ def _to_unified_response(self, anthropic_response) -> UnifiedResponse:
539
+ content_blocks = []
540
+
541
+ # Extract content blocks (text, thinking, tool use)
542
+ for block in anthropic_response.content:
543
+ if block.type == "text":
544
+ content_blocks.append(TextContent(type="text", text=block.text))
545
+ elif block.type == "thinking":
546
+ content_blocks.append(ReasoningContent(
547
+ type="reasoning",
548
+ reasoning=block.thinking
549
+ ))
550
+ elif block.type == "tool_use":
551
+ content_blocks.append(ToolCallContent(
552
+ type="tool_call",
553
+ id=block.id,
554
+ name=block.name,
555
+ arguments=json.dumps(block.input)
556
+ ))
557
+
558
+ messages = [UnifiedMessage(role="assistant", content=content_blocks)]
559
+
560
+ return UnifiedResponse(
561
+ id=anthropic_response.id,
562
+ model=anthropic_response.model,
563
+ provider="anthropic",
564
+ messages=messages,
565
+ usage={
566
+ "prompt_tokens": anthropic_response.usage.input_tokens,
567
+ "completion_tokens": anthropic_response.usage.output_tokens,
568
+ "total_tokens": anthropic_response.usage.input_tokens + anthropic_response.usage.output_tokens
569
+ },
570
+ cost=self._calculate_cost(anthropic_response)
571
+ )
572
+ ```
573
+
574
+ ### Bedrock (Medium Priority 📋)
575
+ **Complexity**: Medium - wraps multiple providers
576
+
577
+ **Challenge**: Different underlying models (Claude, Llama, etc.) with different response formats
578
+
579
+ **Approach**: Detect model family and delegate to appropriate transformer
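+ 
+ A minimal dispatch sketch under that approach is shown below, using the imports from Step 2 above. It assumes the raw Bedrock payload has already been fetched, and the per-family helpers (`_from_anthropic_payload`, `_from_llama_payload`) are hypothetical placeholders for the family-specific parsing:
+ 
+ ```python
+ class BedrockClient(ModelClient):
+     def _to_unified_response(self, model_id: str, raw_response: dict) -> UnifiedResponse:
+         # Route on the model family encoded in the Bedrock model id
+         if model_id.startswith("anthropic."):
+             return self._from_anthropic_payload(model_id, raw_response)  # hypothetical helper
+         if model_id.startswith("meta."):
+             return self._from_llama_payload(model_id, raw_response)  # hypothetical helper
+ 
+         # Fallback: wrap whatever text is available in a plain TextContent block
+         text = str(raw_response.get("output", ""))
+         return UnifiedResponse(
+             id=raw_response.get("id", "bedrock-response"),
+             model=model_id,
+             provider="bedrock",
+             messages=[UnifiedMessage(role="assistant", content=[TextContent(type="text", text=text)])],
+             usage=raw_response.get("usage", {}),
+         )
+ ```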
580
+
581
+ ## Testing Strategy
582
+
583
+ ### Unit Tests
584
+
585
+ **Test V2 Protocol Compliance:**
586
+ ```python
587
+ def test_v2_protocol_compliance():
588
+ """Verify client implements ModelClientV2 interface"""
589
+ client = MyProviderClient()
590
+
591
+ # Check required methods exist
592
+ assert hasattr(client, "create")
593
+ assert hasattr(client, "create_v1_compatible")
594
+ assert hasattr(client, "cost")
595
+ assert hasattr(client, "get_usage")
596
+
597
+ # Check return types
598
+ response = client.create({"model": "test", "messages": [...]})
599
+ assert isinstance(response, UnifiedResponse)
600
+
601
+ v1_response = client.create_v1_compatible({"model": "test", "messages": [...]})
602
+ assert isinstance(v1_response, dict)
603
+ assert "choices" in v1_response
604
+ ```
605
+
606
+ **Test Rich Content Extraction:**
607
+ ```python
608
+ def test_rich_content_types():
609
+ """Verify all content types are properly extracted"""
610
+ client = MyProviderClient()
611
+ response = client.create(params)
612
+
613
+ # Test direct access
614
+ assert isinstance(response.text, str)
615
+ assert response.text != ""
616
+
617
+ # Test content type filtering
618
+ reasoning_blocks = response.get_content_by_type("reasoning")
619
+ assert all(isinstance(b, ReasoningContent) for b in reasoning_blocks)
620
+
621
+ citations = response.get_content_by_type("citation")
622
+ assert all(isinstance(c, CitationContent) for c in citations)
623
+ ```
624
+
625
+ **Test V1 Compatibility:**
626
+ ```python
627
+ def test_v1_backward_compatibility():
628
+ """Verify V1 interface still works"""
629
+ client = MyProviderClient()
630
+
631
+ # V1 method should work
632
+ response = client.create(params)
633
+ messages = client.message_retrieval(response)
634
+ assert isinstance(messages, list)
635
+ assert all(isinstance(m, str) for m in messages)
636
+
637
+ # V1 compatible response
638
+ v1_response = client.create_v1_compatible(params)
639
+ assert "choices" in v1_response
640
+ assert "message" in v1_response["choices"][0]
641
+ ```
642
+
643
+ ### Integration Tests
644
+
645
+ **Test with OpenAIWrapper:**
646
+ ```python
647
+ def test_openai_wrapper_integration():
648
+ """Verify V2 client works with OpenAIWrapper"""
649
+ config_list = [{
650
+ "model": "gpt-4o",
651
+ "api_key": "test",
652
+ "api_type": "openai"
653
+ }]
654
+
655
+ wrapper = OpenAIWrapper(config_list=config_list)
656
+ response = wrapper.create({"messages": [{"role": "user", "content": "Hello"}]})
657
+
658
+ # Should return UnifiedResponse
659
+ assert isinstance(response, UnifiedResponse)
660
+ assert response.text
661
+ ```
662
+
663
+ **Test with ConversableAgent:**
664
+ ```python
665
+ def test_agent_integration():
666
+ """Verify agents work with V2 clients"""
667
+ agent = ConversableAgent(
668
+ name="assistant",
669
+ llm_config={"model": "gpt-4o", "api_key": "test"}
670
+ )
671
+
672
+ # Agent should handle V2 responses transparently
673
+ reply = agent.generate_reply(
674
+ messages=[{"role": "user", "content": "Hello"}]
675
+ )
676
+
677
+ assert isinstance(reply, str)
678
+ assert reply != ""
679
+ ```
680
+
681
+ ### Test Fixtures
682
+
683
+ **Create reusable fixtures for testing:**
684
+ ```python
685
+ @pytest.fixture
686
+ def mock_v2_response():
687
+ """Mock UnifiedResponse for testing"""
688
+ return UnifiedResponse(
689
+ id="test-123",
690
+ model="test-model",
691
+ provider="test",
692
+ messages=[
693
+ UnifiedMessage(
694
+ role="assistant",
695
+ content=[
696
+ TextContent(type="text", text="Test response"),
697
+ ReasoningContent(type="reasoning", reasoning="Test reasoning")
698
+ ]
699
+ )
700
+ ],
701
+ usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
702
+ cost=0.001
703
+ )
704
+
705
+ @pytest.fixture
706
+ def mock_v2_client(mock_v2_response):
707
+ """Mock V2 client for testing"""
708
+ class MockV2Client:
709
+ def create(self, params):
710
+ return mock_v2_response
711
+
712
+ def create_v1_compatible(self, params):
713
+ return {
714
+ "id": mock_v2_response.id,
715
+ "model": mock_v2_response.model,
716
+ "choices": [{"message": {"content": mock_v2_response.text}}]
717
+ }
718
+
719
+ def cost(self, response):
720
+ return response.cost
721
+
722
+ @staticmethod
723
+ def get_usage(response):
724
+ return {
725
+ "prompt_tokens": response.usage["prompt_tokens"],
726
+ "completion_tokens": response.usage["completion_tokens"],
727
+ "total_tokens": response.usage["total_tokens"],
728
+ "cost": response.cost,
729
+ "model": response.model
730
+ }
731
+
732
+ return MockV2Client()
733
+ ```
734
+
735
+ ## FAQ
736
+
737
+ ### Q: Do I need to migrate my existing client immediately?
738
+ **A:** No. V1 clients continue to work today and will remain supported until a formal deprecation is announced in Phase 5. Migration is only needed if you want to support rich content features.
739
+
740
+ ### Q: Can I use V1 and V2 clients together?
741
+ **A:** Yes. OpenAIWrapper supports both simultaneously and handles routing automatically.
742
+
743
+ ### Q: What if my provider doesn't support rich content?
744
+ **A:** You can still migrate - just return `UnifiedResponse` with only `TextContent` blocks. This provides API consistency even without rich features.
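+ 
+ A text-only response might look like this (a sketch using the models above; the field values are placeholders):
+ 
+ ```python
+ UnifiedResponse(
+     id="resp-1",
+     model="my-model",
+     provider="my_provider",
+     messages=[
+         UnifiedMessage(
+             role="assistant",
+             content=[TextContent(type="text", text="Plain text answer")],
+         )
+     ],
+     usage={"prompt_tokens": 12, "completion_tokens": 5, "total_tokens": 17},
+     cost=0.0,
+ )
+ ```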
745
+
746
+ ### Q: How do I handle streaming with V2?
747
+ **A:** Streaming support for V2 is planned. For now, use `create_v1_compatible()` for streaming use cases.
748
+
749
+ ### Q: Will this break my existing agents?
750
+ **A:** No. All changes are backward compatible. Agents will automatically work with both V1 and V2 clients.
751
+
752
+ ### Q: How do I test V2 clients without API keys?
753
+ **A:** Use the provided test fixtures (`mock_v2_client`, `mock_v2_response`) for unit tests. Integration tests can use `credentials_responses_*` fixtures that work without actual API calls.
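+ 
+ For instance, a unit test can consume the fixtures from the Testing Strategy section like this (a sketch; the test name and assertion are illustrative, and it assumes `response.text` includes the mock's `TextContent`):
+ 
+ ```python
+ def test_text_extraction(mock_v2_client):
+     # No network call: the mock client returns the canned UnifiedResponse
+     response = mock_v2_client.create({"model": "test", "messages": []})
+     assert "Test response" in response.text
+ ```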
754
+
755
+ ### Q: What's the performance impact?
756
+ **A:** Minimal. UnifiedResponse is lightweight and most overhead is in API calls themselves. The rich content structure is lazy-evaluated where possible.
757
+
758
+ ### Q: Can I add custom content types?
759
+ **A:** Yes! Use `ContentParser.register()` to add custom content types, or use `GenericContent` for one-off cases.
760
+
761
+ ### Q: How do I migrate provider-specific features?
762
+ **A:** Use the `extra` field in content blocks or add fields to `GenericContent`:
763
+ ```python
764
+ GenericContent(
765
+ type="custom_provider_feature",
766
+ custom_field="value",
767
+ another_field=123
768
+ )
769
+ ```
770
+
771
+ ## Support and Feedback
772
+
773
+ - **Documentation**: See [ModelClientV2 protocol](/autogen/llm_clients/client_v2.py)
774
+ - **Examples**: Check [test_client_v2.py](/test/llm_clients/test_client_v2.py)
775
+ - **Issues**: Report migration issues on GitHub with `[v2-migration]` tag
776
+ - **Community**: Discuss migration strategies in AG2 Discord #client-development channel
777
+
778
+ ---
779
+
780
+ **Last Updated**: 2025-11-13
781
+ **Version**: 1.0
782
+ **Status**: Phase 1 Complete (OpenAI), Phase 2 In Progress