ag2 0.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ag2-0.10.2.dist-info/METADATA +819 -0
- ag2-0.10.2.dist-info/RECORD +423 -0
- ag2-0.10.2.dist-info/WHEEL +4 -0
- ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
- ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
- autogen/__init__.py +88 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +426 -0
- autogen/_website/generate_mkdocs.py +1216 -0
- autogen/_website/notebook_processor.py +475 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +413 -0
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +86 -0
- autogen/a2a/client.py +357 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +207 -0
- autogen/agentchat/__init__.py +47 -0
- autogen/agentchat/agent.py +180 -0
- autogen/agentchat/assistant_agent.py +86 -0
- autogen/agentchat/chat.py +325 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +432 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +578 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +189 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
- autogen/agentchat/contrib/rag/query_engine.py +76 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
- autogen/agentchat/contrib/swarm_agent.py +1404 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +224 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +304 -0
- autogen/agentchat/conversable_agent.py +4307 -0
- autogen/agentchat/group/__init__.py +67 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +39 -0
- autogen/agentchat/group/context_variables.py +182 -0
- autogen/agentchat/group/events/transition_events.py +111 -0
- autogen/agentchat/group/group_tool_executor.py +324 -0
- autogen/agentchat/group/group_utils.py +659 -0
- autogen/agentchat/group/guardrails.py +179 -0
- autogen/agentchat/group/handoffs.py +303 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +291 -0
- autogen/agentchat/group/on_condition.py +55 -0
- autogen/agentchat/group/on_context_condition.py +51 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +160 -0
- autogen/agentchat/group/patterns/manual.py +177 -0
- autogen/agentchat/group/patterns/pattern.py +295 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +24 -0
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +140 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/function_target.py +245 -0
- autogen/agentchat/group/targets/group_chat_target.py +133 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +424 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1832 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
- autogen/agentchat/realtime/experimental/function_observer.py +84 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +114 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +74 -0
- autogen/agents/contrib/time/time_tool_agent.py +52 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
- autogen/agents/experimental/document_agent/document_agent.py +643 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +376 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
- autogen/agents/experimental/document_agent/parser_utils.py +134 -0
- autogen/agents/experimental/document_agent/url_utils.py +417 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +76 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +70 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +71 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +97 -0
- autogen/cache/in_memory_cache.py +54 -0
- autogen/cache/redis_cache.py +119 -0
- autogen/code_utils.py +598 -0
- autogen/coding/__init__.py +30 -0
- autogen/coding/base.py +120 -0
- autogen/coding/docker_commandline_code_executor.py +283 -0
- autogen/coding/factory.py +56 -0
- autogen/coding/func_with_reqs.py +203 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +160 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +224 -0
- autogen/coding/jupyter/jupyter_code_executor.py +154 -0
- autogen/coding/jupyter/local_jupyter_server.py +164 -0
- autogen/coding/local_commandline_code_executor.py +341 -0
- autogen/coding/markdown_code_extractor.py +44 -0
- autogen/coding/utils.py +55 -0
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/doc_utils.py +35 -0
- autogen/environments/__init__.py +10 -0
- autogen/environments/docker_python_environment.py +365 -0
- autogen/environments/python_environment.py +125 -0
- autogen/environments/system_python_environment.py +85 -0
- autogen/environments/venv_python_environment.py +220 -0
- autogen/environments/working_directory.py +74 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1016 -0
- autogen/events/base_event.py +100 -0
- autogen/events/client_events.py +168 -0
- autogen/events/helpers.py +44 -0
- autogen/events/print_event.py +45 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +75 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +206 -0
- autogen/fast_depends/core/model.py +527 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +30 -0
- autogen/fast_depends/dependencies/provider.py +40 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +272 -0
- autogen/fast_depends/utils.py +177 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +173 -0
- autogen/import_utils.py +539 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
- autogen/interop/langchain/langchain_tool.py +78 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +178 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
- autogen/interop/registry.py +70 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +61 -0
- autogen/io/run_response.py +294 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +214 -0
- autogen/json_utils.py +42 -0
- autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
- autogen/llm_clients/__init__.py +77 -0
- autogen/llm_clients/client_v2.py +122 -0
- autogen/llm_clients/models/__init__.py +55 -0
- autogen/llm_clients/models/content_blocks.py +389 -0
- autogen/llm_clients/models/unified_message.py +145 -0
- autogen/llm_clients/models/unified_response.py +83 -0
- autogen/llm_clients/openai_completions_client.py +444 -0
- autogen/llm_config/__init__.py +11 -0
- autogen/llm_config/client.py +59 -0
- autogen/llm_config/config.py +461 -0
- autogen/llm_config/entry.py +169 -0
- autogen/llm_config/types.py +37 -0
- autogen/llm_config/utils.py +223 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +129 -0
- autogen/logger/file_logger.py +262 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +524 -0
- autogen/math_utils.py +338 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/helpers.py +45 -0
- autogen/mcp/mcp_client.py +349 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +399 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +946 -0
- autogen/messages/base_message.py +108 -0
- autogen/messages/client_messages.py +172 -0
- autogen/messages/print_message.py +48 -0
- autogen/oai/__init__.py +61 -0
- autogen/oai/anthropic.py +1516 -0
- autogen/oai/bedrock.py +800 -0
- autogen/oai/cerebras.py +302 -0
- autogen/oai/client.py +1658 -0
- autogen/oai/client_utils.py +196 -0
- autogen/oai/cohere.py +494 -0
- autogen/oai/gemini.py +1045 -0
- autogen/oai/gemini_types.py +156 -0
- autogen/oai/groq.py +319 -0
- autogen/oai/mistral.py +311 -0
- autogen/oai/oai_models/__init__.py +23 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +86 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +97 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
- autogen/oai/oai_models/completion_usage.py +59 -0
- autogen/oai/ollama.py +657 -0
- autogen/oai/openai_responses.py +451 -0
- autogen/oai/openai_utils.py +897 -0
- autogen/oai/together.py +387 -0
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +197 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/retrieve_utils.py +490 -0
- autogen/runtime_logging.py +161 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/token_count_utils.py +280 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +40 -0
- autogen/tools/dependency_injection.py +249 -0
- autogen/tools/experimental/__init__.py +54 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +154 -0
- autogen/tools/experimental/code_execution/__init__.py +7 -0
- autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +329 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
- autogen/tools/experimental/firecrawl/__init__.py +7 -0
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1311 -0
- autogen/tools/experimental/searxng/__init__.py +7 -0
- autogen/tools/experimental/searxng/searxng_search.py +142 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +176 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
- autogen/tools/function_utils.py +412 -0
- autogen/tools/tool.py +188 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +72 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
"""
|
|
6
|
+
Unified message format supporting all provider features.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from enum import Enum
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from pydantic import BaseModel, Field
|
|
13
|
+
|
|
14
|
+
from .content_blocks import (
|
|
15
|
+
BaseContent,
|
|
16
|
+
CitationContent,
|
|
17
|
+
ContentBlock,
|
|
18
|
+
ReasoningContent,
|
|
19
|
+
ToolCallContent,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class UserRoleEnum(str, Enum):
|
|
24
|
+
"""Standard message roles with strict typing."""
|
|
25
|
+
|
|
26
|
+
USER = "user"
|
|
27
|
+
ASSISTANT = "assistant"
|
|
28
|
+
SYSTEM = "system"
|
|
29
|
+
TOOL = "tool"
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
# Union type: strict typing for known roles, flexible string for unknown
|
|
33
|
+
UserRoleType = UserRoleEnum | str
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def normalize_role(role: str | None) -> UserRoleType:
|
|
37
|
+
"""
|
|
38
|
+
Normalize role string to UserRoleEnum for known roles, or return as-is for unknown roles.
|
|
39
|
+
|
|
40
|
+
This function converts standard role strings to type-safe UserRoleEnum values while
|
|
41
|
+
preserving unknown/custom roles as plain strings for forward compatibility.
|
|
42
|
+
|
|
43
|
+
Args:
|
|
44
|
+
role: Role string from API response (e.g., "user", "assistant", "system", "tool")
|
|
45
|
+
|
|
46
|
+
Returns:
|
|
47
|
+
UserRoleEnum for known roles, or original string for unknown/custom roles
|
|
48
|
+
|
|
49
|
+
Examples:
|
|
50
|
+
>>> normalize_role("user")
|
|
51
|
+
UserRoleEnum.USER
|
|
52
|
+
>>> normalize_role("assistant")
|
|
53
|
+
UserRoleEnum.ASSISTANT
|
|
54
|
+
>>> normalize_role("custom_role")
|
|
55
|
+
"custom_role"
|
|
56
|
+
"""
|
|
57
|
+
if not role:
|
|
58
|
+
return UserRoleEnum.ASSISTANT # Default fallback
|
|
59
|
+
|
|
60
|
+
# Map string roles to enum values
|
|
61
|
+
role_mapping = {
|
|
62
|
+
"user": UserRoleEnum.USER,
|
|
63
|
+
"assistant": UserRoleEnum.ASSISTANT,
|
|
64
|
+
"system": UserRoleEnum.SYSTEM,
|
|
65
|
+
"tool": UserRoleEnum.TOOL,
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
# Return enum for known roles, original string for unknown roles
|
|
69
|
+
return role_mapping.get(role.lower(), role)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class UnifiedMessage(BaseModel):
    """Unified message format supporting all provider features.

    This message format can represent:
    - Text, images, audio, video
    - Reasoning blocks (OpenAI o1/o3, Anthropic)
    - Citations (web search results)
    - Tool calls and results
    - Any future content types via GenericContent
    - Any future role types via the extensible role field

    The role field uses UserRoleType, which provides:
    - Type-safe enum values for standard roles (UserRoleEnum.USER, etc.)
    - String literal typing for known roles ("user", "assistant", "system", "tool")
    - Flexible string fallback for unknown/future provider-specific roles
    """

    role: UserRoleType  # Type-safe for known roles, flexible for unknown
    content: list[ContentBlock]  # Rich, typed content blocks

    # Metadata
    name: str | None = None
    metadata: dict[str, Any] = Field(default_factory=dict)  # Provider-specific extras

    def get_text(self) -> str:
        """Extract all text content as a single space-joined string.

        Delegates to each content block's get_text(); empty fragments are
        skipped so they do not produce stray separators.
        """
        return " ".join(fragment for block in self.content if (fragment := block.get_text()))

    def get_reasoning(self) -> list[ReasoningContent]:
        """Return every reasoning block contained in this message."""
        return [block for block in self.content if isinstance(block, ReasoningContent)]

    def get_citations(self) -> list[CitationContent]:
        """Return every citation block contained in this message."""
        return [block for block in self.content if isinstance(block, CitationContent)]

    def get_tool_calls(self) -> list[ToolCallContent]:
        """Return every tool-call block contained in this message."""
        return [block for block in self.content if isinstance(block, ToolCallContent)]

    def get_content_by_type(self, content_type: str) -> list[BaseContent]:
        """Get all content blocks whose ``type`` field matches.

        This is especially useful for unknown types handled by GenericContent.

        Args:
            content_type: The type string to filter by (e.g., "text",
                "reasoning", "reflection").

        Returns:
            List of content blocks matching the type.
        """
        return [block for block in self.content if block.type == content_type]

    def is_standard_role(self) -> bool:
        """Check whether this message uses a standard role.

        Returns:
            True if the role is one of the standard roles (user, assistant,
            system, tool), False for custom/future roles.
        """
        # Enum-typed roles are standard by construction.
        if isinstance(self.role, UserRoleEnum):
            return True
        # String roles are standard exactly when they name an enum value.
        try:
            UserRoleEnum(self.role)
        except ValueError:
            return False
        return True
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
"""
|
|
6
|
+
Unified response format for all LLM providers.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from typing import Any, ClassVar
|
|
10
|
+
|
|
11
|
+
from pydantic import BaseModel, Field
|
|
12
|
+
|
|
13
|
+
from .content_blocks import BaseContent, ReasoningContent
|
|
14
|
+
from .unified_message import UnifiedMessage
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class UnifiedResponse(BaseModel):
    """Provider-agnostic response format.

    This response format can represent responses from any LLM provider while
    preserving all provider-specific features (reasoning, citations, etc.).

    Features:
    - Provider agnostic (OpenAI, Anthropic, Gemini, etc.)
    - Rich content blocks (text, images, reasoning, citations)
    - Usage tracking and cost calculation
    - Provider-specific metadata preservation
    - Serializable (no attached functions)
    - Extensible status field for provider-specific statuses
    """

    # Known standard status values (for reference)
    STANDARD_STATUSES: ClassVar[list[str]] = ["completed", "in_progress", "failed"]

    id: str
    model: str
    messages: list[UnifiedMessage]

    # Usage tracking
    usage: dict[str, int] = Field(default_factory=dict)  # prompt_tokens, completion_tokens, etc.
    cost: float | None = None

    # Provider-specific
    provider: str  # "openai", "anthropic", "gemini", etc.
    provider_metadata: dict[str, Any] = Field(default_factory=dict)  # Raw provider data if needed

    # Status - extensible to support provider-specific status values
    finish_reason: str | None = None
    status: str | None = None  # Extensible - accepts any string, standard: "completed", "in_progress", "failed"

    @property
    def text(self) -> str:
        """Quick access to text content from all messages, space-joined."""
        # join over an empty sequence yields "", matching the no-messages case.
        return " ".join(message.get_text() for message in self.messages)

    @property
    def reasoning(self) -> list[ReasoningContent]:
        """Quick access to reasoning blocks gathered from all messages."""
        collected: list[ReasoningContent] = []
        for message in self.messages:
            collected.extend(message.get_reasoning())
        return collected

    def get_content_by_type(self, content_type: str) -> list[BaseContent]:
        """Get all content blocks of a specific type across all messages.

        This is especially useful for unknown types handled by GenericContent.

        Args:
            content_type: The type string to filter by (e.g., "text",
                "reasoning", "reflection").

        Returns:
            List of content blocks matching the type across all messages.
        """
        matches: list[BaseContent] = []
        for message in self.messages:
            matches.extend(message.get_content_by_type(content_type))
        return matches

    def is_standard_status(self) -> bool:
        """Check whether this response uses a standard status value.

        Returns:
            True if status is one of the standard statuses (completed,
            in_progress, failed); False for custom statuses, None, or "".
        """
        # Falsy status (None or "") is never standard.
        if not self.status:
            return False
        return self.status in self.STANDARD_STATUSES
|
|
@@ -0,0 +1,444 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
"""
|
|
6
|
+
OpenAI Chat Completions API Client implementing ModelClientV2 and ModelClient protocols.
|
|
7
|
+
|
|
8
|
+
This client handles the OpenAI Chat Completions API (client.chat.completions.create)
|
|
9
|
+
which returns rich responses with:
|
|
10
|
+
- Reasoning blocks (o1, o3 models with 'reasoning' field)
|
|
11
|
+
- Tool calls and function execution
|
|
12
|
+
- Multimodal content (text, images)
|
|
13
|
+
- Standard chat messages
|
|
14
|
+
|
|
15
|
+
The client preserves all provider-specific features in UnifiedResponse format
|
|
16
|
+
and is compatible with AG2's agent system through ModelClient protocol.
|
|
17
|
+
|
|
18
|
+
Note: This uses the Chat Completions API, NOT the newer Responses API (client.responses.create).
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
from typing import Any
|
|
22
|
+
|
|
23
|
+
from autogen.import_utils import optional_import_block
|
|
24
|
+
|
|
25
|
+
with optional_import_block() as openai_result:
|
|
26
|
+
from openai import OpenAI
|
|
27
|
+
|
|
28
|
+
if openai_result.is_successful:
|
|
29
|
+
openai_import_exception: ImportError | None = None
|
|
30
|
+
else:
|
|
31
|
+
OpenAI = None # type: ignore[assignment]
|
|
32
|
+
openai_import_exception = ImportError(
|
|
33
|
+
"Please install openai to use OpenAICompletionsClient. Install with: pip install openai"
|
|
34
|
+
)
|
|
35
|
+
|
|
36
|
+
from ..llm_config.client import ModelClient
|
|
37
|
+
from .models import (
|
|
38
|
+
CitationContent,
|
|
39
|
+
GenericContent,
|
|
40
|
+
ReasoningContent,
|
|
41
|
+
TextContent,
|
|
42
|
+
ToolCallContent,
|
|
43
|
+
UnifiedMessage,
|
|
44
|
+
UnifiedResponse,
|
|
45
|
+
UserRoleEnum,
|
|
46
|
+
normalize_role,
|
|
47
|
+
)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class OpenAICompletionsClient(ModelClient):
|
|
51
|
+
"""
|
|
52
|
+
OpenAI Chat Completions API client implementing ModelClientV2 protocol.
|
|
53
|
+
|
|
54
|
+
This client works with OpenAI's Chat Completions API (client.chat.completions.create)
|
|
55
|
+
which returns structured output with reasoning blocks (o1/o3 models), tool calls, and more.
|
|
56
|
+
|
|
57
|
+
Key Features:
|
|
58
|
+
- Preserves reasoning blocks as ReasoningContent (o1/o3 models)
|
|
59
|
+
- Handles tool calls and results
|
|
60
|
+
- Supports multimodal content
|
|
61
|
+
- Provides backward compatibility via create_v1_compatible()
|
|
62
|
+
|
|
63
|
+
Example:
|
|
64
|
+
client = OpenAICompletionsClient(api_key="...")
|
|
65
|
+
|
|
66
|
+
# Get rich response with reasoning
|
|
67
|
+
response = client.create({
|
|
68
|
+
"model": "o1-preview",
|
|
69
|
+
"messages": [{"role": "user", "content": "Explain quantum computing"}]
|
|
70
|
+
})
|
|
71
|
+
|
|
72
|
+
# Access reasoning blocks
|
|
73
|
+
for reasoning in response.reasoning:
|
|
74
|
+
print(f"Reasoning: {reasoning.reasoning}")
|
|
75
|
+
|
|
76
|
+
# Get text response
|
|
77
|
+
print(f"Answer: {response.text}")
|
|
78
|
+
"""
|
|
79
|
+
|
|
80
|
+
RESPONSE_USAGE_KEYS: list[str] = ["prompt_tokens", "completion_tokens", "total_tokens", "cost", "model"]
|
|
81
|
+
|
|
82
|
+
def __init__(
|
|
83
|
+
self,
|
|
84
|
+
api_key: str | None = None,
|
|
85
|
+
base_url: str | None = None,
|
|
86
|
+
timeout: float = 60.0,
|
|
87
|
+
response_format: Any = None,
|
|
88
|
+
**kwargs: Any,
|
|
89
|
+
):
|
|
90
|
+
"""
|
|
91
|
+
Initialize OpenAI Chat Completions API client.
|
|
92
|
+
|
|
93
|
+
Args:
|
|
94
|
+
api_key: OpenAI API key (or set OPENAI_API_KEY env var)
|
|
95
|
+
base_url: Custom base URL for OpenAI API
|
|
96
|
+
timeout: Request timeout in seconds
|
|
97
|
+
response_format: Optional response format (Pydantic model or JSON schema)
|
|
98
|
+
**kwargs: Additional arguments passed to OpenAI client
|
|
99
|
+
"""
|
|
100
|
+
if openai_import_exception is not None:
|
|
101
|
+
raise openai_import_exception
|
|
102
|
+
|
|
103
|
+
self.client = OpenAI(api_key=api_key, base_url=base_url, timeout=timeout, **kwargs) # type: ignore[misc]
|
|
104
|
+
self._default_response_format = response_format
|
|
105
|
+
self._cost_per_token = {
|
|
106
|
+
# GPT-5 series - Latest flagship models (per million tokens)
|
|
107
|
+
"gpt-5": {"prompt": 1.25 / 1_000_000, "completion": 10.00 / 1_000_000},
|
|
108
|
+
"gpt-5-mini": {"prompt": 0.25 / 1_000_000, "completion": 2.00 / 1_000_000},
|
|
109
|
+
"gpt-5-nano": {"prompt": 0.05 / 1_000_000, "completion": 0.40 / 1_000_000},
|
|
110
|
+
# GPT-4o series - Multimodal flagship (per million tokens)
|
|
111
|
+
"gpt-4o": {"prompt": 2.50 / 1_000_000, "completion": 10.00 / 1_000_000},
|
|
112
|
+
"gpt-4o-mini": {"prompt": 0.15 / 1_000_000, "completion": 0.60 / 1_000_000},
|
|
113
|
+
# GPT-4 Turbo (per million tokens)
|
|
114
|
+
"gpt-4-turbo": {"prompt": 10.00 / 1_000_000, "completion": 30.00 / 1_000_000},
|
|
115
|
+
# GPT-4 legacy (per million tokens)
|
|
116
|
+
"gpt-4": {"prompt": 10.00 / 1_000_000, "completion": 30.00 / 1_000_000},
|
|
117
|
+
# GPT-3.5 Turbo (per million tokens)
|
|
118
|
+
"gpt-3.5-turbo": {"prompt": 0.50 / 1_000_000, "completion": 1.50 / 1_000_000},
|
|
119
|
+
# o1 series - Reasoning models (keep existing if still valid)
|
|
120
|
+
"o1-preview": {"prompt": 0.015 / 1000, "completion": 0.060 / 1000},
|
|
121
|
+
"o1-mini": {"prompt": 0.003 / 1000, "completion": 0.012 / 1000},
|
|
122
|
+
"o3-mini": {"prompt": 0.003 / 1000, "completion": 0.012 / 1000},
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
def create(self, params: dict[str, Any]) -> UnifiedResponse: # type: ignore[override]
|
|
126
|
+
"""
|
|
127
|
+
Create a completion and return UnifiedResponse with all features preserved.
|
|
128
|
+
|
|
129
|
+
This method implements ModelClient.create() but returns UnifiedResponse instead
|
|
130
|
+
of ModelClientResponseProtocol. The rich UnifiedResponse structure is compatible
|
|
131
|
+
via duck typing - it has .model attribute and works with message_retrieval().
|
|
132
|
+
|
|
133
|
+
Args:
|
|
134
|
+
params: Request parameters including:
|
|
135
|
+
- model: Model name (e.g., "o1-preview")
|
|
136
|
+
- messages: List of message dicts
|
|
137
|
+
- temperature: Optional temperature (not supported by o1 models)
|
|
138
|
+
- max_tokens: Optional max completion tokens
|
|
139
|
+
- tools: Optional tool definitions
|
|
140
|
+
- response_format: Optional Pydantic BaseModel or JSON schema dict
|
|
141
|
+
- **other OpenAI parameters
|
|
142
|
+
|
|
143
|
+
Returns:
|
|
144
|
+
UnifiedResponse with reasoning blocks, citations, and all content preserved
|
|
145
|
+
"""
|
|
146
|
+
# Merge default response_format if not already in params
|
|
147
|
+
if self._default_response_format is not None and "response_format" not in params:
|
|
148
|
+
params = params.copy()
|
|
149
|
+
params["response_format"] = self._default_response_format
|
|
150
|
+
|
|
151
|
+
# Check if response_format is a Pydantic BaseModel
|
|
152
|
+
response_format = params.get("response_format")
|
|
153
|
+
use_parse = self._is_pydantic_model(response_format)
|
|
154
|
+
|
|
155
|
+
# Call OpenAI API - use parse() for Pydantic models, create() otherwise
|
|
156
|
+
if use_parse:
|
|
157
|
+
# parse() doesn't support stream parameter - remove it if present
|
|
158
|
+
parse_params = params.copy()
|
|
159
|
+
parse_params.pop("stream", None)
|
|
160
|
+
response = self.client.chat.completions.parse(**parse_params)
|
|
161
|
+
else:
|
|
162
|
+
response = self.client.chat.completions.create(**params)
|
|
163
|
+
|
|
164
|
+
# Transform to UnifiedResponse
|
|
165
|
+
return self._transform_response(response, params.get("model", "unknown"), use_parse=use_parse)
|
|
166
|
+
|
|
167
|
+
def _is_pydantic_model(self, obj: Any) -> bool:
|
|
168
|
+
"""
|
|
169
|
+
Check if object is a Pydantic BaseModel class.
|
|
170
|
+
|
|
171
|
+
Args:
|
|
172
|
+
obj: Object to check
|
|
173
|
+
|
|
174
|
+
Returns:
|
|
175
|
+
True if obj is a Pydantic BaseModel class (not instance)
|
|
176
|
+
"""
|
|
177
|
+
try:
|
|
178
|
+
import inspect
|
|
179
|
+
|
|
180
|
+
from pydantic import BaseModel
|
|
181
|
+
|
|
182
|
+
return inspect.isclass(obj) and issubclass(obj, BaseModel)
|
|
183
|
+
except (ImportError, TypeError):
|
|
184
|
+
return False
|
|
185
|
+
|
|
186
|
+
    def _transform_response(self, openai_response: Any, model: str, use_parse: bool = False) -> UnifiedResponse:
        """
        Transform OpenAI ChatCompletion response to UnifiedResponse.

        This handles the standard ChatCompletion format including o1/o3 models
        which include a 'reasoning' field in the message object.

        Content handling:
        - Text content → TextContent
        - Reasoning blocks (o1/o3 models) → ReasoningContent
        - Tool calls → ToolCallContent
        - Parsed Pydantic objects (when use_parse=True) → GenericContent with 'parsed' type
        - Refusals (when use_parse=True) → GenericContent with 'refusal' type
        - Unknown message fields → GenericContent (forward compatibility)

        This ensures that new OpenAI features are preserved even if we don't have
        specific content types defined yet.

        Args:
            openai_response: Raw OpenAI API response (from create() or parse())
            model: Model name
            use_parse: Whether response came from parse() method (has .parsed field)

        Returns:
            UnifiedResponse with all content blocks properly typed
        """
        # NOTE(review): the `model` parameter is currently unused — the model
        # recorded on the UnifiedResponse comes from openai_response.model below.
        messages = []

        # Process each choice: one UnifiedMessage per choice, built from typed
        # content blocks extracted out of the choice's message object.
        for choice in openai_response.choices:
            content_blocks = []
            message_obj = choice.message

            # Extract reasoning if present (o1/o3 models)
            if getattr(message_obj, "reasoning", None):
                content_blocks.append(
                    ReasoningContent(
                        reasoning=message_obj.reasoning,
                        summary=None,
                    )
                )

            # Extract parsed Pydantic object if present (from parse() method)
            if use_parse and getattr(message_obj, "parsed", None):
                # Store parsed object as GenericContent to preserve it
                parsed_obj = message_obj.parsed
                # Convert to dict for storage — model_dump() is pydantic v2,
                # dict() is the pydantic v1 fallback; anything else is stringified.
                if hasattr(parsed_obj, "model_dump"):
                    parsed_dict = parsed_obj.model_dump()
                elif hasattr(parsed_obj, "dict"):
                    parsed_dict = parsed_obj.dict()
                else:
                    parsed_dict = {"value": str(parsed_obj)}

                content_blocks.append(GenericContent(type="parsed", parsed=parsed_dict))

            # Extract refusal if present (from parse() method)
            if use_parse and getattr(message_obj, "refusal", None):
                content_blocks.append(GenericContent(type="refusal", refusal=message_obj.refusal))

            # Extract text content
            # Note: OpenAI Chat Completions API always returns content as str, never list
            # (List content is only used in REQUEST messages for multimodal inputs)
            if message_obj.content:
                content_blocks.append(TextContent(text=message_obj.content))

            # Extract tool calls
            if getattr(message_obj, "tool_calls", None):
                for tool_call in message_obj.tool_calls:
                    content_blocks.append(
                        ToolCallContent(
                            id=tool_call.id,
                            name=tool_call.function.name,
                            arguments=tool_call.function.arguments,
                        )
                    )

            # Extract citations if present (future-proofing)
            # Note: Not currently available in Chat Completions API, so citations
            # are assumed to arrive as dicts — TODO confirm once the API ships them.
            if getattr(message_obj, "citations", None):
                for citation in message_obj.citations:
                    content_blocks.append(
                        CitationContent(
                            url=citation.get("url", ""),
                            title=citation.get("title", ""),
                            snippet=citation.get("snippet", ""),
                            relevance_score=citation.get("relevance_score"),
                        )
                    )

            # Handle any other unknown fields from OpenAI response as GenericContent
            # This ensures forward compatibility with new OpenAI features
            known_fields = {
                "role",
                "content",
                "reasoning",
                "tool_calls",
                "citations",
                "name",
                "function_call",
                "parsed",
                "refusal",
            }
            message_dict = message_obj.model_dump() if hasattr(message_obj, "model_dump") else {}
            for field_name, field_value in message_dict.items():
                if field_name not in known_fields and field_value is not None:
                    # Create GenericContent for unknown field; the field name
                    # doubles as the block's type tag.
                    content_blocks.append(GenericContent(type=field_name, **{field_name: field_value}))

            # Create unified message with normalized role (convert to UserRoleEnum for known roles)
            messages.append(
                UnifiedMessage(
                    role=normalize_role(message_obj.role),
                    content=content_blocks,
                    name=getattr(message_obj, "name", None),
                )
            )

        # Extract usage information (absent e.g. on some streamed chunks)
        usage = {}
        if getattr(openai_response, "usage", None):
            usage = {
                "prompt_tokens": openai_response.usage.prompt_tokens,
                "completion_tokens": openai_response.usage.completion_tokens,
                "total_tokens": openai_response.usage.total_tokens,
            }

        # Build UnifiedResponse. finish_reason is taken from the FIRST choice only.
        # NOTE(review): status is hard-coded to "completed" — presumably this
        # path only sees finished, non-streaming responses; confirm for streaming.
        unified_response = UnifiedResponse(
            id=openai_response.id,
            model=openai_response.model,
            provider="openai",
            messages=messages,
            usage=usage,
            finish_reason=openai_response.choices[0].finish_reason if openai_response.choices else None,
            status="completed",
            provider_metadata={
                "created": getattr(openai_response, "created", None),
                "system_fingerprint": getattr(openai_response, "system_fingerprint", None),
                "service_tier": getattr(openai_response, "service_tier", None),
            },
        )

        # Calculate cost from the usage gathered above
        unified_response.cost = self.cost(unified_response)

        return unified_response
|
|
333
|
+
|
|
334
|
+
def create_v1_compatible(self, params: dict[str, Any]) -> Any:
|
|
335
|
+
"""
|
|
336
|
+
Create completion in backward-compatible ChatCompletionExtended format.
|
|
337
|
+
|
|
338
|
+
This method provides compatibility with existing AG2 code that expects
|
|
339
|
+
ChatCompletionExtended format. Note that reasoning blocks and citations
|
|
340
|
+
will be lost in this format.
|
|
341
|
+
|
|
342
|
+
Args:
|
|
343
|
+
params: Same parameters as create()
|
|
344
|
+
|
|
345
|
+
Returns:
|
|
346
|
+
ChatCompletionExtended-compatible dict (flattened response)
|
|
347
|
+
|
|
348
|
+
Warning:
|
|
349
|
+
This method loses information (reasoning blocks, citations) when
|
|
350
|
+
converting to the legacy format. Prefer create() for new code.
|
|
351
|
+
"""
|
|
352
|
+
# Get rich response
|
|
353
|
+
unified_response = self.create(params)
|
|
354
|
+
|
|
355
|
+
# Convert to legacy format (simplified - would need full ChatCompletionExtended in practice)
|
|
356
|
+
# Extract role and convert UserRoleEnum to string
|
|
357
|
+
role = unified_response.messages[0].role if unified_response.messages else UserRoleEnum.ASSISTANT
|
|
358
|
+
role_str = role.value if isinstance(role, UserRoleEnum) else role
|
|
359
|
+
|
|
360
|
+
return {
|
|
361
|
+
"id": unified_response.id,
|
|
362
|
+
"model": unified_response.model,
|
|
363
|
+
"created": unified_response.provider_metadata.get("created"),
|
|
364
|
+
"object": "chat.completion",
|
|
365
|
+
"choices": [
|
|
366
|
+
{
|
|
367
|
+
"index": 0,
|
|
368
|
+
"message": {
|
|
369
|
+
"role": role_str,
|
|
370
|
+
"content": unified_response.text,
|
|
371
|
+
},
|
|
372
|
+
"finish_reason": unified_response.finish_reason,
|
|
373
|
+
}
|
|
374
|
+
],
|
|
375
|
+
"usage": unified_response.usage,
|
|
376
|
+
"cost": unified_response.cost,
|
|
377
|
+
}
|
|
378
|
+
|
|
379
|
+
def cost(self, response: UnifiedResponse) -> float: # type: ignore[override]
|
|
380
|
+
"""
|
|
381
|
+
Calculate cost from response usage.
|
|
382
|
+
|
|
383
|
+
Implements ModelClient.cost() but accepts UnifiedResponse via duck typing.
|
|
384
|
+
|
|
385
|
+
Args:
|
|
386
|
+
response: UnifiedResponse with usage information
|
|
387
|
+
|
|
388
|
+
Returns:
|
|
389
|
+
Cost in USD for the API call
|
|
390
|
+
"""
|
|
391
|
+
if not response.usage:
|
|
392
|
+
return 0.0
|
|
393
|
+
|
|
394
|
+
model = response.model
|
|
395
|
+
prompt_tokens = response.usage.get("prompt_tokens", 0)
|
|
396
|
+
completion_tokens = response.usage.get("completion_tokens", 0)
|
|
397
|
+
|
|
398
|
+
# Find pricing for model (exact match or prefix)
|
|
399
|
+
pricing = None
|
|
400
|
+
for model_key in self._cost_per_token:
|
|
401
|
+
if model.startswith(model_key):
|
|
402
|
+
pricing = self._cost_per_token[model_key]
|
|
403
|
+
break
|
|
404
|
+
|
|
405
|
+
if not pricing:
|
|
406
|
+
# Unknown model - use default pricing (GPT-4 Turbo level, per million tokens)
|
|
407
|
+
pricing = {"prompt": 10.00 / 1_000_000, "completion": 30.00 / 1_000_000}
|
|
408
|
+
|
|
409
|
+
return (prompt_tokens * pricing["prompt"]) + (completion_tokens * pricing["completion"])
|
|
410
|
+
|
|
411
|
+
@staticmethod
|
|
412
|
+
def get_usage(response: UnifiedResponse) -> dict[str, Any]: # type: ignore[override]
|
|
413
|
+
"""
|
|
414
|
+
Extract usage statistics from response.
|
|
415
|
+
|
|
416
|
+
Implements ModelClient.get_usage() but accepts UnifiedResponse via duck typing.
|
|
417
|
+
|
|
418
|
+
Args:
|
|
419
|
+
response: UnifiedResponse from create()
|
|
420
|
+
|
|
421
|
+
Returns:
|
|
422
|
+
Dict with keys from RESPONSE_USAGE_KEYS
|
|
423
|
+
"""
|
|
424
|
+
return {
|
|
425
|
+
"prompt_tokens": response.usage.get("prompt_tokens", 0),
|
|
426
|
+
"completion_tokens": response.usage.get("completion_tokens", 0),
|
|
427
|
+
"total_tokens": response.usage.get("total_tokens", 0),
|
|
428
|
+
"cost": response.cost or 0.0,
|
|
429
|
+
"model": response.model,
|
|
430
|
+
}
|
|
431
|
+
|
|
432
|
+
def message_retrieval(self, response: UnifiedResponse) -> list[str]: # type: ignore[override]
|
|
433
|
+
"""
|
|
434
|
+
Retrieve text content from response messages.
|
|
435
|
+
|
|
436
|
+
Implements ModelClient.message_retrieval() but accepts UnifiedResponse via duck typing.
|
|
437
|
+
|
|
438
|
+
Args:
|
|
439
|
+
response: UnifiedResponse from create()
|
|
440
|
+
|
|
441
|
+
Returns:
|
|
442
|
+
List of text strings from message content blocks
|
|
443
|
+
"""
|
|
444
|
+
return [msg.get_text() for msg in response.messages]
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from .client import ModelClient
|
|
6
|
+
from .config import LLMConfig
|
|
7
|
+
|
|
8
|
+
# Public API of the llm_config package.
__all__ = (
    "LLMConfig",
    "ModelClient",
)
|