ag2 0.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ag2-0.10.2.dist-info/METADATA +819 -0
- ag2-0.10.2.dist-info/RECORD +423 -0
- ag2-0.10.2.dist-info/WHEEL +4 -0
- ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
- ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
- autogen/__init__.py +88 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +426 -0
- autogen/_website/generate_mkdocs.py +1216 -0
- autogen/_website/notebook_processor.py +475 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +413 -0
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +86 -0
- autogen/a2a/client.py +357 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +207 -0
- autogen/agentchat/__init__.py +47 -0
- autogen/agentchat/agent.py +180 -0
- autogen/agentchat/assistant_agent.py +86 -0
- autogen/agentchat/chat.py +325 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +432 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +578 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +189 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
- autogen/agentchat/contrib/rag/query_engine.py +76 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
- autogen/agentchat/contrib/swarm_agent.py +1404 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +224 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +304 -0
- autogen/agentchat/conversable_agent.py +4307 -0
- autogen/agentchat/group/__init__.py +67 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +39 -0
- autogen/agentchat/group/context_variables.py +182 -0
- autogen/agentchat/group/events/transition_events.py +111 -0
- autogen/agentchat/group/group_tool_executor.py +324 -0
- autogen/agentchat/group/group_utils.py +659 -0
- autogen/agentchat/group/guardrails.py +179 -0
- autogen/agentchat/group/handoffs.py +303 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +291 -0
- autogen/agentchat/group/on_condition.py +55 -0
- autogen/agentchat/group/on_context_condition.py +51 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +160 -0
- autogen/agentchat/group/patterns/manual.py +177 -0
- autogen/agentchat/group/patterns/pattern.py +295 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +24 -0
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +140 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/function_target.py +245 -0
- autogen/agentchat/group/targets/group_chat_target.py +133 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +424 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1832 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
- autogen/agentchat/realtime/experimental/function_observer.py +84 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +114 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +74 -0
- autogen/agents/contrib/time/time_tool_agent.py +52 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
- autogen/agents/experimental/document_agent/document_agent.py +643 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +376 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
- autogen/agents/experimental/document_agent/parser_utils.py +134 -0
- autogen/agents/experimental/document_agent/url_utils.py +417 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +76 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +70 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +71 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +97 -0
- autogen/cache/in_memory_cache.py +54 -0
- autogen/cache/redis_cache.py +119 -0
- autogen/code_utils.py +598 -0
- autogen/coding/__init__.py +30 -0
- autogen/coding/base.py +120 -0
- autogen/coding/docker_commandline_code_executor.py +283 -0
- autogen/coding/factory.py +56 -0
- autogen/coding/func_with_reqs.py +203 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +160 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +224 -0
- autogen/coding/jupyter/jupyter_code_executor.py +154 -0
- autogen/coding/jupyter/local_jupyter_server.py +164 -0
- autogen/coding/local_commandline_code_executor.py +341 -0
- autogen/coding/markdown_code_extractor.py +44 -0
- autogen/coding/utils.py +55 -0
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/doc_utils.py +35 -0
- autogen/environments/__init__.py +10 -0
- autogen/environments/docker_python_environment.py +365 -0
- autogen/environments/python_environment.py +125 -0
- autogen/environments/system_python_environment.py +85 -0
- autogen/environments/venv_python_environment.py +220 -0
- autogen/environments/working_directory.py +74 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1016 -0
- autogen/events/base_event.py +100 -0
- autogen/events/client_events.py +168 -0
- autogen/events/helpers.py +44 -0
- autogen/events/print_event.py +45 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +75 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +206 -0
- autogen/fast_depends/core/model.py +527 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +30 -0
- autogen/fast_depends/dependencies/provider.py +40 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +272 -0
- autogen/fast_depends/utils.py +177 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +173 -0
- autogen/import_utils.py +539 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
- autogen/interop/langchain/langchain_tool.py +78 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +178 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
- autogen/interop/registry.py +70 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +61 -0
- autogen/io/run_response.py +294 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +214 -0
- autogen/json_utils.py +42 -0
- autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
- autogen/llm_clients/__init__.py +77 -0
- autogen/llm_clients/client_v2.py +122 -0
- autogen/llm_clients/models/__init__.py +55 -0
- autogen/llm_clients/models/content_blocks.py +389 -0
- autogen/llm_clients/models/unified_message.py +145 -0
- autogen/llm_clients/models/unified_response.py +83 -0
- autogen/llm_clients/openai_completions_client.py +444 -0
- autogen/llm_config/__init__.py +11 -0
- autogen/llm_config/client.py +59 -0
- autogen/llm_config/config.py +461 -0
- autogen/llm_config/entry.py +169 -0
- autogen/llm_config/types.py +37 -0
- autogen/llm_config/utils.py +223 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +129 -0
- autogen/logger/file_logger.py +262 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +524 -0
- autogen/math_utils.py +338 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/helpers.py +45 -0
- autogen/mcp/mcp_client.py +349 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +399 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +946 -0
- autogen/messages/base_message.py +108 -0
- autogen/messages/client_messages.py +172 -0
- autogen/messages/print_message.py +48 -0
- autogen/oai/__init__.py +61 -0
- autogen/oai/anthropic.py +1516 -0
- autogen/oai/bedrock.py +800 -0
- autogen/oai/cerebras.py +302 -0
- autogen/oai/client.py +1658 -0
- autogen/oai/client_utils.py +196 -0
- autogen/oai/cohere.py +494 -0
- autogen/oai/gemini.py +1045 -0
- autogen/oai/gemini_types.py +156 -0
- autogen/oai/groq.py +319 -0
- autogen/oai/mistral.py +311 -0
- autogen/oai/oai_models/__init__.py +23 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +86 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +97 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
- autogen/oai/oai_models/completion_usage.py +59 -0
- autogen/oai/ollama.py +657 -0
- autogen/oai/openai_responses.py +451 -0
- autogen/oai/openai_utils.py +897 -0
- autogen/oai/together.py +387 -0
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +197 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/retrieve_utils.py +490 -0
- autogen/runtime_logging.py +161 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/token_count_utils.py +280 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +40 -0
- autogen/tools/dependency_injection.py +249 -0
- autogen/tools/experimental/__init__.py +54 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +154 -0
- autogen/tools/experimental/code_execution/__init__.py +7 -0
- autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +329 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
- autogen/tools/experimental/firecrawl/__init__.py +7 -0
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1311 -0
- autogen/tools/experimental/searxng/__init__.py +7 -0
- autogen/tools/experimental/searxng/searxng_search.py +142 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +176 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
- autogen/tools/function_utils.py +412 -0
- autogen/tools/tool.py +188 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +72 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
|
@@ -0,0 +1,451 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
import copy
|
|
6
|
+
import warnings
|
|
7
|
+
from typing import TYPE_CHECKING, Any
|
|
8
|
+
|
|
9
|
+
from pydantic import BaseModel
|
|
10
|
+
|
|
11
|
+
from autogen.code_utils import content_str
|
|
12
|
+
from autogen.import_utils import optional_import_block, require_optional_import
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from autogen.oai.client import ModelClient, OpenAI, OpenAILLMConfigEntry
|
|
16
|
+
else:
|
|
17
|
+
# Import at runtime to avoid circular import
|
|
18
|
+
OpenAILLMConfigEntry = None
|
|
19
|
+
ModelClient = None
|
|
20
|
+
OpenAI = None
|
|
21
|
+
|
|
22
|
+
with optional_import_block() as openai_result:
|
|
23
|
+
from openai.types.responses.response import Response
|
|
24
|
+
from openai.types.responses.response_output_item import ImageGenerationCall
|
|
25
|
+
|
|
26
|
+
# Image Costs
|
|
27
|
+
# Pricing per image (in USD)
|
|
28
|
+
PRICING = {
|
|
29
|
+
"gpt-image-1": {
|
|
30
|
+
"low": {"1024x1024": 0.011, "1024x1536": 0.016, "1536x1024": 0.016},
|
|
31
|
+
"medium": {"1024x1024": 0.042, "1024x1536": 0.063, "1536x1024": 0.063},
|
|
32
|
+
"high": {"1024x1024": 0.167, "1024x1536": 0.25, "1536x1024": 0.25},
|
|
33
|
+
},
|
|
34
|
+
"dall-e-3": {
|
|
35
|
+
"standard": {"1024x1024": 0.040, "1024x1792": 0.080, "1792x1024": 0.080},
|
|
36
|
+
"hd": {"1024x1024": 0.080, "1024x1792": 0.120, "1792x1024": 0.120},
|
|
37
|
+
},
|
|
38
|
+
"dall-e-2": {"standard": {"1024x1024": 0.020, "512x512": 0.018, "256x256": 0.016}},
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
# Valid sizes for each model
|
|
42
|
+
VALID_SIZES = {
|
|
43
|
+
"gpt-image-1": ["1024x1024", "1024x1536", "1536x1024"],
|
|
44
|
+
"dall-e-3": ["1024x1024", "1024x1792", "1792x1024"],
|
|
45
|
+
"dall-e-2": ["1024x1024", "512x512", "256x256"],
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def calculate_openai_image_cost(
|
|
50
|
+
model: str = "gpt-image-1", size: str = "1024x1024", quality: str = "high"
|
|
51
|
+
) -> tuple[float, str]:
|
|
52
|
+
"""Calculate the cost for a single image generation.
|
|
53
|
+
|
|
54
|
+
Args:
|
|
55
|
+
model: Model name ("gpt-image-1", "dall-e-3" or "dall-e-2")
|
|
56
|
+
size: Image size (e.g., "1024x1024", "1024x1536")
|
|
57
|
+
quality: Quality setting:
|
|
58
|
+
- For gpt-image-1: "low", "medium", or "high"
|
|
59
|
+
- For dall-e-3: "standard" or "hd"
|
|
60
|
+
- For dall-e-2: "standard" only
|
|
61
|
+
|
|
62
|
+
Returns:
|
|
63
|
+
Tuple of (cost, error_message)
|
|
64
|
+
"""
|
|
65
|
+
# Normalize inputs
|
|
66
|
+
model = model.lower()
|
|
67
|
+
quality = quality.lower()
|
|
68
|
+
|
|
69
|
+
# Validate model
|
|
70
|
+
if model not in PRICING:
|
|
71
|
+
return 0.0, f"Invalid model: {model}. Valid models: {list(PRICING.keys())}"
|
|
72
|
+
|
|
73
|
+
# Validate size
|
|
74
|
+
if size not in VALID_SIZES[model]:
|
|
75
|
+
return 0.0, f"Invalid size {size} for {model}. Valid sizes: {VALID_SIZES[model]}"
|
|
76
|
+
|
|
77
|
+
# Get the cost based on model type
|
|
78
|
+
try:
|
|
79
|
+
if model == "gpt-image-1" or model == "dall-e-3":
|
|
80
|
+
cost = PRICING[model][quality][size]
|
|
81
|
+
elif model == "dall-e-2":
|
|
82
|
+
cost = PRICING[model]["standard"][size]
|
|
83
|
+
else:
|
|
84
|
+
return 0.0, f"Model {model} not properly configured"
|
|
85
|
+
|
|
86
|
+
return cost, None
|
|
87
|
+
|
|
88
|
+
except KeyError:
|
|
89
|
+
return 0.0, f"Invalid quality '{quality}' for {model}"
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _get_base_class():
    """Resolve ``OpenAILLMConfigEntry`` lazily.

    Importing at call time (rather than module import time) avoids the
    circular import between this module and ``autogen.oai.client``.
    """
    from autogen.oai.client import OpenAILLMConfigEntry as entry_cls

    return entry_cls
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
# -----------------------------------------------------------------------------
|
|
100
|
+
# OpenAI Client that calls the /responses endpoint
|
|
101
|
+
# -----------------------------------------------------------------------------
|
|
102
|
+
@require_optional_import("openai", "openai")
class OpenAIResponsesClient:
    """Minimal implementation targeting the experimental /responses endpoint.

    We purposefully keep the surface small - *create*, *message_retrieval*,
    *cost* and *get_usage* - enough for ConversableAgent to operate. Anything
    that the new endpoint does natively (web_search, file_search, image
    generation, function calling, etc.) is transparently passed through by the
    OpenAI SDK so we don't replicate logic here.
    """

    # Keys expected in the dict produced from a response's usage block;
    # presumably kept in sync with the chat-completions client contract — verify against callers.
    RESPONSE_USAGE_KEYS: list[str] = ["prompt_tokens", "completion_tokens", "total_tokens", "cost", "model"]
|
|
114
|
+
|
|
115
|
+
def __init__(
    self,
    client: "OpenAI",
    response_format: BaseModel | dict[str, Any] | None = None,
):
    """Wrap a plain ``openai.OpenAI`` instance for use against /responses."""
    self._oai_client = client  # plain openai.OpenAI instance
    # Kept for parity with the chat-completions client; unused for now.
    self.response_format = response_format

    # Parameters forwarded to the built-in image_generation tool.
    self.image_output_params = dict(
        quality=None,  # "high" or "low"
        background=None,  # "white" or "black" or "transparent"
        size=None,  # "1024x1024" or "1024x1792" or "1792x1024"
        output_format="png",  # "png", "jpg" or "jpeg" or "webp"
        output_compression=None,  # 0-100 if output_format is "jpg" or "jpeg" or "webp"
    )
    # Id of the previous response, used to send only delta messages next call.
    self.previous_response_id = None

    # Image costs are calculated manually (rather than off returned information)
    self.image_costs = 0
|
|
135
|
+
|
|
136
|
+
# ------------------------------------------------------------------ helpers
|
|
137
|
+
# responses objects embed usage similarly to chat completions
|
|
138
|
+
@staticmethod
def _usage_dict(resp) -> dict:
    """Normalize a response's usage block into the chat-completions shape."""
    raw = getattr(resp, "usage", None) or {}

    # Usage may arrive as a pydantic model or as a plain mapping; anything
    # else is treated as absent.
    if hasattr(raw, "model_dump"):
        usage = raw.model_dump()
    else:
        usage = raw if isinstance(raw, dict) else {}

    details = usage.get("output_tokens_details", {})

    return {
        "prompt_tokens": usage.get("input_tokens", 0),
        "completion_tokens": usage.get("output_tokens", 0),
        "total_tokens": usage.get("total_tokens", 0),
        "cost": getattr(resp, "cost", 0),
        "model": getattr(resp, "model", ""),
        "reasoning_tokens": details.get("reasoning_tokens", 0),
    }
|
|
160
|
+
|
|
161
|
+
def _add_image_cost(self, response: "Response") -> None:
    """Accumulate estimated image-generation cost onto ``self.image_costs``.

    The /responses usage object does not report image cost, so it is
    estimated from the size/quality recorded on each ImageGenerationCall.
    """
    for output in response.output:
        # Bug fix: inspect the *current* output item's model_extra rather
        # than response.output[0]'s — otherwise only the first item gated
        # (and supplied extras for) every image in the response.
        if isinstance(output, ImageGenerationCall) and getattr(output, "model_extra", None):
            extra_fields = output.model_extra

            # Defaults mirror the tool's typical settings when the API
            # omits the fields.
            image_cost, image_error = calculate_openai_image_cost(
                model="gpt-image-1",
                size=extra_fields.get("size", "1024x1536"),
                quality=extra_fields.get("quality", "high"),
            )

            if not image_error and image_cost:
                self.image_costs += image_cost
|
|
179
|
+
|
|
180
|
+
def _get_delta_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
|
181
|
+
"""Get the delta messages from the messages."""
|
|
182
|
+
delta_messages = []
|
|
183
|
+
for m in messages[::-1]:
|
|
184
|
+
contents = m.get("content")
|
|
185
|
+
is_last_completed_response = False
|
|
186
|
+
if isinstance(contents, list):
|
|
187
|
+
for c in contents:
|
|
188
|
+
if "status" in c and c.get("status") == "completed":
|
|
189
|
+
is_last_completed_response = True
|
|
190
|
+
break
|
|
191
|
+
elif isinstance(contents, str):
|
|
192
|
+
is_last_completed_response = "status" in m and m.get("status") == "completed"
|
|
193
|
+
|
|
194
|
+
if is_last_completed_response:
|
|
195
|
+
break
|
|
196
|
+
delta_messages.append(m)
|
|
197
|
+
return delta_messages[::-1]
|
|
198
|
+
|
|
199
|
+
def _parse_params(self, params: dict[str, Any]) -> None:
|
|
200
|
+
if "verbosity" in params:
|
|
201
|
+
verbosity = params.pop("verbosity")
|
|
202
|
+
params["text"] = {"verbosity": verbosity}
|
|
203
|
+
return params
|
|
204
|
+
|
|
205
|
+
def create(self, params: dict[str, Any]) -> "Response":
    """Invoke `client.responses.create() or .parse()`.

    If the caller provided a classic *messages* array we convert it to the
    *input* format expected by the Responses API.

    Args:
        params: Chat-completions-style parameters. A copy is taken, then
            transformed (messages -> input, tools reshaped, built-in tools
            added) before being sent to the Responses endpoint.

    Returns:
        The raw `Response` object returned by the OpenAI SDK.

    Raises:
        ValueError: For an unrecognized content-block type in a message.
    """
    # Work on a copy so the caller's dict is never mutated.
    params = params.copy()

    # Base configs for built-in tools; image params may be extended below
    # from `image_params` content blocks.
    image_generation_tool_params = {"type": "image_generation"}
    web_search_tool_params = {"type": "web_search_preview"}

    # Continue the server-side conversation unless the caller pinned one.
    if self.previous_response_id is not None and "previous_response_id" not in params:
        params["previous_response_id"] = self.previous_response_id

    # Back-compat: transform messages → input if needed ------------------
    if "messages" in params and "input" not in params:
        # Only send what happened since the last completed response.
        msgs = self._get_delta_messages(params.pop("messages"))
        input_items = []
        for m in msgs[::-1]:  # reverse the list to get the last item first
            role = m.get("role", "user")
            # First, we need to convert the content to the Responses API format
            content = m.get("content")
            blocks = []
            if role != "tool":
                if isinstance(content, list):
                    for c in content:
                        if c.get("type") in ["input_text", "text"]:
                            blocks.append({"type": "input_text", "text": c.get("text")})
                        elif c.get("type") == "input_image":
                            blocks.append({"type": "input_image", "image_url": c.get("image_url")})
                        elif c.get("type") == "image_params":
                            # Merge recognized image params into the tool
                            # config rather than the message content.
                            # NOTE(review): assumes self.image_output_params
                            # enumerates the allowed keys — confirm.
                            for k, v in c.get("image_params", {}).items():
                                if k in self.image_output_params:
                                    image_generation_tool_params[k] = v
                        else:
                            raise ValueError(f"Invalid content type: {c.get('type')}")
                else:
                    blocks.append({"type": "input_text", "text": content})
                input_items.append({"role": role, "content": blocks})

            else:
                # Tool role: once any newer non-tool item was collected,
                # stop — only the trailing tool result is forwarded.
                if input_items:
                    break
                # tool call response is the last item in the list
                content = content_str(m.get("content"))
                input_items.append({
                    "type": "function_call_output",
                    "call_id": m.get("tool_call_id", None),
                    "output": content,
                })
                break

        # Ensure we have at least one valid input item
        if input_items:
            params["input"] = input_items[::-1]
        else:
            # If no valid input items were created, create a default one
            # This prevents the API error about missing required parameters
            params["input"] = [{"role": "user", "content": [{"type": "input_text", "text": "Hello"}]}]

    # Initialize tools list
    tools_list = []
    # Back-compat: add default tools
    built_in_tools = params.pop("built_in_tools", [])
    if built_in_tools:
        if "image_generation" in built_in_tools:
            tools_list.append(image_generation_tool_params)
        if "web_search" in built_in_tools:
            tools_list.append(web_search_tool_params)

    if "tools" in params:
        # Flatten chat-completions {"type": "function", "function": {...}}
        # tools into the flat dict shape the Responses API expects.
        for tool in params["tools"]:
            tool_item = {"type": "function"}
            if "function" in tool:
                tool_item |= tool["function"]
            tools_list.append(tool_item)
        params["tools"] = tools_list
        params["tool_choice"] = "auto"

    # Ensure we don't mix legacy params that Responses doesn't accept
    if params.get("stream") and params.get("background"):
        warnings.warn(
            "Streaming a background response may introduce latency.",
            UserWarning,
        )

    # Validate that we have at least one of the required parameters
    if not any(key in params for key in ["input", "previous_response_id", "prompt"]):
        # If we still don't have any required parameters, create a minimal input
        params["input"] = [{"role": "user", "content": [{"type": "input_text", "text": "Hello"}]}]

    # ------------------------------------------------------------------
    # Structured output handling - mimic OpenAIClient behaviour
    # ------------------------------------------------------------------

    if self.response_format is not None or "response_format" in params:

        def _create_or_parse(**kwargs):
            # For structured output we must convert dict / pydantic model
            # into the JSON-schema body expected by the API.
            if "stream" in kwargs:
                kwargs.pop("stream")  # Responses API rejects stream with RF for now

            rf = kwargs.get("response_format", self.response_format)

            if isinstance(rf, dict):
                from autogen.oai.client import _ensure_strict_json_schema

                kwargs["text_format"] = {
                    "type": "json_schema",
                    "json_schema": {
                        "schema": _ensure_strict_json_schema(rf, path=(), root=rf),
                        "name": "response_format",
                        "strict": True,
                    },
                }
            else:
                # pydantic.BaseModel subclass
                from autogen.oai.client import type_to_response_format_param

                kwargs["text_format"] = type_to_response_format_param(rf)
            # A caller-supplied response_format wins over self.response_format.
            if "response_format" in kwargs:
                kwargs["text_format"] = kwargs.pop("response_format")
            try:
                return self._oai_client.responses.parse(**kwargs)
            except TypeError as e:
                # Older openai-python versions may not yet expose the
                # text_format parameter on the Responses endpoint.
                if "text_format" in str(e) and "unexpected" in str(e):
                    warnings.warn(
                        "Installed openai-python version doesn't support "
                        "`response_format` for the Responses API. "
                        "Falling back to raw text output.",
                        UserWarning,
                    )
                    kwargs.pop("text_format", None)
                    return self._oai_client.responses.create(**kwargs)
                # NOTE(review): a TypeError that doesn't match the pattern
                # above is swallowed and None is returned — confirm intended.

        response = _create_or_parse(**params)
        self.previous_response_id = response.id
        return response
    # No structured output
    params = self._parse_params(params)
    response = self._oai_client.responses.create(**params)
    self.previous_response_id = response.id
    # Accumulate image costs
    # NOTE(review): the structured-output path above returns without calling
    # _add_image_cost — confirm image costs are intentionally skipped there.
    self._add_image_cost(response)
    return response
|
|
353
|
+
|
|
354
|
+
def message_retrieval(self, response) -> list[str] | list["ModelClient.ModelClientResponseProtocol.Choice.Message"]:
    """Translate a Responses API result into a single assistant message.

    Each item in ``response.output`` is mapped as follows:
      - ``reasoning`` items are dropped;
      - ``message`` items become ``{"type": "text", ...}`` content blocks;
      - ``function_call`` items are collected into ``tool_calls``;
      - other ``*_call`` items (built-in tools) become ``tool_call`` content
        blocks;
      - anything else is kept as a raw dict so no information is lost.

    Returns:
        A one-element list with an assistant message dict carrying
        ``content`` (or None if empty) and ``tool_calls``.
    """
    output = getattr(response, "output", [])
    content = []
    tool_calls = []

    for item in output:
        # Convert pydantic objects to plain dicts for uniform handling
        if hasattr(item, "model_dump"):
            item = item.model_dump()

        item_type = item.get("type")

        # Skip reasoning items - they're not messages
        if item_type == "reasoning":
            continue

        # 1) Assistant messages: flatten content blocks into a single text.
        if item_type == "message":
            new_item = copy.deepcopy(item)
            new_item["type"] = "text"
            new_item["role"] = "assistant"

            blocks = item.get("content", [])
            if len(blocks) == 1 and blocks[0].get("type") == "output_text":
                new_item["text"] = blocks[0]["text"]
            elif len(blocks) > 0:
                # Handle multiple content blocks: join output_text parts,
                # other block types are ignored here.
                text_parts = []
                for block in blocks:
                    if block.get("type") == "output_text":
                        text_parts.append(block.get("text", ""))
                new_item["text"] = " ".join(text_parts)

            # The original nested blocks are replaced by the flat "text".
            if "content" in new_item:
                del new_item["content"]
            content.append(new_item)
            continue

        # ------------------------------------------------------------------
        # 2) Custom function calls
        # ------------------------------------------------------------------
        if item_type == "function_call":
            tool_calls.append({
                "id": item.get("call_id", None),
                "function": {
                    "name": item.get("name", None),
                    "arguments": item.get("arguments"),
                },
                "type": "function_call",
            })
            continue

        # ------------------------------------------------------------------
        # 3) Built-in tool calls
        # ------------------------------------------------------------------
        if item_type and item_type.endswith("_call"):
            tool_name = item_type.replace("_call", "")
            tool_call_args = {
                "id": item.get("id"),
                "role": "tool_calls",
                "type": "tool_call",  # Responses API currently routes via function-like tools
                "name": tool_name,
            }
            if tool_name == "image_generation":
                # Copy recognized image params and the base64 image payload.
                for k in self.image_output_params:
                    if k in item:
                        tool_call_args[k] = item[k]
                encoded_base64_result = item.get("result", "")
                tool_call_args["content"] = encoded_base64_result
                # add image_url for image input back to oai response api.
                # NOTE(review): indexes image_output_params with a string key,
                # while other call sites iterate it / test membership — assumes
                # it is a dict with an "output_format" entry; confirm.
                output_format = self.image_output_params["output_format"]
                tool_call_args["image_url"] = f"data:image/{output_format};base64,{encoded_base64_result}"
            elif tool_name == "web_search":
                pass
            else:
                raise ValueError(f"Invalid tool name: {tool_name}")
            content.append(tool_call_args)
            continue

        # ------------------------------------------------------------------
        # 4) Fallback - store raw dict so information isn't lost
        # ------------------------------------------------------------------
        content.append(item)

    return [
        {
            "role": "assistant",
            "id": response.id,
            "content": content if content else None,
            "tool_calls": tool_calls,
        }
    ]
|
|
445
|
+
|
|
446
|
+
def cost(self, response):
    """Total cost of *response*: API-reported cost plus accumulated image costs."""
    usage = self._usage_dict(response)
    return usage.get("cost", 0) + self.image_costs
|
|
448
|
+
|
|
449
|
+
@staticmethod
def get_usage(response):
    """Return the normalized usage dictionary (tokens, cost, model) for *response*."""
    usage = OpenAIResponsesClient._usage_dict(response)
    return usage
|