ag2 0.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ag2-0.10.2.dist-info/METADATA +819 -0
- ag2-0.10.2.dist-info/RECORD +423 -0
- ag2-0.10.2.dist-info/WHEEL +4 -0
- ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
- ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
- autogen/__init__.py +88 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +426 -0
- autogen/_website/generate_mkdocs.py +1216 -0
- autogen/_website/notebook_processor.py +475 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +413 -0
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +86 -0
- autogen/a2a/client.py +357 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +207 -0
- autogen/agentchat/__init__.py +47 -0
- autogen/agentchat/agent.py +180 -0
- autogen/agentchat/assistant_agent.py +86 -0
- autogen/agentchat/chat.py +325 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +432 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +578 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +189 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
- autogen/agentchat/contrib/rag/query_engine.py +76 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
- autogen/agentchat/contrib/swarm_agent.py +1404 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +224 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +304 -0
- autogen/agentchat/conversable_agent.py +4307 -0
- autogen/agentchat/group/__init__.py +67 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +39 -0
- autogen/agentchat/group/context_variables.py +182 -0
- autogen/agentchat/group/events/transition_events.py +111 -0
- autogen/agentchat/group/group_tool_executor.py +324 -0
- autogen/agentchat/group/group_utils.py +659 -0
- autogen/agentchat/group/guardrails.py +179 -0
- autogen/agentchat/group/handoffs.py +303 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +291 -0
- autogen/agentchat/group/on_condition.py +55 -0
- autogen/agentchat/group/on_context_condition.py +51 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +160 -0
- autogen/agentchat/group/patterns/manual.py +177 -0
- autogen/agentchat/group/patterns/pattern.py +295 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +24 -0
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +140 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/function_target.py +245 -0
- autogen/agentchat/group/targets/group_chat_target.py +133 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +424 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1832 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
- autogen/agentchat/realtime/experimental/function_observer.py +84 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +114 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +74 -0
- autogen/agents/contrib/time/time_tool_agent.py +52 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
- autogen/agents/experimental/document_agent/document_agent.py +643 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +376 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
- autogen/agents/experimental/document_agent/parser_utils.py +134 -0
- autogen/agents/experimental/document_agent/url_utils.py +417 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +76 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +70 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +71 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +97 -0
- autogen/cache/in_memory_cache.py +54 -0
- autogen/cache/redis_cache.py +119 -0
- autogen/code_utils.py +598 -0
- autogen/coding/__init__.py +30 -0
- autogen/coding/base.py +120 -0
- autogen/coding/docker_commandline_code_executor.py +283 -0
- autogen/coding/factory.py +56 -0
- autogen/coding/func_with_reqs.py +203 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +160 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +224 -0
- autogen/coding/jupyter/jupyter_code_executor.py +154 -0
- autogen/coding/jupyter/local_jupyter_server.py +164 -0
- autogen/coding/local_commandline_code_executor.py +341 -0
- autogen/coding/markdown_code_extractor.py +44 -0
- autogen/coding/utils.py +55 -0
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/doc_utils.py +35 -0
- autogen/environments/__init__.py +10 -0
- autogen/environments/docker_python_environment.py +365 -0
- autogen/environments/python_environment.py +125 -0
- autogen/environments/system_python_environment.py +85 -0
- autogen/environments/venv_python_environment.py +220 -0
- autogen/environments/working_directory.py +74 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1016 -0
- autogen/events/base_event.py +100 -0
- autogen/events/client_events.py +168 -0
- autogen/events/helpers.py +44 -0
- autogen/events/print_event.py +45 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +75 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +206 -0
- autogen/fast_depends/core/model.py +527 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +30 -0
- autogen/fast_depends/dependencies/provider.py +40 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +272 -0
- autogen/fast_depends/utils.py +177 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +173 -0
- autogen/import_utils.py +539 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
- autogen/interop/langchain/langchain_tool.py +78 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +178 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
- autogen/interop/registry.py +70 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +61 -0
- autogen/io/run_response.py +294 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +214 -0
- autogen/json_utils.py +42 -0
- autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
- autogen/llm_clients/__init__.py +77 -0
- autogen/llm_clients/client_v2.py +122 -0
- autogen/llm_clients/models/__init__.py +55 -0
- autogen/llm_clients/models/content_blocks.py +389 -0
- autogen/llm_clients/models/unified_message.py +145 -0
- autogen/llm_clients/models/unified_response.py +83 -0
- autogen/llm_clients/openai_completions_client.py +444 -0
- autogen/llm_config/__init__.py +11 -0
- autogen/llm_config/client.py +59 -0
- autogen/llm_config/config.py +461 -0
- autogen/llm_config/entry.py +169 -0
- autogen/llm_config/types.py +37 -0
- autogen/llm_config/utils.py +223 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +129 -0
- autogen/logger/file_logger.py +262 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +524 -0
- autogen/math_utils.py +338 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/helpers.py +45 -0
- autogen/mcp/mcp_client.py +349 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +399 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +946 -0
- autogen/messages/base_message.py +108 -0
- autogen/messages/client_messages.py +172 -0
- autogen/messages/print_message.py +48 -0
- autogen/oai/__init__.py +61 -0
- autogen/oai/anthropic.py +1516 -0
- autogen/oai/bedrock.py +800 -0
- autogen/oai/cerebras.py +302 -0
- autogen/oai/client.py +1658 -0
- autogen/oai/client_utils.py +196 -0
- autogen/oai/cohere.py +494 -0
- autogen/oai/gemini.py +1045 -0
- autogen/oai/gemini_types.py +156 -0
- autogen/oai/groq.py +319 -0
- autogen/oai/mistral.py +311 -0
- autogen/oai/oai_models/__init__.py +23 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +86 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +97 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
- autogen/oai/oai_models/completion_usage.py +59 -0
- autogen/oai/ollama.py +657 -0
- autogen/oai/openai_responses.py +451 -0
- autogen/oai/openai_utils.py +897 -0
- autogen/oai/together.py +387 -0
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +197 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/retrieve_utils.py +490 -0
- autogen/runtime_logging.py +161 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/token_count_utils.py +280 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +40 -0
- autogen/tools/dependency_injection.py +249 -0
- autogen/tools/experimental/__init__.py +54 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +154 -0
- autogen/tools/experimental/code_execution/__init__.py +7 -0
- autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +329 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
- autogen/tools/experimental/firecrawl/__init__.py +7 -0
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1311 -0
- autogen/tools/experimental/searxng/__init__.py +7 -0
- autogen/tools/experimental/searxng/searxng_search.py +142 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +176 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
- autogen/tools/function_utils.py +412 -0
- autogen/tools/tool.py +188 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +72 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
autogen/oai/mistral.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
"""Create an OpenAI-compatible client using Mistral.AI's API.
|
|
8
|
+
|
|
9
|
+
Example:
|
|
10
|
+
```python
|
|
11
|
+
llm_config = {
|
|
12
|
+
"config_list": [
|
|
13
|
+
{"api_type": "mistral", "model": "open-mixtral-8x22b", "api_key": os.environ.get("MISTRAL_API_KEY")}
|
|
14
|
+
]
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
agent = autogen.AssistantAgent("my_agent", llm_config=llm_config)
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
Install Mistral.AI python library using: pip install --upgrade mistralai
|
|
21
|
+
|
|
22
|
+
Resources:
|
|
23
|
+
- https://docs.mistral.ai/getting-started/quickstart/
|
|
24
|
+
|
|
25
|
+
NOTE: Requires mistralai package version >= 1.0.1
|
|
26
|
+
"""
|
|
27
|
+
|
|
28
|
+
import json
|
|
29
|
+
import os
|
|
30
|
+
import time
|
|
31
|
+
import warnings
|
|
32
|
+
from typing import Any, Literal
|
|
33
|
+
|
|
34
|
+
from typing_extensions import Unpack
|
|
35
|
+
|
|
36
|
+
from ..import_utils import optional_import_block, require_optional_import
|
|
37
|
+
from ..llm_config.entry import LLMConfigEntry, LLMConfigEntryDict
|
|
38
|
+
from .client_utils import should_hide_tools, validate_parameter
|
|
39
|
+
from .oai_models import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall, Choice, CompletionUsage
|
|
40
|
+
|
|
41
|
+
with optional_import_block():
|
|
42
|
+
# Mistral libraries
|
|
43
|
+
# pip install mistralai
|
|
44
|
+
from mistralai import (
|
|
45
|
+
AssistantMessage,
|
|
46
|
+
Function,
|
|
47
|
+
FunctionCall,
|
|
48
|
+
Mistral,
|
|
49
|
+
SystemMessage,
|
|
50
|
+
ToolCall,
|
|
51
|
+
ToolMessage,
|
|
52
|
+
UserMessage,
|
|
53
|
+
)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class MistralEntryDict(LLMConfigEntryDict, total=False):
    """Typed-dict shape of a Mistral.AI entry in an AG2 ``config_list``.

    ``total=False`` makes every key below optional; common keys such as
    ``model`` and ``api_key`` are inherited from ``LLMConfigEntryDict``.
    """

    # Discriminator selecting the Mistral.AI client for this config entry.
    api_type: Literal["mistral"]

    # Mistral "safe_prompt" request flag; forwarded to the chat API by parse_params.
    safe_prompt: bool
    # Sampling seed forwarded to the API; None leaves it unset.
    random_seed: int | None
    # Streaming flag — MistralAIClient warns and disables streaming, so this is effectively ignored.
    stream: bool
    # When to omit tool definitions from a request, based on prior tool runs.
    hide_tools: Literal["if_all_run", "if_any_run", "never"]
    # Tool-choice mode passed through to the Mistral chat API.
    tool_choice: Literal["none", "auto", "any"] | None
+
|
|
65
|
+
|
|
66
|
+
class MistralLLMConfigEntry(LLMConfigEntry):
    """LLM config entry for Mistral.AI (``api_type="mistral"``).

    Defaults here mirror the optional keys of the corresponding config-list
    entry; base fields such as ``model`` and ``api_key`` come from
    ``LLMConfigEntry``.
    """

    api_type: Literal["mistral"] = "mistral"
    # Mistral "safe_prompt" request flag; forwarded to the chat API.
    safe_prompt: bool = False
    # Sampling seed forwarded to the API; None leaves it unset.
    random_seed: int | None = None
    # Streaming is warned about and disabled by MistralAIClient; kept for config parity.
    stream: bool = False
    # When to omit tool definitions from a request, based on prior tool runs.
    hide_tools: Literal["if_all_run", "if_any_run", "never"] = "never"
    # Tool-choice mode passed through to the Mistral chat API.
    tool_choice: Literal["none", "auto", "any"] | None = None

    def create_client(self):
        """Not supported for this entry type; the client is constructed elsewhere."""
        raise NotImplementedError("MistralLLMConfigEntry.create_client is not implemented.")
|
+
|
|
77
|
+
|
|
78
|
+
@require_optional_import("mistralai", "mistral")
|
|
79
|
+
class MistralAIClient:
|
|
80
|
+
"""Client for Mistral.AI's API."""
|
|
81
|
+
|
|
82
|
+
RESPONSE_USAGE_KEYS: list[str] = ["prompt_tokens", "completion_tokens", "total_tokens", "cost", "model"]
|
|
83
|
+
|
|
84
|
+
def __init__(self, **kwargs: Unpack[MistralEntryDict]):
|
|
85
|
+
"""Requires api_key or environment variable to be set
|
|
86
|
+
|
|
87
|
+
Args:
|
|
88
|
+
**kwargs: Additional keyword arguments to pass to the Mistral client.
|
|
89
|
+
"""
|
|
90
|
+
# Ensure we have the api_key upon instantiation
|
|
91
|
+
self.api_key = kwargs.get("api_key")
|
|
92
|
+
if not self.api_key:
|
|
93
|
+
self.api_key = os.getenv("MISTRAL_API_KEY", None)
|
|
94
|
+
|
|
95
|
+
assert self.api_key, (
|
|
96
|
+
"Please specify the 'api_key' in your config list entry for Mistral or set the MISTRAL_API_KEY env variable."
|
|
97
|
+
)
|
|
98
|
+
|
|
99
|
+
if "response_format" in kwargs and kwargs["response_format"] is not None:
|
|
100
|
+
warnings.warn("response_format is not supported for Mistral.AI, it will be ignored.", UserWarning)
|
|
101
|
+
|
|
102
|
+
self._client = Mistral(api_key=self.api_key)
|
|
103
|
+
|
|
104
|
+
def message_retrieval(self, response: ChatCompletion) -> list[str] | list[ChatCompletionMessage]:
|
|
105
|
+
"""Retrieve the messages from the response."""
|
|
106
|
+
return [choice.message for choice in response.choices]
|
|
107
|
+
|
|
108
|
+
def cost(self, response) -> float:
|
|
109
|
+
return response.cost
|
|
110
|
+
|
|
111
|
+
@require_optional_import("mistralai", "mistral")
|
|
112
|
+
def parse_params(self, params: dict[str, Any]) -> dict[str, Any]:
|
|
113
|
+
"""Loads the parameters for Mistral.AI API from the passed in parameters and returns a validated set. Checks types, ranges, and sets defaults"""
|
|
114
|
+
mistral_params = {}
|
|
115
|
+
|
|
116
|
+
# 1. Validate models
|
|
117
|
+
mistral_params["model"] = params.get("model")
|
|
118
|
+
assert mistral_params["model"], (
|
|
119
|
+
"Please specify the 'model' in your config list entry to nominate the Mistral.ai model to use."
|
|
120
|
+
)
|
|
121
|
+
|
|
122
|
+
# 2. Validate allowed Mistral.AI parameters
|
|
123
|
+
mistral_params["temperature"] = validate_parameter(params, "temperature", (int, float), True, 0.7, None, None)
|
|
124
|
+
mistral_params["top_p"] = validate_parameter(params, "top_p", (int, float), True, None, None, None)
|
|
125
|
+
mistral_params["max_tokens"] = validate_parameter(params, "max_tokens", int, True, None, (0, None), None)
|
|
126
|
+
mistral_params["safe_prompt"] = validate_parameter(
|
|
127
|
+
params, "safe_prompt", bool, False, False, None, [True, False]
|
|
128
|
+
)
|
|
129
|
+
mistral_params["random_seed"] = validate_parameter(params, "random_seed", int, True, None, False, None)
|
|
130
|
+
mistral_params["tool_choice"] = validate_parameter(
|
|
131
|
+
params, "tool_choice", str, False, None, None, ["none", "auto", "any"]
|
|
132
|
+
)
|
|
133
|
+
|
|
134
|
+
# TODO
|
|
135
|
+
if params.get("stream", False):
|
|
136
|
+
warnings.warn(
|
|
137
|
+
"Streaming is not currently supported, streaming will be disabled.",
|
|
138
|
+
UserWarning,
|
|
139
|
+
)
|
|
140
|
+
|
|
141
|
+
# 3. Convert messages to Mistral format
|
|
142
|
+
mistral_messages = []
|
|
143
|
+
tool_call_ids = {} # tool call ids to function name mapping
|
|
144
|
+
for message in params["messages"]:
|
|
145
|
+
if message["role"] == "assistant" and "tool_calls" in message and message["tool_calls"] is not None:
|
|
146
|
+
# Convert OAI ToolCall to Mistral ToolCall
|
|
147
|
+
mistral_messages_tools = []
|
|
148
|
+
for toolcall in message["tool_calls"]:
|
|
149
|
+
mistral_messages_tools.append(
|
|
150
|
+
ToolCall(
|
|
151
|
+
id=toolcall["id"],
|
|
152
|
+
function=FunctionCall(
|
|
153
|
+
name=toolcall["function"]["name"],
|
|
154
|
+
arguments=json.loads(toolcall["function"]["arguments"]),
|
|
155
|
+
),
|
|
156
|
+
)
|
|
157
|
+
)
|
|
158
|
+
|
|
159
|
+
mistral_messages.append(AssistantMessage(content="", tool_calls=mistral_messages_tools))
|
|
160
|
+
|
|
161
|
+
# Map tool call id to the function name
|
|
162
|
+
for tool_call in message["tool_calls"]:
|
|
163
|
+
tool_call_ids[tool_call["id"]] = tool_call["function"]["name"]
|
|
164
|
+
|
|
165
|
+
elif message["role"] == "system":
|
|
166
|
+
if len(mistral_messages) > 0 and mistral_messages[-1].role == "assistant":
|
|
167
|
+
# System messages can't appear after an Assistant message, so use a UserMessage
|
|
168
|
+
mistral_messages.append(UserMessage(content=message["content"]))
|
|
169
|
+
else:
|
|
170
|
+
mistral_messages.append(SystemMessage(content=message["content"]))
|
|
171
|
+
elif message["role"] == "assistant":
|
|
172
|
+
mistral_messages.append(AssistantMessage(content=message["content"]))
|
|
173
|
+
elif message["role"] == "user":
|
|
174
|
+
mistral_messages.append(UserMessage(content=message["content"]))
|
|
175
|
+
|
|
176
|
+
elif message["role"] == "tool":
|
|
177
|
+
# Indicates the result of a tool call, the name is the function name called
|
|
178
|
+
mistral_messages.append(
|
|
179
|
+
ToolMessage(
|
|
180
|
+
name=tool_call_ids[message["tool_call_id"]],
|
|
181
|
+
content=message["content"],
|
|
182
|
+
tool_call_id=message["tool_call_id"],
|
|
183
|
+
)
|
|
184
|
+
)
|
|
185
|
+
else:
|
|
186
|
+
warnings.warn(f"Unknown message role {message['role']}", UserWarning)
|
|
187
|
+
|
|
188
|
+
# 4. Last message needs to be user or tool, if not, add a "please continue" message
|
|
189
|
+
if not isinstance(mistral_messages[-1], UserMessage) and not isinstance(mistral_messages[-1], ToolMessage):
|
|
190
|
+
mistral_messages.append(UserMessage(content="Please continue."))
|
|
191
|
+
|
|
192
|
+
mistral_params["messages"] = mistral_messages
|
|
193
|
+
|
|
194
|
+
# 5. Add tools to the call if we have them and aren't hiding them
|
|
195
|
+
if "tools" in params:
|
|
196
|
+
hide_tools = validate_parameter(
|
|
197
|
+
params, "hide_tools", str, False, "never", None, ["if_all_run", "if_any_run", "never"]
|
|
198
|
+
)
|
|
199
|
+
if not should_hide_tools(params["messages"], params["tools"], hide_tools):
|
|
200
|
+
mistral_params["tools"] = tool_def_to_mistral(params["tools"])
|
|
201
|
+
|
|
202
|
+
return mistral_params
|
|
203
|
+
|
|
204
|
+
@require_optional_import("mistralai", "mistral")
def create(self, params: dict[str, Any]) -> ChatCompletion:
    """Run a chat completion against the Mistral.AI API and return an OpenAI-compatible response.

    Args:
        params: OpenAI-style request parameters (messages, model, tools, ...);
            converted to Mistral.AI parameters via `parse_params`.

    Returns:
        ChatCompletion: OpenAI-format response object, including token usage
        and a `cost` value computed by `calculate_mistral_cost`.
    """
    # 1. Parse parameters to Mistral.AI API's parameters
    mistral_params = self.parse_params(params)

    # 2. Call Mistral.AI API
    mistral_response = self._client.chat.complete(**mistral_params)
    # TODO: Handle streaming

    # 3. Convert Mistral response to OAI compatible format
    # Only the first choice is inspected; "tool_calls" means the model requested
    # tool invocations rather than producing a final answer.
    if mistral_response.choices[0].finish_reason == "tool_calls":
        mistral_finish = "tool_calls"
        tool_calls = []
        for tool_call in mistral_response.choices[0].message.tool_calls:
            tool_calls.append(
                ChatCompletionMessageToolCall(
                    id=tool_call.id,
                    function={"name": tool_call.function.name, "arguments": tool_call.function.arguments},
                    type="function",
                )
            )
    else:
        # NOTE(review): every non-tool_calls finish reason (e.g. "length") is
        # collapsed to "stop" here — confirm this loss of detail is intended.
        mistral_finish = "stop"
        tool_calls = None

    # Wrap the Mistral message in OpenAI's message/choice structures.
    message = ChatCompletionMessage(
        role="assistant",
        content=mistral_response.choices[0].message.content,
        function_call=None,
        tool_calls=tool_calls,
    )
    choices = [Choice(finish_reason=mistral_finish, index=0, message=message)]

    response_oai = ChatCompletion(
        id=mistral_response.id,
        model=mistral_response.model,
        created=int(time.time()),  # timestamped locally at conversion time
        object="chat.completion",
        choices=choices,
        usage=CompletionUsage(
            prompt_tokens=mistral_response.usage.prompt_tokens,
            completion_tokens=mistral_response.usage.completion_tokens,
            total_tokens=mistral_response.usage.prompt_tokens + mistral_response.usage.completion_tokens,
        ),
        cost=calculate_mistral_cost(
            mistral_response.usage.prompt_tokens, mistral_response.usage.completion_tokens, mistral_response.model
        ),
    )

    return response_oai
|
|
254
|
+
|
|
255
|
+
@staticmethod
def get_usage(response: ChatCompletion) -> dict:
    """Extract token usage, cost, and model name from a completion response.

    Missing usage data yields zeros; a missing `cost` attribute yields 0.
    """
    usage = response.usage
    prompt = usage.prompt_tokens if usage is not None else 0
    completion = usage.completion_tokens if usage is not None else 0
    return {
        "prompt_tokens": prompt,
        "completion_tokens": completion,
        "total_tokens": prompt + completion,
        "cost": getattr(response, "cost", 0),
        "model": response.model,
    }
|
|
266
|
+
|
|
267
|
+
|
|
268
|
+
@require_optional_import("mistralai", "mistral")
def tool_def_to_mistral(tool_definitions: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Convert AG2 tool definitions to Mistral's tool format.

    Each AG2 entry's `function` section (name, description, parameters) is
    wrapped into a Mistral `Function` under a `{"type": "function", ...}` dict.
    """
    return [
        {
            "type": "function",
            "function": Function(
                name=tool["function"]["name"],
                description=tool["function"]["description"],
                parameters=tool["function"]["parameters"],
            ),
        }
        for tool in tool_definitions
    ]
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
def calculate_mistral_cost(input_tokens: int, output_tokens: int, model_name: str) -> float:
    """Calculate the cost of the mistral response.

    Args:
        input_tokens: Number of prompt tokens consumed.
        output_tokens: Number of completion tokens generated.
        model_name: Mistral model identifier used for the request.

    Returns:
        Cost in USD. Returns 0.0 (with a UserWarning) for unknown models.
    """
    # Prices per 1 thousand tokens
    # https://mistral.ai/technology/
    model_cost_map = {
        "open-mistral-7b": {"input": 0.00025, "output": 0.00025},
        "open-mixtral-8x7b": {"input": 0.0007, "output": 0.0007},
        "open-mixtral-8x22b": {"input": 0.002, "output": 0.006},
        "mistral-small-latest": {"input": 0.001, "output": 0.003},
        "mistral-medium-latest": {"input": 0.00275, "output": 0.0081},
        "mistral-large-latest": {"input": 0.0003, "output": 0.0003},
        "mistral-large-2407": {"input": 0.0003, "output": 0.0003},
        "open-mistral-nemo-2407": {"input": 0.0003, "output": 0.0003},
        "codestral-2405": {"input": 0.001, "output": 0.003},
    }

    # Single lookup instead of `in` check followed by indexing.
    costs = model_cost_map.get(model_name)
    if costs is None:
        warnings.warn(f"Cost calculation is not implemented for model {model_name}, will return $0.", UserWarning)
        # Fix: return a float (was int 0) to honor the declared return type.
        return 0.0

    return (input_tokens * costs["input"] / 1000) + (output_tokens * costs["output"] / 1000)
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from .chat_completion import ChatCompletionExtended as ChatCompletion
|
|
6
|
+
from .chat_completion import Choice
|
|
7
|
+
from .chat_completion_message import ChatCompletionMessage
|
|
8
|
+
from .chat_completion_message_tool_call import (
|
|
9
|
+
ChatCompletionMessageCustomToolCall,
|
|
10
|
+
ChatCompletionMessageFunctionToolCall,
|
|
11
|
+
ChatCompletionMessageToolCall,
|
|
12
|
+
)
|
|
13
|
+
from .completion_usage import CompletionUsage
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
"ChatCompletion",
|
|
17
|
+
"ChatCompletionMessage",
|
|
18
|
+
"ChatCompletionMessageCustomToolCall",
|
|
19
|
+
"ChatCompletionMessageFunctionToolCall",
|
|
20
|
+
"ChatCompletionMessageToolCall",
|
|
21
|
+
"Choice",
|
|
22
|
+
"CompletionUsage",
|
|
23
|
+
]
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
# Taken over from https://github.com/openai/openai-python/blob/main/src/openai/_models.py
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
import pydantic
|
|
9
|
+
import pydantic.generics
|
|
10
|
+
from pydantic import ConfigDict
|
|
11
|
+
|
|
12
|
+
__all__ = ["BaseModel"]
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class BaseModel(pydantic.BaseModel):
    """Shared base for OpenAI-compatible response models.

    `extra="allow"` retains fields not declared on the model instead of
    rejecting them; `defer_build=True` postpones schema construction until
    the model is first used (see pydantic ConfigDict documentation).
    """

    model_config = ConfigDict(extra="allow", defer_build=True)
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion.py
|
|
6
|
+
|
|
7
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
8
|
+
|
|
9
|
+
from collections.abc import Callable
|
|
10
|
+
from typing import Any, Literal
|
|
11
|
+
|
|
12
|
+
from ._models import BaseModel
|
|
13
|
+
from .chat_completion_message import ChatCompletionMessage
|
|
14
|
+
from .chat_completion_token_logprob import ChatCompletionTokenLogprob
|
|
15
|
+
from .completion_usage import CompletionUsage
|
|
16
|
+
|
|
17
|
+
__all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ChoiceLogprobs(BaseModel):
    """Log probability information attached to a chat completion choice."""

    content: list[ChatCompletionTokenLogprob] | None = None
    """A list of message content tokens with log probability information."""

    refusal: list[ChatCompletionTokenLogprob] | None = None
    """A list of message refusal tokens with log probability information."""
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class Choice(BaseModel):
    """A single completion choice within a chat completion response."""

    finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
    """The reason the model stopped generating tokens.

    This will be `stop` if the model hit a natural stop point or a provided stop
    sequence, `length` if the maximum number of tokens specified in the request was
    reached, `content_filter` if content was omitted due to a flag from our content
    filters, `tool_calls` if the model called a tool, or `function_call`
    (deprecated) if the model called a function.
    """

    index: int
    """The index of the choice in the list of choices."""

    logprobs: ChoiceLogprobs | None = None
    """Log probability information for the choice."""

    message: ChatCompletionMessage
    """A chat completion message generated by the model."""
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class ChatCompletion(BaseModel):
    """An OpenAI-compatible chat completion response object."""

    id: str
    """A unique identifier for the chat completion."""

    choices: list[Choice]
    """A list of chat completion choices.

    Can be more than one if `n` is greater than 1.
    """

    created: int
    """The Unix timestamp (in seconds) of when the chat completion was created."""

    model: str
    """The model used for the chat completion."""

    object: Literal["chat.completion"]
    """The object type, which is always `chat.completion`."""

    service_tier: Literal["auto", "default", "flex", "scale", "priority"] | None = None
    """The service tier used for processing the request."""

    system_fingerprint: str | None = None
    """This fingerprint represents the backend configuration that the model runs with.

    Can be used in conjunction with the `seed` request parameter to understand when
    backend changes have been made that might impact determinism.
    """

    usage: CompletionUsage | None = None
    """Usage statistics for the completion request."""
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class ChatCompletionExtended(ChatCompletion):
    """ChatCompletion subclass carrying AG2-specific bookkeeping fields."""

    # Optional callable to pull the message list back out of a response;
    # per the annotation it takes (client, response) — TODO confirm against callers.
    message_retrieval_function: Callable[[Any, "ChatCompletion"], list[ChatCompletionMessage]] | None = None
    # Identifier of the config entry that produced this response — presumably a
    # key/index into the client's configuration list; verify against caller.
    config_id: str | None = None
    # Filter predicate recorded with the response; semantics set by the caller.
    pass_filter: Callable[..., bool] | None = None
    # Monetary cost of the request as computed by the client's cost helpers.
    cost: float | None = None
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion_audio.py
|
|
6
|
+
|
|
7
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
from ._models import BaseModel
|
|
11
|
+
|
|
12
|
+
__all__ = ["ChatCompletionAudio"]
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ChatCompletionAudio(BaseModel):
    """Audio data attached to a chat completion message."""

    id: str
    """Unique identifier for this audio response."""

    data: str
    """
    Base64 encoded audio bytes generated by the model, in the format specified in
    the request.
    """

    expires_at: int
    """
    The Unix timestamp (in seconds) for when this audio response will no longer be
    accessible on the server for use in multi-turn conversations.
    """

    transcript: str
    """Transcript of the audio generated by the model."""
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
# Taken over from https://github.com/openai/openai-python/blob/16a10604fbd0d82c1382b84b417a1d6a2d33a7f1/src/openai/types/chat/chat_completion_message.py
|
|
6
|
+
|
|
7
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
from typing import Any, Literal
|
|
11
|
+
|
|
12
|
+
from pydantic import Field
|
|
13
|
+
|
|
14
|
+
from ._models import BaseModel
|
|
15
|
+
from .chat_completion_audio import ChatCompletionAudio
|
|
16
|
+
from .chat_completion_message_tool_call import (
|
|
17
|
+
ChatCompletionMessageCustomToolCall,
|
|
18
|
+
ChatCompletionMessageFunctionToolCall,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
__all__ = ["Annotation", "AnnotationURLCitation", "ChatCompletionMessage", "FunctionCall"]
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class AnnotationURLCitation(BaseModel):
    """A citation of a web resource, with its character span in the message."""

    end_index: int
    """The index of the last character of the URL citation in the message."""

    start_index: int
    """The index of the first character of the URL citation in the message."""

    title: str
    """The title of the web resource."""

    url: str
    """The URL of the web resource."""
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class Annotation(BaseModel):
    """A message annotation; the only type currently modeled is a URL citation."""

    type: Literal["url_citation"]
    """The type of the URL citation. Always `url_citation`."""

    url_citation: AnnotationURLCitation
    """A URL citation when using web search."""
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class FunctionCall(BaseModel):
    """A function call generated by the model: a name plus JSON-encoded arguments."""

    arguments: str
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: str
    """The name of the function to call."""
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class ChatCompletionMessage(BaseModel):
    """An assistant-authored message within a chat completion choice."""

    content: str | dict[str, Any] | list[dict[str, Any]] | None = Field(
        default=None, json_schema_extra={"anyOf": [{"type": "string"}, {"type": "null"}]}
    )
    """The contents of the message.

    Can be a string for text-only messages, a dict for structured content,
    a list of dicts for multimodal content (text + images), or None when
    tool_calls or function_call is present.
    """

    refusal: str | None = None
    """The refusal message generated by the model."""

    role: Literal["assistant"]
    """The role of the author of this message."""

    annotations: list[Annotation] | None = None
    """
    Annotations for the message, when applicable, as when using the
    [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
    """

    audio: ChatCompletionAudio | None = None
    """
    If the audio output modality is requested, this object contains data about the
    audio response from the model.
    [Learn more](https://platform.openai.com/docs/guides/audio).
    """

    function_call: FunctionCall | None = None
    """Deprecated and replaced by `tool_calls`.

    The name and arguments of a function that should be called, as generated by the
    model.
    """

    tool_calls: list[ChatCompletionMessageFunctionToolCall | ChatCompletionMessageCustomToolCall] | None = None
    """The tool calls generated by the model, such as function calls."""
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion_message_tool_call.py
|
|
6
|
+
|
|
7
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
8
|
+
|
|
9
|
+
from typing import Literal
|
|
10
|
+
|
|
11
|
+
from ._models import BaseModel
|
|
12
|
+
|
|
13
|
+
__all__ = ["ChatCompletionMessageCustomToolCall", "ChatCompletionMessageFunctionToolCall", "Custom", "Function"]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class Function(BaseModel):
    """The function (name and JSON-encoded arguments) the model asked to call."""

    arguments: str
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: str
    """The name of the function to call."""
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class Custom(BaseModel):
    """The payload of a custom (non-function) tool call."""

    input: str
    """The input to the custom tool."""

    name: str
    """The name of the custom tool."""
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class ChatCompletionMessageFunctionToolCall(BaseModel):
    """A tool call of type `function` generated by the model."""

    id: str
    """The ID of the tool call."""

    function: Function
    """The function that the model called."""

    type: Literal["function"]
    """The type of the tool. Currently, only `function` is supported."""
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class ChatCompletionMessageCustomToolCall(BaseModel):
    """A tool call of type `custom` generated by the model."""

    id: str
    """The ID of the tool call."""

    custom: Custom
    """The custom tool that the model called."""

    type: Literal["custom"]
    """The type of the tool. Currently, only `custom` is supported."""
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
# Backward compatibility alias: callers that predate the function/custom
# tool-call split import `ChatCompletionMessageToolCall` directly.
ChatCompletionMessageToolCall = ChatCompletionMessageFunctionToolCall
|