ag2 0.9.1a1__py3-none-any.whl → 0.9.1.post0__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info}/METADATA +264 -73
- ag2-0.9.1.post0.dist-info/RECORD +392 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info}/WHEEL +1 -2
- autogen/__init__.py +89 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +427 -0
- autogen/_website/generate_mkdocs.py +1174 -0
- autogen/_website/notebook_processor.py +476 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +412 -0
- autogen/agentchat/__init__.py +44 -0
- autogen/agentchat/agent.py +182 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +309 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +429 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +566 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +187 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
- autogen/agentchat/contrib/rag/query_engine.py +74 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
- autogen/agentchat/contrib/swarm_agent.py +1425 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +232 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +303 -0
- autogen/agentchat/conversable_agent.py +4020 -0
- autogen/agentchat/group/__init__.py +64 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +41 -0
- autogen/agentchat/group/context_variables.py +192 -0
- autogen/agentchat/group/group_tool_executor.py +202 -0
- autogen/agentchat/group/group_utils.py +591 -0
- autogen/agentchat/group/handoffs.py +244 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +237 -0
- autogen/agentchat/group/on_condition.py +58 -0
- autogen/agentchat/group/on_context_condition.py +54 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +159 -0
- autogen/agentchat/group/patterns/manual.py +176 -0
- autogen/agentchat/group/patterns/pattern.py +288 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +26 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/group_chat_target.py +132 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +413 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1694 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
- autogen/agentchat/realtime/experimental/function_observer.py +85 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +111 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +73 -0
- autogen/agents/contrib/time/time_tool_agent.py +51 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
- autogen/agents/experimental/document_agent/document_agent.py +461 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +380 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
- autogen/agents/experimental/document_agent/parser_utils.py +130 -0
- autogen/agents/experimental/document_agent/url_utils.py +426 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +77 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +62 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +75 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +102 -0
- autogen/cache/in_memory_cache.py +58 -0
- autogen/cache/redis_cache.py +123 -0
- autogen/code_utils.py +596 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +119 -0
- autogen/coding/docker_commandline_code_executor.py +268 -0
- autogen/coding/factory.py +47 -0
- autogen/coding/func_with_reqs.py +202 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +167 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +231 -0
- autogen/coding/jupyter/jupyter_code_executor.py +160 -0
- autogen/coding/jupyter/local_jupyter_server.py +172 -0
- autogen/coding/local_commandline_code_executor.py +405 -0
- autogen/coding/markdown_code_extractor.py +45 -0
- autogen/coding/utils.py +56 -0
- autogen/doc_utils.py +34 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1010 -0
- autogen/events/base_event.py +99 -0
- autogen/events/client_events.py +167 -0
- autogen/events/helpers.py +36 -0
- autogen/events/print_event.py +46 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +80 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +225 -0
- autogen/fast_depends/core/model.py +576 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +29 -0
- autogen/fast_depends/dependencies/provider.py +39 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +280 -0
- autogen/fast_depends/utils.py +187 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +178 -0
- autogen/import_utils.py +526 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
- autogen/interop/langchain/langchain_tool.py +82 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +113 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
- autogen/interop/registry.py +69 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +56 -0
- autogen/io/run_response.py +293 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +213 -0
- autogen/json_utils.py +43 -0
- autogen/llm_config.py +379 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +128 -0
- autogen/logger/file_logger.py +261 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +523 -0
- autogen/math_utils.py +339 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/mcp_client.py +208 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +948 -0
- autogen/messages/base_message.py +107 -0
- autogen/messages/client_messages.py +171 -0
- autogen/messages/print_message.py +49 -0
- autogen/oai/__init__.py +53 -0
- autogen/oai/anthropic.py +714 -0
- autogen/oai/bedrock.py +628 -0
- autogen/oai/cerebras.py +299 -0
- autogen/oai/client.py +1435 -0
- autogen/oai/client_utils.py +169 -0
- autogen/oai/cohere.py +479 -0
- autogen/oai/gemini.py +990 -0
- autogen/oai/gemini_types.py +129 -0
- autogen/oai/groq.py +305 -0
- autogen/oai/mistral.py +303 -0
- autogen/oai/oai_models/__init__.py +11 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +87 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +86 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
- autogen/oai/oai_models/completion_usage.py +60 -0
- autogen/oai/ollama.py +643 -0
- autogen/oai/openai_utils.py +881 -0
- autogen/oai/together.py +370 -0
- autogen/retrieve_utils.py +491 -0
- autogen/runtime_logging.py +160 -0
- autogen/token_count_utils.py +267 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +41 -0
- autogen/tools/dependency_injection.py +254 -0
- autogen/tools/experimental/__init__.py +43 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +161 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +328 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +183 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
- autogen/tools/function_utils.py +411 -0
- autogen/tools/tool.py +187 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- ag2-0.9.1a1.dist-info/RECORD +0 -6
- ag2-0.9.1a1.dist-info/top_level.txt +0 -1
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/LICENSE +0 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/NOTICE.md +0 -0
autogen/oai/mistral.py
ADDED
@@ -0,0 +1,303 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0
#
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
# SPDX-License-Identifier: MIT
"""Create an OpenAI-compatible client using Mistral.AI's API.

Example:
    ```python
    llm_config = {
        "config_list": [
            {"api_type": "mistral", "model": "open-mixtral-8x22b", "api_key": os.environ.get("MISTRAL_API_KEY")}
        ]
    }

    agent = autogen.AssistantAgent("my_agent", llm_config=llm_config)
    ```

Install Mistral.AI python library using: pip install --upgrade mistralai

Resources:
- https://docs.mistral.ai/getting-started/quickstart/

NOTE: Requires mistralai package version >= 1.0.1
"""

import json
import os
import time
import warnings
from typing import Any, Literal, Optional, Union

from pydantic import Field

from ..import_utils import optional_import_block, require_optional_import
from ..llm_config import LLMConfigEntry, register_llm_config
from .client_utils import should_hide_tools, validate_parameter
from .oai_models import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall, Choice, CompletionUsage

with optional_import_block():
    # Mistral libraries
    # pip install mistralai
    from mistralai import (
        AssistantMessage,
        Function,
        FunctionCall,
        Mistral,
        SystemMessage,
        ToolCall,
        ToolMessage,
        UserMessage,
    )


@register_llm_config
class MistralLLMConfigEntry(LLMConfigEntry):
    api_type: Literal["mistral"] = "mistral"
    temperature: float = Field(default=0.7)
    top_p: Optional[float] = None
    max_tokens: Optional[int] = Field(default=None, ge=0)
    safe_prompt: bool = False
    random_seed: Optional[int] = None
    stream: bool = False
    hide_tools: Literal["if_all_run", "if_any_run", "never"] = "never"
    tool_choice: Optional[Literal["none", "auto", "any"]] = None

    def create_client(self):
        raise NotImplementedError("MistralLLMConfigEntry.create_client is not implemented.")


@require_optional_import("mistralai", "mistral")
class MistralAIClient:
    """Client for Mistral.AI's API."""

    def __init__(self, **kwargs):
        """Requires api_key or environment variable to be set

        Args:
            **kwargs: Additional keyword arguments to pass to the Mistral client.
        """
        # Ensure we have the api_key upon instantiation
        self.api_key = kwargs.get("api_key")
        if not self.api_key:
            self.api_key = os.getenv("MISTRAL_API_KEY", None)

        assert self.api_key, (
            "Please specify the 'api_key' in your config list entry for Mistral or set the MISTRAL_API_KEY env variable."
        )

        if "response_format" in kwargs and kwargs["response_format"] is not None:
            warnings.warn("response_format is not supported for Mistral.AI, it will be ignored.", UserWarning)

        self._client = Mistral(api_key=self.api_key)

    def message_retrieval(self, response: ChatCompletion) -> Union[list[str], list[ChatCompletionMessage]]:
        """Retrieve the messages from the response."""
        return [choice.message for choice in response.choices]

    def cost(self, response) -> float:
        return response.cost

    @require_optional_import("mistralai", "mistral")
    def parse_params(self, params: dict[str, Any]) -> dict[str, Any]:
        """Loads the parameters for Mistral.AI API from the passed in parameters and returns a validated set. Checks types, ranges, and sets defaults"""
        mistral_params = {}

        # 1. Validate models
        mistral_params["model"] = params.get("model")
        assert mistral_params["model"], (
            "Please specify the 'model' in your config list entry to nominate the Mistral.ai model to use."
        )

        # 2. Validate allowed Mistral.AI parameters
        mistral_params["temperature"] = validate_parameter(params, "temperature", (int, float), True, 0.7, None, None)
        mistral_params["top_p"] = validate_parameter(params, "top_p", (int, float), True, None, None, None)
        mistral_params["max_tokens"] = validate_parameter(params, "max_tokens", int, True, None, (0, None), None)
        mistral_params["safe_prompt"] = validate_parameter(
            params, "safe_prompt", bool, False, False, None, [True, False]
        )
        mistral_params["random_seed"] = validate_parameter(params, "random_seed", int, True, None, False, None)
        mistral_params["tool_choice"] = validate_parameter(
            params, "tool_choice", str, False, None, None, ["none", "auto", "any"]
        )

        # TODO
        if params.get("stream", False):
            warnings.warn(
                "Streaming is not currently supported, streaming will be disabled.",
                UserWarning,
            )

        # 3. Convert messages to Mistral format
        mistral_messages = []
        tool_call_ids = {}  # tool call ids to function name mapping
        for message in params["messages"]:
            if message["role"] == "assistant" and "tool_calls" in message and message["tool_calls"] is not None:
                # Convert OAI ToolCall to Mistral ToolCall
                mistral_messages_tools = []
                for toolcall in message["tool_calls"]:
                    mistral_messages_tools.append(
                        ToolCall(
                            id=toolcall["id"],
                            function=FunctionCall(
                                name=toolcall["function"]["name"],
                                arguments=json.loads(toolcall["function"]["arguments"]),
                            ),
                        )
                    )

                mistral_messages.append(AssistantMessage(content="", tool_calls=mistral_messages_tools))

                # Map tool call id to the function name
                for tool_call in message["tool_calls"]:
                    tool_call_ids[tool_call["id"]] = tool_call["function"]["name"]

            elif message["role"] == "system":
                if len(mistral_messages) > 0 and mistral_messages[-1].role == "assistant":
                    # System messages can't appear after an Assistant message, so use a UserMessage
                    mistral_messages.append(UserMessage(content=message["content"]))
                else:
                    mistral_messages.append(SystemMessage(content=message["content"]))
            elif message["role"] == "assistant":
                mistral_messages.append(AssistantMessage(content=message["content"]))
            elif message["role"] == "user":
                mistral_messages.append(UserMessage(content=message["content"]))

            elif message["role"] == "tool":
                # Indicates the result of a tool call, the name is the function name called
                mistral_messages.append(
                    ToolMessage(
                        name=tool_call_ids[message["tool_call_id"]],
                        content=message["content"],
                        tool_call_id=message["tool_call_id"],
                    )
                )
            else:
                warnings.warn(f"Unknown message role {message['role']}", UserWarning)

        # 4. Last message needs to be user or tool, if not, add a "please continue" message
        if not isinstance(mistral_messages[-1], UserMessage) and not isinstance(mistral_messages[-1], ToolMessage):
            mistral_messages.append(UserMessage(content="Please continue."))

        mistral_params["messages"] = mistral_messages

        # 5. Add tools to the call if we have them and aren't hiding them
        if "tools" in params:
            hide_tools = validate_parameter(
                params, "hide_tools", str, False, "never", None, ["if_all_run", "if_any_run", "never"]
            )
            if not should_hide_tools(params["messages"], params["tools"], hide_tools):
                mistral_params["tools"] = tool_def_to_mistral(params["tools"])

        return mistral_params

    @require_optional_import("mistralai", "mistral")
    def create(self, params: dict[str, Any]) -> ChatCompletion:
        # 1. Parse parameters to Mistral.AI API's parameters
        mistral_params = self.parse_params(params)

        # 2. Call Mistral.AI API
        mistral_response = self._client.chat.complete(**mistral_params)
        # TODO: Handle streaming

        # 3. Convert Mistral response to OAI compatible format
        if mistral_response.choices[0].finish_reason == "tool_calls":
            mistral_finish = "tool_calls"
            tool_calls = []
            for tool_call in mistral_response.choices[0].message.tool_calls:
                tool_calls.append(
                    ChatCompletionMessageToolCall(
                        id=tool_call.id,
                        function={"name": tool_call.function.name, "arguments": tool_call.function.arguments},
                        type="function",
                    )
                )
        else:
            mistral_finish = "stop"
            tool_calls = None

        message = ChatCompletionMessage(
            role="assistant",
            content=mistral_response.choices[0].message.content,
            function_call=None,
            tool_calls=tool_calls,
        )
        choices = [Choice(finish_reason=mistral_finish, index=0, message=message)]

        response_oai = ChatCompletion(
            id=mistral_response.id,
            model=mistral_response.model,
            created=int(time.time()),
            object="chat.completion",
            choices=choices,
            usage=CompletionUsage(
                prompt_tokens=mistral_response.usage.prompt_tokens,
                completion_tokens=mistral_response.usage.completion_tokens,
                total_tokens=mistral_response.usage.prompt_tokens + mistral_response.usage.completion_tokens,
            ),
            cost=calculate_mistral_cost(
                mistral_response.usage.prompt_tokens, mistral_response.usage.completion_tokens, mistral_response.model
            ),
        )

        return response_oai

    @staticmethod
    def get_usage(response: ChatCompletion) -> dict:
        return {
            "prompt_tokens": response.usage.prompt_tokens if response.usage is not None else 0,
            "completion_tokens": response.usage.completion_tokens if response.usage is not None else 0,
            "total_tokens": (
                response.usage.prompt_tokens + response.usage.completion_tokens if response.usage is not None else 0
            ),
            "cost": response.cost if hasattr(response, "cost") else 0,
            "model": response.model,
        }


@require_optional_import("mistralai", "mistral")
def tool_def_to_mistral(tool_definitions: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Converts AG2 tool definition to a mistral tool format"""
    mistral_tools = []

    for autogen_tool in tool_definitions:
        mistral_tool = {
            "type": "function",
            "function": Function(
                name=autogen_tool["function"]["name"],
                description=autogen_tool["function"]["description"],
                parameters=autogen_tool["function"]["parameters"],
            ),
        }

        mistral_tools.append(mistral_tool)

    return mistral_tools


def calculate_mistral_cost(input_tokens: int, output_tokens: int, model_name: str) -> float:
    """Calculate the cost of the mistral response."""
    # Prices per 1 thousand tokens
    # https://mistral.ai/technology/
    model_cost_map = {
        "open-mistral-7b": {"input": 0.00025, "output": 0.00025},
        "open-mixtral-8x7b": {"input": 0.0007, "output": 0.0007},
        "open-mixtral-8x22b": {"input": 0.002, "output": 0.006},
        "mistral-small-latest": {"input": 0.001, "output": 0.003},
        "mistral-medium-latest": {"input": 0.00275, "output": 0.0081},
        "mistral-large-latest": {"input": 0.0003, "output": 0.0003},
        "mistral-large-2407": {"input": 0.0003, "output": 0.0003},
        "open-mistral-nemo-2407": {"input": 0.0003, "output": 0.0003},
        "codestral-2405": {"input": 0.001, "output": 0.003},
    }

    # Ensure we have the model they are using and return the total cost
    if model_name in model_cost_map:
        costs = model_cost_map[model_name]

        return (input_tokens * costs["input"] / 1000) + (output_tokens * costs["output"] / 1000)
    else:
        warnings.warn(f"Cost calculation is not implemented for model {model_name}, will return $0.", UserWarning)
        return 0
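For quick orientation, here is a minimal usage sketch of the client above. It is not part of the wheel: it assumes `MISTRAL_API_KEY` is exported and the `mistralai` package is installed, and the model name and prompts are placeholders.

```python
# Hypothetical driver for MistralAIClient; prompts and model choice are examples only.
from autogen.oai.mistral import MistralAIClient, calculate_mistral_cost

client = MistralAIClient()  # no api_key kwarg: falls back to the MISTRAL_API_KEY env variable

# OpenAI-style params; parse_params() converts these into Mistral's message types.
params = {
    "model": "open-mixtral-8x22b",
    "messages": [
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Name one prime number."},
    ],
}

response = client.create(params)
print(client.message_retrieval(response)[0].content)
print(MistralAIClient.get_usage(response))

# The cost table is per 1K tokens, e.g. 1,000 input + 500 output tokens:
print(calculate_mistral_cost(1000, 500, "open-mixtral-8x22b"))  # 0.002 + 0.003 = 0.005
```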
autogen/oai/oai_models/__init__.py
ADDED
@@ -0,0 +1,11 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0

from .chat_completion import ChatCompletionExtended as ChatCompletion
from .chat_completion import Choice
from .chat_completion_message import ChatCompletionMessage
from .chat_completion_message_tool_call import ChatCompletionMessageToolCall
from .completion_usage import CompletionUsage

__all__ = ["ChatCompletion", "ChatCompletionMessage", "ChatCompletionMessageToolCall", "Choice", "CompletionUsage"]
autogen/oai/oai_models/_models.py
ADDED
@@ -0,0 +1,16 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0

# Taken over from https://github.com/openai/openai-python/blob/main/src/openai/_models.py

import pydantic
import pydantic.generics
from pydantic import ConfigDict
from typing_extensions import ClassVar

__all__ = ["BaseModel"]


class BaseModel(pydantic.BaseModel):
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow")
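The `extra="allow"` config is what lets AG2 carry provider-specific fields on otherwise OpenAI-shaped models. A small illustrative sketch (the subclass and extra field below are invented for demonstration):

```python
# extra="allow" keeps unknown fields instead of raising a validation error.
from autogen.oai.oai_models._models import BaseModel


class Usage(BaseModel):  # hypothetical subclass, for illustration only
    prompt_tokens: int


u = Usage(prompt_tokens=10, provider_latency_ms=42)  # invented extra field is retained
print(u.prompt_tokens, u.provider_latency_ms)  # extras stay attribute-accessible in pydantic v2
```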
autogen/oai/oai_models/chat_completion.py
ADDED
@@ -0,0 +1,87 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0

# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion.py

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Any, Callable, List, Optional

from typing_extensions import Literal

from ._models import BaseModel
from .chat_completion_message import ChatCompletionMessage
from .chat_completion_token_logprob import ChatCompletionTokenLogprob
from .completion_usage import CompletionUsage

__all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"]


class ChoiceLogprobs(BaseModel):
    content: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message content tokens with log probability information."""

    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message refusal tokens with log probability information."""


class Choice(BaseModel):
    finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
    """The reason the model stopped generating tokens.

    This will be `stop` if the model hit a natural stop point or a provided stop
    sequence, `length` if the maximum number of tokens specified in the request was
    reached, `content_filter` if content was omitted due to a flag from our content
    filters, `tool_calls` if the model called a tool, or `function_call`
    (deprecated) if the model called a function.
    """

    index: int
    """The index of the choice in the list of choices."""

    logprobs: Optional[ChoiceLogprobs] = None
    """Log probability information for the choice."""

    message: ChatCompletionMessage
    """A chat completion message generated by the model."""


class ChatCompletion(BaseModel):
    id: str
    """A unique identifier for the chat completion."""

    choices: List[Choice]
    """A list of chat completion choices.

    Can be more than one if `n` is greater than 1.
    """

    created: int
    """The Unix timestamp (in seconds) of when the chat completion was created."""

    model: str
    """The model used for the chat completion."""

    object: Literal["chat.completion"]
    """The object type, which is always `chat.completion`."""

    service_tier: Optional[Literal["auto", "default", "flex"]] = None
    """The service tier used for processing the request."""

    system_fingerprint: Optional[str] = None
    """This fingerprint represents the backend configuration that the model runs with.

    Can be used in conjunction with the `seed` request parameter to understand when
    backend changes have been made that might impact determinism.
    """

    usage: Optional[CompletionUsage] = None
    """Usage statistics for the completion request."""


class ChatCompletionExtended(ChatCompletion):
    message_retrieval_function: Optional[Callable[[Any, "ChatCompletion"], list[ChatCompletionMessage]]] = None
    config_id: Optional[str] = None
    pass_filter: Optional[Callable[..., bool]] = None
    cost: Optional[float] = None
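`ChatCompletionExtended` is what the package re-exports as `ChatCompletion`, so provider clients such as `MistralAIClient` can attach a computed `cost` while staying compatible with code that expects the plain OpenAI shape. A sketch with made-up values:

```python
# Hypothetical construction of the extended model; all field values are illustrative.
import time

from autogen.oai.oai_models import ChatCompletion, ChatCompletionMessage, Choice, CompletionUsage

response = ChatCompletion(  # actually ChatCompletionExtended, per the __init__ alias
    id="chatcmpl-123",
    model="open-mixtral-8x22b",
    created=int(time.time()),
    object="chat.completion",
    choices=[
        Choice(
            finish_reason="stop",
            index=0,
            message=ChatCompletionMessage(role="assistant", content="2 is prime."),
        )
    ],
    usage=CompletionUsage(prompt_tokens=12, completion_tokens=5, total_tokens=17),
    cost=0.000054,  # field added by ChatCompletionExtended
)
print(response.cost)
```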
autogen/oai/oai_models/chat_completion_audio.py
ADDED
@@ -0,0 +1,32 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0

# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion_audio.py

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.


from ._models import BaseModel

__all__ = ["ChatCompletionAudio"]


class ChatCompletionAudio(BaseModel):
    id: str
    """Unique identifier for this audio response."""

    data: str
    """
    Base64 encoded audio bytes generated by the model, in the format specified in
    the request.
    """

    expires_at: int
    """
    The Unix timestamp (in seconds) for when this audio response will no longer be
    accessible on the server for use in multi-turn conversations.
    """

    transcript: str
    """Transcript of the audio generated by the model."""
autogen/oai/oai_models/chat_completion_message.py
ADDED
@@ -0,0 +1,86 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0

# Taken over from https://github.com/openai/openai-python/blob/16a10604fbd0d82c1382b84b417a1d6a2d33a7f1/src/openai/types/chat/chat_completion_message.py

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional

from typing_extensions import Literal

from ._models import BaseModel
from .chat_completion_audio import ChatCompletionAudio
from .chat_completion_message_tool_call import ChatCompletionMessageToolCall

__all__ = ["Annotation", "AnnotationURLCitation", "ChatCompletionMessage", "FunctionCall"]


class AnnotationURLCitation(BaseModel):
    end_index: int
    """The index of the last character of the URL citation in the message."""

    start_index: int
    """The index of the first character of the URL citation in the message."""

    title: str
    """The title of the web resource."""

    url: str
    """The URL of the web resource."""


class Annotation(BaseModel):
    type: Literal["url_citation"]
    """The type of the URL citation. Always `url_citation`."""

    url_citation: AnnotationURLCitation
    """A URL citation when using web search."""


class FunctionCall(BaseModel):
    arguments: str
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: str
    """The name of the function to call."""


class ChatCompletionMessage(BaseModel):
    content: Optional[str] = None
    """The contents of the message."""

    refusal: Optional[str] = None
    """The refusal message generated by the model."""

    role: Literal["assistant"]
    """The role of the author of this message."""

    annotations: Optional[List[Annotation]] = None
    """
    Annotations for the message, when applicable, as when using the
    [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
    """

    audio: Optional[ChatCompletionAudio] = None
    """
    If the audio output modality is requested, this object contains data about the
    audio response from the model.
    [Learn more](https://platform.openai.com/docs/guides/audio).
    """

    function_call: Optional[FunctionCall] = None
    """Deprecated and replaced by `tool_calls`.

    The name and arguments of a function that should be called, as generated by the
    model.
    """

    tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
    """The tool calls generated by the model, such as function calls."""
autogen/oai/oai_models/chat_completion_message_tool_call.py
ADDED
@@ -0,0 +1,37 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0

# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion_message_tool_call.py

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from ._models import BaseModel

__all__ = ["ChatCompletionMessageToolCall", "Function"]


class Function(BaseModel):
    arguments: str
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: str
    """The name of the function to call."""


class ChatCompletionMessageToolCall(BaseModel):
    id: str
    """The ID of the tool call."""

    function: Function
    """The function that the model called."""

    type: Literal["function"]
    """The type of the tool. Currently, only `function` is supported."""
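For reference, this is how a tool-calling assistant message fits together, mirroring what `MistralAIClient.create` builds when `finish_reason == "tool_calls"` (the ID and tool below are invented):

```python
# Illustrative assembly of a tool-call message; the dict is coerced into a Function model.
from autogen.oai.oai_models import ChatCompletionMessage, ChatCompletionMessageToolCall

message = ChatCompletionMessage(
    role="assistant",
    content=None,
    tool_calls=[
        ChatCompletionMessageToolCall(
            id="call_abc123",  # invented tool call ID
            function={"name": "get_weather", "arguments": '{"city": "Paris"}'},  # invented tool
            type="function",
        )
    ],
)
print(message.tool_calls[0].function.name)  # get_weather
```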
autogen/oai/oai_models/chat_completion_token_logprob.py
ADDED
@@ -0,0 +1,63 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0

# Taken over from https://github.com/openai/openai-python/blob/3e69750d47df4f0759d4a28ddc68e4b38756d9ca/src/openai/types/chat/chat_completion_token_logprob.py

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional

from ._models import BaseModel

__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"]


class TopLogprob(BaseModel):
    token: str
    """The token."""

    bytes: Optional[List[int]] = None
    """A list of integers representing the UTF-8 bytes representation of the token.

    Useful in instances where characters are represented by multiple tokens and
    their byte representations must be combined to generate the correct text
    representation. Can be `null` if there is no bytes representation for the token.
    """

    logprob: float
    """The log probability of this token, if it is within the top 20 most likely
    tokens.

    Otherwise, the value `-9999.0` is used to signify that the token is very
    unlikely.
    """


class ChatCompletionTokenLogprob(BaseModel):
    token: str
    """The token."""

    bytes: Optional[List[int]] = None
    """A list of integers representing the UTF-8 bytes representation of the token.

    Useful in instances where characters are represented by multiple tokens and
    their byte representations must be combined to generate the correct text
    representation. Can be `null` if there is no bytes representation for the token.
    """

    logprob: float
    """The log probability of this token, if it is within the top 20 most likely
    tokens.

    Otherwise, the value `-9999.0` is used to signify that the token is very
    unlikely.
    """

    top_logprobs: List[TopLogprob]
    """List of the most likely tokens and their log probability, at this token
    position.

    In rare cases, there may be fewer than the number of requested `top_logprobs`
    returned.
    """