ag2 0.9.1__py3-none-any.whl → 0.9.1.post0__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Potentially problematic release.
- {ag2-0.9.1.dist-info → ag2-0.9.1.post0.dist-info}/METADATA +264 -73
- ag2-0.9.1.post0.dist-info/RECORD +392 -0
- {ag2-0.9.1.dist-info → ag2-0.9.1.post0.dist-info}/WHEEL +1 -2
- autogen/__init__.py +89 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +427 -0
- autogen/_website/generate_mkdocs.py +1174 -0
- autogen/_website/notebook_processor.py +476 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +412 -0
- autogen/agentchat/__init__.py +44 -0
- autogen/agentchat/agent.py +182 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +309 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +429 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +566 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +187 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
- autogen/agentchat/contrib/rag/query_engine.py +74 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
- autogen/agentchat/contrib/swarm_agent.py +1425 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +232 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +303 -0
- autogen/agentchat/conversable_agent.py +4020 -0
- autogen/agentchat/group/__init__.py +64 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +41 -0
- autogen/agentchat/group/context_variables.py +192 -0
- autogen/agentchat/group/group_tool_executor.py +202 -0
- autogen/agentchat/group/group_utils.py +591 -0
- autogen/agentchat/group/handoffs.py +244 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +237 -0
- autogen/agentchat/group/on_condition.py +58 -0
- autogen/agentchat/group/on_context_condition.py +54 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +159 -0
- autogen/agentchat/group/patterns/manual.py +176 -0
- autogen/agentchat/group/patterns/pattern.py +288 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +26 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/group_chat_target.py +132 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +413 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1694 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
- autogen/agentchat/realtime/experimental/function_observer.py +85 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +111 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +73 -0
- autogen/agents/contrib/time/time_tool_agent.py +51 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
- autogen/agents/experimental/document_agent/document_agent.py +461 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +380 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
- autogen/agents/experimental/document_agent/parser_utils.py +130 -0
- autogen/agents/experimental/document_agent/url_utils.py +426 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +77 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +62 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +75 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +102 -0
- autogen/cache/in_memory_cache.py +58 -0
- autogen/cache/redis_cache.py +123 -0
- autogen/code_utils.py +596 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +119 -0
- autogen/coding/docker_commandline_code_executor.py +268 -0
- autogen/coding/factory.py +47 -0
- autogen/coding/func_with_reqs.py +202 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +167 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +231 -0
- autogen/coding/jupyter/jupyter_code_executor.py +160 -0
- autogen/coding/jupyter/local_jupyter_server.py +172 -0
- autogen/coding/local_commandline_code_executor.py +405 -0
- autogen/coding/markdown_code_extractor.py +45 -0
- autogen/coding/utils.py +56 -0
- autogen/doc_utils.py +34 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1010 -0
- autogen/events/base_event.py +99 -0
- autogen/events/client_events.py +167 -0
- autogen/events/helpers.py +36 -0
- autogen/events/print_event.py +46 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +80 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +225 -0
- autogen/fast_depends/core/model.py +576 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +29 -0
- autogen/fast_depends/dependencies/provider.py +39 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +280 -0
- autogen/fast_depends/utils.py +187 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +178 -0
- autogen/import_utils.py +526 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
- autogen/interop/langchain/langchain_tool.py +82 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +113 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
- autogen/interop/registry.py +69 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +56 -0
- autogen/io/run_response.py +293 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +213 -0
- autogen/json_utils.py +43 -0
- autogen/llm_config.py +379 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +128 -0
- autogen/logger/file_logger.py +261 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +523 -0
- autogen/math_utils.py +339 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/mcp_client.py +208 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +948 -0
- autogen/messages/base_message.py +107 -0
- autogen/messages/client_messages.py +171 -0
- autogen/messages/print_message.py +49 -0
- autogen/oai/__init__.py +53 -0
- autogen/oai/anthropic.py +714 -0
- autogen/oai/bedrock.py +628 -0
- autogen/oai/cerebras.py +299 -0
- autogen/oai/client.py +1435 -0
- autogen/oai/client_utils.py +169 -0
- autogen/oai/cohere.py +479 -0
- autogen/oai/gemini.py +990 -0
- autogen/oai/gemini_types.py +129 -0
- autogen/oai/groq.py +305 -0
- autogen/oai/mistral.py +303 -0
- autogen/oai/oai_models/__init__.py +11 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +87 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +86 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
- autogen/oai/oai_models/completion_usage.py +60 -0
- autogen/oai/ollama.py +643 -0
- autogen/oai/openai_utils.py +881 -0
- autogen/oai/together.py +370 -0
- autogen/retrieve_utils.py +491 -0
- autogen/runtime_logging.py +160 -0
- autogen/token_count_utils.py +267 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +41 -0
- autogen/tools/dependency_injection.py +254 -0
- autogen/tools/experimental/__init__.py +43 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +161 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +328 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +183 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
- autogen/tools/function_utils.py +411 -0
- autogen/tools/tool.py +187 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- ag2-0.9.1.dist-info/RECORD +0 -6
- ag2-0.9.1.dist-info/top_level.txt +0 -1
- {ag2-0.9.1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/LICENSE +0 -0
- {ag2-0.9.1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/NOTICE.md +0 -0
@@ -0,0 +1,220 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from collections.abc import AsyncGenerator
+from contextlib import asynccontextmanager
+from logging import Logger, getLogger
+from typing import TYPE_CHECKING, Any, Callable, Optional, Union
+
+from ......doc_utils import export_module
+from ......import_utils import optional_import_block, require_optional_import
+from ......llm_config import LLMConfig
+from ...realtime_events import RealtimeEvent
+from ..realtime_client import RealtimeClientBase, Role, register_realtime_client
+from .utils import parse_oai_message
+
+with optional_import_block():
+    from openai import DEFAULT_MAX_RETRIES, NOT_GIVEN, AsyncOpenAI
+    from openai.resources.beta.realtime.realtime import AsyncRealtimeConnection
+
+
+if TYPE_CHECKING:
+    from ..realtime_client import RealtimeClientProtocol
+
+__all__ = ["OpenAIRealtimeClient"]
+
+global_logger = getLogger(__name__)
+
+
+@register_realtime_client()
+@require_optional_import("openai>=1.66.2", "openai-realtime", except_for=["get_factory", "__init__"])
+@export_module("autogen.agentchat.realtime.experimental.clients")
+class OpenAIRealtimeClient(RealtimeClientBase):
+    """(Experimental) Client for OpenAI Realtime API."""
+
+    def __init__(
+        self,
+        *,
+        llm_config: Union[LLMConfig, dict[str, Any]],
+        logger: Optional[Logger] = None,
+    ) -> None:
+        """(Experimental) Client for OpenAI Realtime API.
+
+        Args:
+            llm_config: The config for the client.
+            logger: the logger to use for logging events
+        """
+        super().__init__()
+        self._llm_config = llm_config
+        self._logger = logger
+
+        self._connection: Optional["AsyncRealtimeConnection"] = None
+
+        self.config = llm_config["config_list"][0]
+        # model is passed to self._client.beta.realtime.connect function later
+        self._model: str = self.config["model"]
+        self._voice: str = self.config.get("voice", "alloy")
+        self._temperature: float = llm_config.get("temperature", 0.8)  # type: ignore[union-attr]
+
+        self._client: Optional["AsyncOpenAI"] = None
+
+    @property
+    def logger(self) -> Logger:
+        """Get the logger for the OpenAI Realtime API."""
+        return self._logger or global_logger
+
+    @property
+    def connection(self) -> "AsyncRealtimeConnection":
+        """Get the OpenAI WebSocket connection."""
+        if self._connection is None:
+            raise RuntimeError("OpenAI WebSocket is not initialized")
+        return self._connection
+
+    async def send_function_result(self, call_id: str, result: str) -> None:
+        """Send the result of a function call to the OpenAI Realtime API.
+
+        Args:
+            call_id (str): The ID of the function call.
+            result (str): The result of the function call.
+        """
+        await self.connection.conversation.item.create(
+            item={
+                "type": "function_call_output",
+                "call_id": call_id,
+                "output": result,
+            },
+        )
+
+        await self.connection.response.create()
+
+    async def send_text(self, *, role: Role, text: str) -> None:
+        """Send a text message to the OpenAI Realtime API.
+
+        Args:
+            role (str): The role of the message.
+            text (str): The text of the message.
+        """
+        await self.connection.response.cancel()
+        await self.connection.conversation.item.create(
+            item={"type": "message", "role": role, "content": [{"type": "input_text", "text": text}]}
+        )
+        await self.connection.response.create()
+
+    async def send_audio(self, audio: str) -> None:
+        """Send audio to the OpenAI Realtime API.
+
+        Args:
+            audio (str): The audio to send.
+        """
+        await self.queue_input_audio_buffer_delta(audio)
+        await self.connection.input_audio_buffer.append(audio=audio)
+
+    async def truncate_audio(self, audio_end_ms: int, content_index: int, item_id: str) -> None:
+        """Truncate audio in the OpenAI Realtime API.
+
+        Args:
+            audio_end_ms (int): The end of the audio to truncate.
+            content_index (int): The index of the content to truncate.
+            item_id (str): The ID of the item to truncate.
+        """
+        await self.connection.conversation.item.truncate(
+            audio_end_ms=audio_end_ms, content_index=content_index, item_id=item_id
+        )
+
+    async def _initialize_session(self) -> None:
+        """Control initial session with OpenAI."""
+        session_update = {
+            "turn_detection": {"type": "server_vad"},
+            "voice": self._voice,
+            "modalities": ["audio", "text"],
+            "temperature": self._temperature,
+        }
+        await self.session_update(session_options=session_update)
+
+    async def session_update(self, session_options: dict[str, Any]) -> None:
+        """Send a session update to the OpenAI Realtime API.
+
+        Args:
+            session_options (dict[str, Any]): The session options to update.
+        """
+        logger = self.logger
+        logger.info(f"Sending session update: {session_options}")
+        await self.connection.session.update(session=session_options)  # type: ignore[arg-type]
+        logger.info("Sending session update finished")
+
+    @asynccontextmanager
+    async def connect(self) -> AsyncGenerator[None, None]:
+        """Connect to the OpenAI Realtime API."""
+        try:
+            if not self._client:
+                self._client = AsyncOpenAI(
+                    api_key=self.config.get("api_key", None),
+                    organization=self.config.get("organization", None),
+                    project=self.config.get("project", None),
+                    base_url=self.config.get("base_url", None),
+                    websocket_base_url=self.config.get("websocket_base_url", None),
+                    timeout=self.config.get("timeout", NOT_GIVEN),
+                    max_retries=self.config.get("max_retries", DEFAULT_MAX_RETRIES),
+                    default_headers=self.config.get("default_headers", None),
+                    default_query=self.config.get("default_query", None),
+                )
+            async with self._client.beta.realtime.connect(
+                model=self._model,
+            ) as self._connection:
+                await self._initialize_session()
+                yield
+        finally:
+            self._connection = None
+
+    async def read_events(self) -> AsyncGenerator[RealtimeEvent, None]:
+        """Read messages from the OpenAI Realtime API."""
+        if self._connection is None:
+            raise RuntimeError("Client is not connected, call connect() first.")
+
+        try:
+            async for event in self._read_events():
+                yield event
+
+        finally:
+            self._connection = None
+
+    async def _read_from_connection(self) -> AsyncGenerator[RealtimeEvent, None]:
+        """Read messages from the OpenAI Realtime API."""
+        async for message in self._connection:
+            for event in self._parse_message(message.model_dump()):
+                yield event
+
+    def _parse_message(self, message: dict[str, Any]) -> list[RealtimeEvent]:
+        """Parse a message from the OpenAI Realtime API.
+
+        Args:
+            message (dict[str, Any]): The message to parse.
+
+        Returns:
+            RealtimeEvent: The parsed event.
+        """
+        return [parse_oai_message(message)]
+
+    @classmethod
+    def get_factory(
+        cls, llm_config: Union[LLMConfig, dict[str, Any]], logger: Logger, **kwargs: Any
+    ) -> Optional[Callable[[], "RealtimeClientProtocol"]]:
+        """Create a Realtime API client.
+
+        Args:
+            llm_config: The config for the client.
+            logger: The logger to use for logging events.
+            kwargs: Additional arguments.
+
+        Returns:
+            RealtimeClientProtocol: The Realtime API client is returned if the model matches the pattern
+        """
+        if llm_config["config_list"][0].get("api_type", "openai") == "openai" and list(kwargs.keys()) == []:
+            return lambda: OpenAIRealtimeClient(llm_config=llm_config, logger=logger, **kwargs)
+        return None


+# needed for mypy to check if OpenAIRealtimeWebRTCClient implements RealtimeClientProtocol
+if TYPE_CHECKING:
+    _client: RealtimeClientProtocol = OpenAIRealtimeClient(llm_config={})
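Per the file listing, the 220 added lines above appear to be autogen/agentchat/realtime/experimental/clients/oai/base_client.py, which implements the WebSocket-based OpenAIRealtimeClient. A minimal usage sketch (not part of the diff), assuming the class is re-exported from autogen.agentchat.realtime.experimental.clients as the @export_module decorator suggests, and using a placeholder model name and API key:

import asyncio

# Assumed import path, based on the @export_module decorator above.
from autogen.agentchat.realtime.experimental.clients import OpenAIRealtimeClient

# Hypothetical config: the client reads config_list[0] for model/voice/api_key
# and the top-level "temperature" (default 0.8).
llm_config = {
    "config_list": [{"api_type": "openai", "model": "gpt-4o-realtime-preview", "api_key": "sk-..."}],
    "temperature": 0.8,
}


async def main() -> None:
    client = OpenAIRealtimeClient(llm_config=llm_config)
    # connect() opens the realtime connection and sends the initial session update.
    async with client.connect():
        await client.send_text(role="user", text="Hello!")
        # read_events() yields parsed RealtimeEvent objects until the connection closes.
        async for event in client.read_events():
            print(type(event).__name__)


asyncio.run(main())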
@@ -0,0 +1,243 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import json
+from collections.abc import AsyncGenerator
+from contextlib import asynccontextmanager
+from logging import Logger, getLogger
+from typing import TYPE_CHECKING, Any, Callable, Optional, Union
+
+from autogen.import_utils import optional_import_block, require_optional_import
+
+from ......doc_utils import export_module
+from ......llm_config import LLMConfig
+from ...realtime_events import RealtimeEvent
+from ..realtime_client import RealtimeClientBase, Role, register_realtime_client
+from .utils import parse_oai_message
+
+if TYPE_CHECKING:
+    from ...websockets import WebSocketProtocol as WebSocket
+    from ..realtime_client import RealtimeClientProtocol
+
+with optional_import_block():
+    import httpx
+
+__all__ = ["OpenAIRealtimeWebRTCClient"]
+
+global_logger = getLogger(__name__)
+
+
+@register_realtime_client()
+@require_optional_import("httpx", "openai-realtime", except_for="get_factory")
+@export_module("autogen.agentchat.realtime.experimental.clients.oai")
+class OpenAIRealtimeWebRTCClient(RealtimeClientBase):
+    """(Experimental) Client for OpenAI Realtime API that uses WebRTC protocol."""
+
+    def __init__(
+        self,
+        *,
+        llm_config: Union[LLMConfig, dict[str, Any]],
+        websocket: "WebSocket",
+        logger: Optional[Logger] = None,
+    ) -> None:
+        """(Experimental) Client for OpenAI Realtime API.
+
+        Args:
+            llm_config: The config for the client.
+            websocket: the websocket to use for the connection
+            logger: the logger to use for logging events
+        """
+        super().__init__()
+        self._llm_config = llm_config
+        self._logger = logger
+        self._websocket = websocket
+
+        config = llm_config["config_list"][0]
+        self._model: str = config["model"]
+        self._voice: str = config.get("voice", "alloy")
+        self._temperature: float = llm_config.get("temperature", 0.8)  # type: ignore[union-attr]
+        self._config = config
+        self._base_url = config.get("base_url", "https://api.openai.com/v1/realtime/sessions")
+
+    @property
+    def logger(self) -> Logger:
+        """Get the logger for the OpenAI Realtime API."""
+        return self._logger or global_logger
+
+    async def send_function_result(self, call_id: str, result: str) -> None:
+        """Send the result of a function call to the OpenAI Realtime API.
+
+        Args:
+            call_id (str): The ID of the function call.
+            result (str): The result of the function call.
+        """
+        await self._websocket.send_json({
+            "type": "conversation.item.create",
+            "item": {
+                "type": "function_call_output",
+                "call_id": call_id,
+                "output": result,
+            },
+        })
+        await self._websocket.send_json({"type": "response.create"})
+
+    async def send_text(self, *, role: Role, text: str) -> None:
+        """Send a text message to the OpenAI Realtime API.
+
+        Args:
+            role (str): The role of the message.
+            text (str): The text of the message.
+        """
+        # await self.connection.response.cancel() #why is this here?
+        await self._websocket.send_json({
+            "type": "response.cancel",
+        })
+        await self._websocket.send_json({
+            "type": "conversation.item.create",
+            "item": {"type": "message", "role": role, "content": [{"type": "input_text", "text": text}]},
+        })
+        # await self.connection.response.create()
+        await self._websocket.send_json({"type": "response.create"})
+
+    async def send_audio(self, audio: str) -> None:
+        """Send audio to the OpenAI Realtime API.
+        in case of WebRTC, audio is already sent by js client, so we just queue it in order to be logged.
+
+        Args:
+            audio (str): The audio to send.
+        """
+        await self.queue_input_audio_buffer_delta(audio)
+
+    async def truncate_audio(self, audio_end_ms: int, content_index: int, item_id: str) -> None:
+        """Truncate audio in the OpenAI Realtime API.
+
+        Args:
+            audio_end_ms (int): The end of the audio to truncate.
+            content_index (int): The index of the content to truncate.
+            item_id (str): The ID of the item to truncate.
+        """
+        await self._websocket.send_json({
+            "type": "conversation.item.truncate",
+            "content_index": content_index,
+            "item_id": item_id,
+            "audio_end_ms": audio_end_ms,
+        })
+
+    async def session_update(self, session_options: dict[str, Any]) -> None:
+        """Send a session update to the OpenAI Realtime API.
+
+        In the case of WebRTC we can not send it directly, but we can send it
+        to the javascript over the websocket, and rely on it to send session
+        update to OpenAI
+
+        Args:
+            session_options (dict[str, Any]): The session options to update.
+        """
+        logger = self.logger
+        logger.info(f"Sending session update: {session_options}")
+        # await self.connection.session.update(session=session_options)  # type: ignore[arg-type]
+        await self._websocket.send_json({"type": "session.update", "session": session_options})
+        logger.info("Sending session update finished")
+
+    def session_init_data(self) -> list[dict[str, Any]]:
+        """Control initial session with OpenAI."""
+        session_update = {
+            "turn_detection": {"type": "server_vad"},
+            "voice": self._voice,
+            "modalities": ["audio", "text"],
+            "temperature": self._temperature,
+        }
+        return [{"type": "session.update", "session": session_update}]
+
+    async def _initialize_session(self) -> None: ...
+
+    @asynccontextmanager
+    async def connect(self) -> AsyncGenerator[None, None]:
+        """Connect to the OpenAI Realtime API.
+
+        In the case of WebRTC, we pass connection information over the
+        websocket, so that javascript on the other end of websocket open
+        actual connection to OpenAI
+        """
+        try:
+            base_url = self._base_url
+            api_key = self._config.get("api_key", None)
+            headers = {
+                "Authorization": f"Bearer {api_key}",  # Use os.getenv to get from environment
+                "Content-Type": "application/json",
+            }
+            data = {
+                # "model": "gpt-4o-realtime-preview-2024-12-17",
+                "model": self._model,
+                "voice": self._voice,
+            }
+            async with httpx.AsyncClient() as client:
+                response = await client.post(base_url, headers=headers, json=data)
+                response.raise_for_status()
+                json_data = response.json()
+                json_data["model"] = self._model
+            if self._websocket is not None:
+                session_init = self.session_init_data()
+                await self._websocket.send_json({"type": "ag2.init", "config": json_data, "init": session_init})
+            yield
+        finally:
+            pass
+
+    async def read_events(self) -> AsyncGenerator[RealtimeEvent, None]:
+        """Read events from the OpenAI Realtime API."""
+        async for event in self._read_events():
+            yield event
+
+    async def _read_from_connection(self) -> AsyncGenerator[RealtimeEvent, None]:
+        """Read messages from the OpenAI Realtime API connection.
+        Again, in case of WebRTC, we do not read OpenAI messages directly since we
+        do not hold connection to OpenAI. Instead we read messages from the websocket, and javascript
+        client on the other side of the websocket that is connected to OpenAI is relaying events to us.
+        """
+        while True:
+            try:
+                message_json = await self._websocket.receive_text()
+                message = json.loads(message_json)
+                for event in self._parse_message(message):
+                    yield event
+            except Exception as e:
+                self.logger.exception(f"Error reading from connection {e}")
+                break
+
+    def _parse_message(self, message: dict[str, Any]) -> list[RealtimeEvent]:
+        """Parse a message from the OpenAI Realtime API.
+
+        Args:
+            message (dict[str, Any]): The message to parse.
+
+        Returns:
+            RealtimeEvent: The parsed event.
+        """
+        return [parse_oai_message(message)]
+
+    @classmethod
+    def get_factory(
+        cls, llm_config: Union[LLMConfig, dict[str, Any]], logger: Logger, **kwargs: Any
+    ) -> Optional[Callable[[], "RealtimeClientProtocol"]]:
+        """Create a Realtime API client.
+
+        Args:
+            llm_config: The config for the client.
+            logger: The logger to use for logging events.
+            **kwargs: Additional arguments.
+
+        Returns:
+            RealtimeClientProtocol: The Realtime API client is returned if the model matches the pattern
+        """
+        if llm_config["config_list"][0].get("api_type", "openai") == "openai" and list(kwargs.keys()) == ["websocket"]:
+            return lambda: OpenAIRealtimeWebRTCClient(llm_config=llm_config, logger=logger, **kwargs)
+
+        return None


+# needed for mypy to check if OpenAIRealtimeWebRTCClient implements RealtimeClientProtocol
+if TYPE_CHECKING:
+
+    def _rtc_client(websocket: "WebSocket") -> RealtimeClientProtocol:
+        return OpenAIRealtimeWebRTCClient(llm_config={}, websocket=websocket)
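The 243 added lines above appear to be the rtc_client.py module from the same oai package: a WebRTC variant that never talks to OpenAI directly, but relays session updates and events over a server-side websocket to a JavaScript client in the browser. A sketch of how its get_factory hook selects this client; the stub websocket and the import path are hypothetical, and only the api_type check and the "websocket" keyword are taken from the code above:

from logging import getLogger
from typing import Any

# Assumed import path, based on the @export_module decorator above.
from autogen.agentchat.realtime.experimental.clients.oai import OpenAIRealtimeWebRTCClient


class StubWebSocket:
    """Hypothetical stand-in for the WebSocketProtocol the client expects (send_json / receive_text)."""

    async def send_json(self, data: dict[str, Any]) -> None:
        print("to browser:", data)

    async def receive_text(self) -> str:
        return '{"type": "session.created"}'


llm_config = {"config_list": [{"api_type": "openai", "model": "gpt-4o-realtime-preview", "api_key": "sk-..."}]}

# get_factory returns a constructor only when api_type is "openai" and the sole extra kwarg is "websocket".
factory = OpenAIRealtimeWebRTCClient.get_factory(llm_config, getLogger(__name__), websocket=StubWebSocket())
assert factory is not None
client = factory()  # an OpenAIRealtimeWebRTCClient bound to the stub websocket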
@@ -0,0 +1,48 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import json
+from typing import Any
+
+from ...realtime_events import (
+    AudioDelta,
+    FunctionCall,
+    InputAudioBufferDelta,
+    RealtimeEvent,
+    SessionCreated,
+    SessionUpdated,
+    SpeechStarted,
+)
+
+__all__ = ["parse_oai_message"]
+
+
+def parse_oai_message(message: dict[str, Any]) -> RealtimeEvent:
+    """Parse a message from the OpenAI Realtime API.
+
+    Args:
+        message (dict[str, Any]): The message to parse.
+
+    Returns:
+        RealtimeEvent: The parsed event.
+    """
+    if message.get("type") == "session.created":
+        return SessionCreated(raw_message=message)
+    elif message.get("type") == "session.updated":
+        return SessionUpdated(raw_message=message)
+    elif message.get("type") == "response.audio.delta":
+        return AudioDelta(raw_message=message, delta=message["delta"], item_id=message["item_id"])
+    elif message.get("type") == "input_audio_buffer.speech_started":
+        return SpeechStarted(raw_message=message)
+    elif message.get("type") == "input_audio_buffer.delta":
+        return InputAudioBufferDelta(delta=message["delta"], item_id=None, raw_message=message)
+    elif message.get("type") == "response.function_call_arguments.done":
+        return FunctionCall(
+            raw_message=message,
+            call_id=message["call_id"],
+            name=message["name"],
+            arguments=json.loads(message["arguments"]),
+        )
+    else:
+        return RealtimeEvent(raw_message=message)
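The 48-line hunk above appears to be the oai/utils.py module, whose parse_oai_message maps raw OpenAI realtime event dicts onto the typed RealtimeEvent classes; anything unrecognized falls through to the base RealtimeEvent. A short illustrative sketch (not from the diff; the import path and sample payloads are assumptions):

# Assumed import path for the helper defined above.
from autogen.agentchat.realtime.experimental.clients.oai.utils import parse_oai_message

audio = parse_oai_message({"type": "response.audio.delta", "delta": "UklGR...", "item_id": "item_1"})
call = parse_oai_message({
    "type": "response.function_call_arguments.done",
    "call_id": "call_1",
    "name": "get_weather",  # hypothetical tool name
    "arguments": '{"city": "Paris"}',
})
other = parse_oai_message({"type": "rate_limits.updated"})

print(type(audio).__name__)       # AudioDelta
print(call.name, call.arguments)  # get_weather {'city': 'Paris'} (fields per the FunctionCall constructor above)
print(type(other).__name__)       # RealtimeEvent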