ag2 0.9.1a1__py3-none-any.whl → 0.9.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ag2 might be problematic. See the release's advisory page on the package registry for more details.
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/METADATA +272 -75
- ag2-0.9.2.dist-info/RECORD +406 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/WHEEL +1 -2
- autogen/__init__.py +89 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +427 -0
- autogen/_website/generate_mkdocs.py +1174 -0
- autogen/_website/notebook_processor.py +476 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +412 -0
- autogen/agentchat/__init__.py +44 -0
- autogen/agentchat/agent.py +182 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +309 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +429 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +566 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +187 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
- autogen/agentchat/contrib/rag/query_engine.py +74 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
- autogen/agentchat/contrib/swarm_agent.py +1425 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +232 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +303 -0
- autogen/agentchat/conversable_agent.py +4023 -0
- autogen/agentchat/group/__init__.py +64 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +41 -0
- autogen/agentchat/group/context_variables.py +192 -0
- autogen/agentchat/group/group_tool_executor.py +202 -0
- autogen/agentchat/group/group_utils.py +591 -0
- autogen/agentchat/group/handoffs.py +244 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +237 -0
- autogen/agentchat/group/on_condition.py +58 -0
- autogen/agentchat/group/on_context_condition.py +54 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +159 -0
- autogen/agentchat/group/patterns/manual.py +176 -0
- autogen/agentchat/group/patterns/pattern.py +288 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +26 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/group_chat_target.py +132 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +413 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1694 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
- autogen/agentchat/realtime/experimental/function_observer.py +85 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +111 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +73 -0
- autogen/agents/contrib/time/time_tool_agent.py +51 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
- autogen/agents/experimental/document_agent/document_agent.py +461 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +380 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
- autogen/agents/experimental/document_agent/parser_utils.py +130 -0
- autogen/agents/experimental/document_agent/url_utils.py +426 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +77 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +62 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +75 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +102 -0
- autogen/cache/in_memory_cache.py +58 -0
- autogen/cache/redis_cache.py +123 -0
- autogen/code_utils.py +596 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +119 -0
- autogen/coding/docker_commandline_code_executor.py +268 -0
- autogen/coding/factory.py +47 -0
- autogen/coding/func_with_reqs.py +202 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +167 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +231 -0
- autogen/coding/jupyter/jupyter_code_executor.py +160 -0
- autogen/coding/jupyter/local_jupyter_server.py +172 -0
- autogen/coding/local_commandline_code_executor.py +405 -0
- autogen/coding/markdown_code_extractor.py +45 -0
- autogen/coding/utils.py +56 -0
- autogen/doc_utils.py +34 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1013 -0
- autogen/events/base_event.py +99 -0
- autogen/events/client_events.py +167 -0
- autogen/events/helpers.py +36 -0
- autogen/events/print_event.py +46 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +80 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +225 -0
- autogen/fast_depends/core/model.py +576 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +29 -0
- autogen/fast_depends/dependencies/provider.py +39 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +280 -0
- autogen/fast_depends/utils.py +187 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +178 -0
- autogen/import_utils.py +526 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
- autogen/interop/langchain/langchain_tool.py +82 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +179 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
- autogen/interop/registry.py +69 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +56 -0
- autogen/io/run_response.py +293 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +213 -0
- autogen/json_utils.py +43 -0
- autogen/llm_config.py +382 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +128 -0
- autogen/logger/file_logger.py +261 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +523 -0
- autogen/math_utils.py +339 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/mcp_client.py +208 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +63 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +581 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +158 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +114 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +400 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +948 -0
- autogen/messages/base_message.py +107 -0
- autogen/messages/client_messages.py +171 -0
- autogen/messages/print_message.py +49 -0
- autogen/oai/__init__.py +53 -0
- autogen/oai/anthropic.py +714 -0
- autogen/oai/bedrock.py +628 -0
- autogen/oai/cerebras.py +299 -0
- autogen/oai/client.py +1444 -0
- autogen/oai/client_utils.py +169 -0
- autogen/oai/cohere.py +479 -0
- autogen/oai/gemini.py +998 -0
- autogen/oai/gemini_types.py +155 -0
- autogen/oai/groq.py +305 -0
- autogen/oai/mistral.py +303 -0
- autogen/oai/oai_models/__init__.py +11 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +87 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +86 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
- autogen/oai/oai_models/completion_usage.py +60 -0
- autogen/oai/ollama.py +643 -0
- autogen/oai/openai_utils.py +881 -0
- autogen/oai/together.py +370 -0
- autogen/retrieve_utils.py +491 -0
- autogen/runtime_logging.py +160 -0
- autogen/token_count_utils.py +267 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +41 -0
- autogen/tools/dependency_injection.py +254 -0
- autogen/tools/experimental/__init__.py +48 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +161 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +328 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1316 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +183 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
- autogen/tools/function_utils.py +411 -0
- autogen/tools/tool.py +187 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +69 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
- ag2-0.9.1a1.dist-info/RECORD +0 -6
- ag2-0.9.1a1.dist-info/top_level.txt +0 -1
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/LICENSE +0 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/NOTICE.md +0 -0
|
@@ -0,0 +1,475 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import warnings
|
|
7
|
+
from collections import defaultdict
|
|
8
|
+
from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union
|
|
9
|
+
|
|
10
|
+
import anyio
|
|
11
|
+
from asyncer import asyncify, create_task_group, syncify
|
|
12
|
+
|
|
13
|
+
from ....agentchat.contrib.swarm_agent import AfterWorkOption, initiate_swarm_chat
|
|
14
|
+
from ....cache import AbstractCache
|
|
15
|
+
from ....code_utils import content_str
|
|
16
|
+
from ....doc_utils import export_module
|
|
17
|
+
from ... import Agent, ChatResult, ConversableAgent, LLMAgent
|
|
18
|
+
from ...utils import consolidate_chat_info, gather_usage_summary
|
|
19
|
+
|
|
20
|
+
if TYPE_CHECKING:
|
|
21
|
+
from .clients import Role
|
|
22
|
+
from .realtime_agent import RealtimeAgent
|
|
23
|
+
|
|
24
|
+
__all__ = ["register_swarm"]
|
|
25
|
+
|
|
26
|
+
SWARM_SYSTEM_MESSAGE = (
|
|
27
|
+
"You are a helpful voice assistant. Your task is to listen to user and to coordinate the tasks based on his/her inputs."
|
|
28
|
+
"You can and will communicate using audio output only."
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
QUESTION_ROLE: "Role" = "user"
|
|
32
|
+
QUESTION_MESSAGE = (
|
|
33
|
+
"I have a question/information for myself. DO NOT ANSWER YOURSELF, GET THE ANSWER FROM ME. "
|
|
34
|
+
"repeat the question to me **WITH AUDIO OUTPUT** and AFTER YOU GET THE ANSWER FROM ME call 'answer_task_question' with the answer in first person\n\n"
|
|
35
|
+
"IMPORTANT: repeat just the question, without any additional information or context\n\n"
|
|
36
|
+
"The question is: '{}'\n\n"
|
|
37
|
+
)
|
|
38
|
+
QUESTION_TIMEOUT_SECONDS = 20
|
|
39
|
+
|
|
40
|
+
logger = logging.getLogger(__name__)
|
|
41
|
+
|
|
42
|
+
F = TypeVar("F", bound=Callable[..., Any])
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def message_to_dict(message: Union[dict[str, Any], str]) -> dict[str, Any]:
    """Coerce *message* into OpenAI message-dict form.

    A plain string becomes ``{"content": message}``; a dict is returned
    unchanged; anything else is handed to ``dict()`` for conversion.
    """
    if isinstance(message, dict):
        return message
    if isinstance(message, str):
        return {"content": message}
    return dict(message)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def parse_oai_message(message: Union[dict[str, Any], str], role: str, adressee: "Agent") -> dict[str, Any]:
    """Parse a message into an OpenAI-compatible message format.

    Args:
        message: The message to parse (string or dict).
        role: The role associated with the message.
        adressee: The agent that will receive the message.

    Returns:
        The parsed message in OpenAI-compatible format.

    Raises:
        ValueError: If the message lacks all of the 'content', 'function_call',
            and 'tool_calls' fields.
    """
    # Normalize to dict form (string -> {"content": ...}).
    if isinstance(message, str):
        message = {"content": message}
    elif not isinstance(message, dict):
        message = dict(message)

    # Keep only the OpenAI-relevant fields that are present and not None.
    oai_message: dict[str, Any] = {}
    for key in ("content", "function_call", "tool_calls", "tool_responses", "tool_call_id", "name", "context"):
        value = message.get(key)
        if value is not None:
            oai_message[key] = value

    # A message must carry content unless it is a pure function/tool call.
    if "content" not in oai_message:
        if "function_call" not in oai_message and "tool_calls" not in oai_message:
            raise ValueError("Message must have either 'content', 'function_call', or 'tool_calls' field.")
        oai_message["content"] = None

    # Determine and assign the role.
    incoming_role = message.get("role")
    if incoming_role in ("function", "tool"):
        oai_message["role"] = incoming_role
        # Tool responses must carry string content.
        for tool_response in oai_message.get("tool_responses", []):
            tool_response["content"] = str(tool_response["content"])
    elif "override_role" in message:
        oai_message["role"] = message["override_role"]
    else:
        oai_message["role"] = role

    # Function/tool calls are always authored by the assistant.
    if oai_message.get("function_call") or oai_message.get("tool_calls"):
        oai_message["role"] = "assistant"

    # Attribute the message to the addressee when no name was given.
    oai_message.setdefault("name", adressee.name)

    return oai_message
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class SwarmableAgent:
|
|
108
|
+
"""A class for an agent that can participate in a swarm chat."""
|
|
109
|
+
|
|
110
|
+
def __init__(
    self,
    name: str,
    system_message: str = "You are a helpful AI Assistant.",
    is_termination_msg: Optional[Callable[..., bool]] = None,
    description: Optional[str] = None,
    silent: Optional[bool] = None,
):
    """Initialize a minimal swarm-capable agent.

    Args:
        name: The name of the agent.
        system_message: System message for inference.
        is_termination_msg: Predicate applied to a received message dict; a
            True result ends the conversation. Defaults to checking whether
            the content equals "TERMINATE".
        description: Short description of the agent; defaults to the system message.
        silent: Whether to suppress output of this agent's messages.
    """
    self._name = name
    self._system_message = system_message
    if description is None:
        description = system_message
    self._description = description

    if is_termination_msg is not None:
        self._is_termination_msg = is_termination_msg
    else:
        self._is_termination_msg = lambda x: content_str(x.get("content")) == "TERMINATE"

    self.silent = silent

    # Per-partner OpenAI-format conversation history.
    self._oai_messages: dict[Agent, Any] = defaultdict(list)

    # Initialize standalone client cache object.
    self.client_cache = None
    self.previous_cache = None

    # Whether to auto-reply when receiving a message from a given agent.
    self.reply_at_receive: dict[Agent, bool] = defaultdict(bool)
|
|
136
|
+
|
|
137
|
+
@property
def system_message(self) -> str:
    """The system message used for inference."""
    return self._system_message
|
|
140
|
+
|
|
141
|
+
def update_system_message(self, system_message: str) -> None:
    """Replace this agent's system message.

    Args:
        system_message (str): The new system message for inference.
    """
    self._system_message = system_message
|
|
148
|
+
|
|
149
|
+
@property
def name(self) -> str:
    """The name of the agent."""
    return self._name
|
|
152
|
+
|
|
153
|
+
@property
def description(self) -> str:
    """A short description of the agent."""
    return self._description
|
|
156
|
+
|
|
157
|
+
def send(
    self,
    message: Union[dict[str, Any], str],
    recipient: Agent,
    request_reply: Optional[bool] = None,
    silent: Optional[bool] = False,
) -> None:
    """Send *message* to *recipient*, recording it in the local history first."""
    outgoing = parse_oai_message(message, "assistant", recipient)
    self._oai_messages[recipient].append(outgoing)
    recipient.receive(message, self, request_reply)
|
|
166
|
+
|
|
167
|
+
def receive(
    self,
    message: Union[dict[str, Any], str],
    sender: Agent,
    request_reply: Optional[bool] = None,
    silent: Optional[bool] = False,
) -> None:
    """Record an incoming message and, when requested, reply to *sender*."""
    incoming = parse_oai_message(message, "user", self)
    self._oai_messages[sender].append(incoming)

    # An explicit False suppresses the reply; with no explicit request,
    # fall back to the per-sender reply_at_receive flag.
    if request_reply is False:
        return
    if request_reply is None and self.reply_at_receive[sender] is False:
        return

    reply = self.generate_reply(messages=self.chat_messages[sender], sender=sender)
    if reply is not None:
        self.send(reply, sender, silent=silent)
|
|
180
|
+
|
|
181
|
+
def generate_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional["Agent"] = None,
    **kwargs: Any,
) -> Union[str, dict[str, Any], None]:
    """Generate a reply for *messages*, or for the history with *sender*.

    Raises:
        ValueError: If neither messages nor sender is provided.
    """
    if messages is None and sender is None:
        raise ValueError("Either messages or sender must be provided.")
    if messages is None:
        messages = self._oai_messages[sender]

    # Delegate to the subclass hook; only the reply part is returned.
    _, reply = self.check_termination_and_human_reply(messages=messages, sender=sender, config=None)
    return reply
|
|
195
|
+
|
|
196
|
+
def check_termination_and_human_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> tuple[bool, Union[str, None]]:
    """Check for termination or obtain a human reply; subclasses must implement this."""
    raise NotImplementedError
|
|
203
|
+
|
|
204
|
+
def initiate_chat(
    self,
    recipient: ConversableAgent,
    message: Union[dict[str, Any], str],
    clear_history: bool = True,
    silent: Optional[bool] = False,
    cache: Optional[AbstractCache] = None,
    summary_args: Optional[dict[str, Any]] = {},  # NOTE(review): mutable default — safe only if never mutated downstream; confirm
    **kwargs: dict[str, Any],
) -> ChatResult:
    """Run a blocking chat with *recipient* and return its result.

    Args:
        recipient: The agent that receives the opening message.
        message: The opening message of the chat.
        clear_history: Whether to wipe the prior history with `recipient` first.
        clear_history: see above; `silent` suppresses message output.
        cache: Cache temporarily installed on `recipient` for this chat.
        summary_args: Arguments forwarded to the summary helper.
        **kwargs: Additional options consumed by `consolidate_chat_info`.

    Returns:
        ChatResult with the chat history, last-message summary, usage cost,
        and an empty human-input list.
    """
    # locals() is captured before any new local is created so the chat-info
    # dict contains exactly the call arguments.
    _chat_info = locals().copy()
    _chat_info["sender"] = self
    consolidate_chat_info(_chat_info, uniform_sender=self)
    recipient._raise_exception_on_async_reply_functions()
    # Temporarily swap the recipient's client cache for the provided one.
    recipient.previous_cache = recipient.client_cache  # type: ignore[attr-defined]
    recipient.client_cache = cache  # type: ignore[attr-defined, assignment]

    self._prepare_chat(recipient, clear_history)
    # send() drives the whole exchange synchronously via receive()/reply.
    self.send(message, recipient, silent=silent)
    summary = self._last_msg_as_summary(self, recipient, summary_args)

    # Restore the recipient's original cache.
    recipient.client_cache = recipient.previous_cache  # type: ignore[attr-defined]
    recipient.previous_cache = None  # type: ignore[attr-defined]

    chat_result = ChatResult(
        chat_history=self.chat_messages[recipient],
        summary=summary,
        cost=gather_usage_summary([self, recipient]),  # type: ignore[arg-type]
        human_input=[],
    )
    return chat_result
|
|
235
|
+
|
|
236
|
+
async def a_generate_reply(
    self,
    messages: Optional[list[dict[str, Any]]] = None,
    sender: Optional["Agent"] = None,
    **kwargs: Any,
) -> Union[str, dict[str, Any], None]:
    """Async reply generation is not supported by this agent."""
    raise NotImplementedError
|
|
243
|
+
|
|
244
|
+
async def a_receive(
    self,
    message: Union[dict[str, Any], str],
    sender: "Agent",
    request_reply: Optional[bool] = None,
) -> None:
    """Async receive is not supported by this agent."""
    raise NotImplementedError
|
|
251
|
+
|
|
252
|
+
async def a_send(
    self,
    message: Union[dict[str, Any], str],
    recipient: "Agent",
    request_reply: Optional[bool] = None,
) -> None:
    """Async message sending is not supported by this agent.

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError
|
|
259
|
+
|
|
260
|
+
@property
def chat_messages(self) -> dict[Agent, list[dict[str, Any]]]:
    """Mapping from each peer agent to the list of messages exchanged with it."""
    return self._oai_messages
|
|
264
|
+
|
|
265
|
+
def last_message(self, agent: Optional[Agent] = None) -> Optional[dict[str, Any]]:
    """Return the last message exchanged with `agent`.

    Args:
        agent: The peer whose conversation to inspect. May be omitted only
            when at most one conversation exists.

    Returns:
        The last message dict, or None when there is no conversation at all.

    Raises:
        ValueError: if `agent` is None while multiple conversations exist.
        KeyError: if `agent` has no conversation history.
    """
    if agent is None:
        n_conversations = len(self._oai_messages)
        if n_conversations == 0:
            return None
        if n_conversations == 1:
            for conversation in self._oai_messages.values():
                return conversation[-1]  # type: ignore[no-any-return]
        raise ValueError("More than one conversation is found. Please specify the sender to get the last message.")
    # BUG FIX: this membership test previously *called* the mapping
    # (`self._oai_messages()`), which raised TypeError instead of checking
    # whether the agent has a conversation.
    if agent not in self._oai_messages:
        raise KeyError(
            f"The agent '{agent.name}' is not present in any conversation. No history available for this agent."
        )
    return self._oai_messages[agent][-1]  # type: ignore[no-any-return]
|
|
279
|
+
|
|
280
|
+
def _prepare_chat(
|
|
281
|
+
self,
|
|
282
|
+
recipient: ConversableAgent,
|
|
283
|
+
clear_history: bool,
|
|
284
|
+
prepare_recipient: bool = True,
|
|
285
|
+
reply_at_receive: bool = True,
|
|
286
|
+
) -> None:
|
|
287
|
+
self.reply_at_receive[recipient] = reply_at_receive
|
|
288
|
+
if clear_history:
|
|
289
|
+
self._oai_messages[recipient].clear()
|
|
290
|
+
if prepare_recipient:
|
|
291
|
+
recipient._prepare_chat(self, clear_history, False, reply_at_receive) # type: ignore[arg-type]
|
|
292
|
+
|
|
293
|
+
def _raise_exception_on_async_reply_functions(self) -> None:
|
|
294
|
+
pass
|
|
295
|
+
|
|
296
|
+
@staticmethod
|
|
297
|
+
def _last_msg_as_summary(sender: Agent, recipient: Agent, summary_args: Optional[dict[str, Any]]) -> str:
|
|
298
|
+
"""Get a chat summary from the last message of the recipient."""
|
|
299
|
+
summary = ""
|
|
300
|
+
try:
|
|
301
|
+
content = recipient.last_message(sender)["content"] # type: ignore[attr-defined]
|
|
302
|
+
if isinstance(content, str):
|
|
303
|
+
summary = content.replace("TERMINATE", "")
|
|
304
|
+
elif isinstance(content, list):
|
|
305
|
+
summary = "\n".join(
|
|
306
|
+
x["text"].replace("TERMINATE", "") for x in content if isinstance(x, dict) and "text" in x
|
|
307
|
+
)
|
|
308
|
+
except (IndexError, AttributeError) as e:
|
|
309
|
+
warnings.warn(f"Cannot extract summary using last_msg: {e}. Using an empty str as summary.", UserWarning)
|
|
310
|
+
return summary
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
# check that the SwarmableAgent class is implementing LLMAgent protocol
if TYPE_CHECKING:
    # Static-analysis-only assertion: this function is never executed at
    # runtime. Its return annotation forces a type checker to verify that
    # SwarmableAgent is assignable to the LLMAgent protocol.
    def _create_swarmable_agent(
        name: str,
        system_message: str,
        is_termination_msg: Optional[Callable[..., bool]],
        description: Optional[str],
        silent: Optional[bool],
    ) -> LLMAgent:
        return SwarmableAgent(
            name=name,
            system_message=system_message,
            is_termination_msg=is_termination_msg,
            description=description,
            silent=silent,
        )
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
class SwarmableRealtimeAgent(SwarmableAgent):
    """Adapter that lets a RealtimeAgent act as the "user" participant of a swarm chat.

    Questions raised by the swarm are relayed to the realtime client, and the
    user's spoken/typed answer is fed back into the chat via an anyio event.
    """

    def __init__(
        self,
        realtime_agent: "RealtimeAgent",
        initial_agent: ConversableAgent,
        agents: list[ConversableAgent],
        question_message: Optional[str] = None,
    ) -> None:
        """
        Args:
            realtime_agent: The realtime agent whose client relays questions and answers.
            initial_agent: The first agent to speak in the swarm chat.
            agents: All agents participating in the swarm.
            question_message: Format string used when asking the user a question;
                defaults to QUESTION_MESSAGE.
        """
        self._initial_agent = initial_agent
        self._agents = agents
        self._realtime_agent = realtime_agent

        # Set once the user has answered the currently pending question.
        self._answer_event: anyio.Event = anyio.Event()
        self._answer: str = ""
        self.question_message = question_message or QUESTION_MESSAGE

        # NOTE(review): no system_message is forwarded here — presumably the
        # SwarmableAgent base supplies a default; confirm against its __init__.
        super().__init__(
            name=realtime_agent._name,
            is_termination_msg=None,
            description=None,
            silent=None,
        )

    def reset_answer(self) -> None:
        """Reset the answer event (anyio events are single-use, so a new one is created)."""
        self._answer_event = anyio.Event()

    def set_answer(self, answer: str) -> str:
        """Set the answer to the question and wake any waiter."""
        self._answer = answer
        self._answer_event.set()
        return "Answer set successfully."

    async def get_answer(self) -> str:
        """Get the answer to the question, blocking until one is set."""
        await self._answer_event.wait()
        return self._answer

    async def ask_question(self, question: str, question_timeout: int) -> None:
        """Send a question for the user to the agent and wait for the answer.

        If the answer is not received within the timeout, the question is repeated.

        Args:
            question: The question to ask the user.
            question_timeout: The time in seconds to wait for the answer.
        """
        self.reset_answer()
        realtime_client = self._realtime_agent._realtime_client
        await realtime_client.send_text(role=QUESTION_ROLE, text=question)

        # Polls the answer event once per second, up to `timeout` seconds.
        async def _check_event_set(timeout: int = question_timeout) -> bool:
            for _ in range(timeout):
                if self._answer_event.is_set():
                    return True
                await anyio.sleep(1)
            return False

        # Re-send the question until an answer arrives (no overall deadline).
        while not await _check_event_set():
            await realtime_client.send_text(role=QUESTION_ROLE, text=question)

    def check_termination_and_human_reply(
        self,
        messages: Optional[list[dict[str, Any]]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> tuple[bool, Optional[str]]:
        """Check if the conversation should be terminated and if the agent should reply.

        Called when its agents turn in the chat conversation.

        Args:
            messages (list[dict[str, Any]]): The messages in the conversation.
            sender (Agent): The agent that sent the message.
            config (Optional[Any]): The configuration for the agent.

        Returns:
            (False, None) when there are no messages; otherwise (True, message)
            where the message carries the user's answer. Note the returned dict
            differs from the annotated Optional[str] — see the type: ignore below.
        """
        if not messages:
            return False, None

        # Bridge sync -> async: run ask_question to completion in a task group.
        async def get_input() -> None:
            async with create_task_group() as tg:
                tg.soonify(self.ask_question)(
                    self.question_message.format(messages[-1]["content"]),
                    question_timeout=QUESTION_TIMEOUT_SECONDS,
                )

        syncify(get_input)()

        return True, {"role": "user", "content": self._answer}  # type: ignore[return-value]

    def start_chat(self) -> None:
        # Not supported on this adapter; the swarm chat is started via the
        # on_observers_ready callback installed in configure_realtime_agent.
        raise NotImplementedError

    def configure_realtime_agent(self, system_message: Optional[str]) -> None:
        """Install the swarm system message, answer function, and chat-start callback
        on the wrapped RealtimeAgent.

        Args:
            system_message: Overriding system message; when falsy, the default
                SWARM_SYSTEM_MESSAGE is used (warning if the agent already had
                a non-default one).
        """
        realtime_agent = self._realtime_agent

        logger = realtime_agent.logger
        if not system_message:
            if realtime_agent.system_message != "You are a helpful AI Assistant.":
                logger.warning(
                    "Overriding system message set up in `__init__`, please use `system_message` parameter of the `register_swarm` function instead."
                )
            system_message = SWARM_SYSTEM_MESSAGE

        realtime_agent._system_message = system_message

        # Expose set_answer to the realtime side so the user's reply can be delivered.
        realtime_agent.register_realtime_function(
            name="answer_task_question", description="Answer question from the task"
        )(self.set_answer)

        # Kick off the swarm chat (in a worker thread via asyncify) once the
        # realtime observers are ready; this adapter plays the user role.
        async def on_observers_ready() -> None:
            self._realtime_agent._tg.soonify(asyncify(initiate_swarm_chat))(
                initial_agent=self._initial_agent,
                agents=self._agents,
                user_agent=self,  # type: ignore[arg-type]
                messages="Find out what the user wants.",
                after_work=AfterWorkOption.REVERT_TO_USER,
            )

        self._realtime_agent.callbacks.on_observers_ready = on_observers_ready
|
|
451
|
+
|
|
452
|
+
|
|
453
|
+
@export_module("autogen.agentchat.realtime.experimental")
def register_swarm(
    *,
    realtime_agent: "RealtimeAgent",
    initial_agent: ConversableAgent,
    agents: list[ConversableAgent],
    system_message: Optional[str] = None,
    question_message: Optional[str] = None,
) -> None:
    """Wire a swarm of conversable agents up to a RealtimeAgent.

    Builds a SwarmableRealtimeAgent around `realtime_agent` and configures the
    realtime side (system message, answer function, chat-start callback).

    Args:
        realtime_agent (RealtimeAgent): The RealtimeAgent to create the SwarmableRealtimeAgent from.
        initial_agent (ConversableAgent): The initial agent.
        agents (list[ConversableAgent]): The agents in the swarm.
        system_message (Optional[str]): The system message to set for the agent. If None, the default system message is used.
        question_message (Optional[str]): The question message to set for the agent. If None, the default QUESTION_MESSAGE is used.
    """
    adapter = SwarmableRealtimeAgent(
        realtime_agent=realtime_agent,
        initial_agent=initial_agent,
        agents=agents,
        question_message=question_message,
    )
    adapter.configure_realtime_agent(system_message=system_message)
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from collections.abc import AsyncIterator
|
|
6
|
+
from typing import Any, Protocol, runtime_checkable
|
|
7
|
+
|
|
8
|
+
__all__ = ["WebSocketProtocol"]
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@runtime_checkable
class WebSocketProtocol(Protocol):
    """Structural WebSocket interface modelled after FastAPI's WebSocket.

    Any object exposing these four methods satisfies the protocol;
    `runtime_checkable` allows `isinstance` checks against it.
    """

    async def send_json(self, data: Any, mode: str = "text") -> None:
        """Send `data` serialized as JSON in the given mode."""
        ...

    async def receive_json(self, mode: str = "text") -> Any:
        """Receive a message and deserialize it from JSON."""
        ...

    async def receive_text(self) -> str:
        """Receive a plain-text message."""
        ...

    def iter_text(self) -> AsyncIterator[str]:
        """Iterate over incoming text messages."""
        ...
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from ..realtime.experimental import (
|
|
6
|
+
FunctionObserver,
|
|
7
|
+
RealtimeAgent,
|
|
8
|
+
RealtimeObserver,
|
|
9
|
+
TwilioAudioAdapter,
|
|
10
|
+
WebSocketAudioAdapter,
|
|
11
|
+
register_swarm,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
# Public API of this package: re-exported realtime experimental names.
__all__ = [
    "FunctionObserver",
    "RealtimeAgent",
    "RealtimeObserver",
    "TwilioAudioAdapter",
    "WebSocketAudioAdapter",
    "register_swarm",
]
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
from typing import Any, Callable, Literal, Optional, Union
|
|
8
|
+
|
|
9
|
+
from ..doc_utils import export_module
|
|
10
|
+
from ..llm_config import LLMConfig
|
|
11
|
+
from ..runtime_logging import log_new_agent, logging_enabled
|
|
12
|
+
from .conversable_agent import ConversableAgent
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@export_module("autogen")
class UserProxyAgent(ConversableAgent):
    """(In preview) A proxy agent for the user, that can execute code and provide feedback to the other agents.

    UserProxyAgent is a subclass of ConversableAgent configured with `human_input_mode` to ALWAYS
    and `llm_config` to False. By default, the agent will prompt for human input every time a message is received.
    Code execution is enabled by default. LLM-based auto reply is disabled by default.
    To modify auto reply, register a method with [`register_reply`](../ConversableAgent#register-reply).
    To modify the way to get human input, override `get_human_input` method.
    To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
    `run_code`, and `execute_function` methods respectively.
    """

    # Default UserProxyAgent.description values, based on human_input_mode
    DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS = {
        "ALWAYS": "An attentive HUMAN user who can answer questions about the task, and can perform tasks such as running Python code or inputting command line commands at a Linux terminal and reporting back the execution results.",
        "TERMINATE": "A user that can run Python code or input command line commands at a Linux terminal and report back the execution results.",
        "NEVER": "A computer terminal that performs no other action than running Python scripts (provided to it quoted in ```python code blocks), or sh shell scripts (provided to it quoted in ```sh code blocks).",
    }

    def __init__(
        self,
        name: str,
        is_termination_msg: Optional[Callable[[dict[str, Any]], bool]] = None,
        max_consecutive_auto_reply: Optional[int] = None,
        human_input_mode: Literal["ALWAYS", "TERMINATE", "NEVER"] = "ALWAYS",
        function_map: Optional[dict[str, Callable[..., Any]]] = None,
        code_execution_config: Optional[Union[dict[str, Any], Literal[False]]] = None,
        default_auto_reply: Optional[Union[str, dict[str, Any]]] = "",
        llm_config: Optional[Union[LLMConfig, dict[str, Any], Literal[False]]] = False,
        system_message: Optional[Union[str, list[str]]] = "",
        description: Optional[str] = None,
        **kwargs: Any,
    ):
        """Args:
        name (str): name of the agent.
        is_termination_msg (function): a function that takes a message in the form of a dictionary
            and returns a boolean value indicating if this received message is a termination message.
            The dict can contain the following keys: "content", "role", "name", "function_call".
        max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
            default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
            The limit only plays a role when human_input_mode is not "ALWAYS".
        human_input_mode (str): whether to ask for human inputs every time a message is received.
            Possible values are "ALWAYS", "TERMINATE", "NEVER".
            (1) When "ALWAYS", the agent prompts for human input every time a message is received.
                Under this mode, the conversation stops when the human input is "exit",
                or when is_termination_msg is True and there is no human input.
            (2) When "TERMINATE", the agent only prompts for human input only when a termination message is received or
                the number of auto reply reaches the max_consecutive_auto_reply.
            (3) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
                when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
        function_map (dict[str, callable]): Mapping function names (passed to openai) to callable functions.
        code_execution_config (dict or False or None): config for the code execution.
            To disable code execution, set to False. Defaults to None, which is
            equivalent to an empty dict (code execution enabled with default
            settings; a fresh dict is created per instance).
            Otherwise, set to a dictionary with the following keys:
            - work_dir (Optional, str): The working directory for the code execution.
                If None, a default working directory will be used.
                The default working directory is the "extensions" directory under
                "path_to_autogen".
            - use_docker (Optional, list, str or bool): The docker image to use for code execution.
                Default is True, which means the code will be executed in a docker container. A default list of images will be used.
                If a list or a str of image name(s) is provided, the code will be executed in a docker container
                with the first image successfully pulled.
                If False, the code will be executed in the current environment.
                We strongly recommend using docker for code execution.
            - timeout (Optional, int): The maximum execution time in seconds.
            - last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Default to 1.
        default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.
        llm_config (LLMConfig or dict or False or None): llm inference configuration.
            Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create)
            for available options.
            Default to False, which disables llm-based auto reply.
            When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
        system_message (str or List): system message for ChatCompletion inference.
            Only used when llm_config is not False. Use it to reprogram the agent.
        description (str): a short description of the agent. This description is used by other agents
            (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
        **kwargs (dict): Please refer to other kwargs in
            [ConversableAgent](https://docs.ag2.ai/latest/docs/api-reference/autogen/ConversableAgent).
        """
        # BUG FIX: the previous default of ``{}`` was a single mutable dict
        # shared by every UserProxyAgent that omitted the argument, so any
        # in-place mutation leaked across instances. A None sentinel replaced
        # by a fresh empty dict preserves the old effective behavior (code
        # execution enabled with default settings) without the sharing.
        if code_execution_config is None:
            code_execution_config = {}
        super().__init__(
            name=name,
            system_message=system_message,
            is_termination_msg=is_termination_msg,
            max_consecutive_auto_reply=max_consecutive_auto_reply,
            human_input_mode=human_input_mode,
            function_map=function_map,
            code_execution_config=code_execution_config,
            llm_config=llm_config,
            default_auto_reply=default_auto_reply,
            description=(
                description if description is not None else self.DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS[human_input_mode]
            ),
            **kwargs,
        )

        if logging_enabled():
            log_new_agent(self, locals())
|