ag2 0.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ag2-0.10.2.dist-info/METADATA +819 -0
- ag2-0.10.2.dist-info/RECORD +423 -0
- ag2-0.10.2.dist-info/WHEEL +4 -0
- ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
- ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
- autogen/__init__.py +88 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +426 -0
- autogen/_website/generate_mkdocs.py +1216 -0
- autogen/_website/notebook_processor.py +475 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +413 -0
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +86 -0
- autogen/a2a/client.py +357 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +207 -0
- autogen/agentchat/__init__.py +47 -0
- autogen/agentchat/agent.py +180 -0
- autogen/agentchat/assistant_agent.py +86 -0
- autogen/agentchat/chat.py +325 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +432 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +578 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +189 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
- autogen/agentchat/contrib/rag/query_engine.py +76 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
- autogen/agentchat/contrib/swarm_agent.py +1404 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +224 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +304 -0
- autogen/agentchat/conversable_agent.py +4307 -0
- autogen/agentchat/group/__init__.py +67 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +39 -0
- autogen/agentchat/group/context_variables.py +182 -0
- autogen/agentchat/group/events/transition_events.py +111 -0
- autogen/agentchat/group/group_tool_executor.py +324 -0
- autogen/agentchat/group/group_utils.py +659 -0
- autogen/agentchat/group/guardrails.py +179 -0
- autogen/agentchat/group/handoffs.py +303 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +291 -0
- autogen/agentchat/group/on_condition.py +55 -0
- autogen/agentchat/group/on_context_condition.py +51 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +160 -0
- autogen/agentchat/group/patterns/manual.py +177 -0
- autogen/agentchat/group/patterns/pattern.py +295 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +24 -0
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +140 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/function_target.py +245 -0
- autogen/agentchat/group/targets/group_chat_target.py +133 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +424 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1832 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
- autogen/agentchat/realtime/experimental/function_observer.py +84 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +114 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +74 -0
- autogen/agents/contrib/time/time_tool_agent.py +52 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
- autogen/agents/experimental/document_agent/document_agent.py +643 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +376 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
- autogen/agents/experimental/document_agent/parser_utils.py +134 -0
- autogen/agents/experimental/document_agent/url_utils.py +417 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +76 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +70 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +71 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +97 -0
- autogen/cache/in_memory_cache.py +54 -0
- autogen/cache/redis_cache.py +119 -0
- autogen/code_utils.py +598 -0
- autogen/coding/__init__.py +30 -0
- autogen/coding/base.py +120 -0
- autogen/coding/docker_commandline_code_executor.py +283 -0
- autogen/coding/factory.py +56 -0
- autogen/coding/func_with_reqs.py +203 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +160 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +224 -0
- autogen/coding/jupyter/jupyter_code_executor.py +154 -0
- autogen/coding/jupyter/local_jupyter_server.py +164 -0
- autogen/coding/local_commandline_code_executor.py +341 -0
- autogen/coding/markdown_code_extractor.py +44 -0
- autogen/coding/utils.py +55 -0
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/doc_utils.py +35 -0
- autogen/environments/__init__.py +10 -0
- autogen/environments/docker_python_environment.py +365 -0
- autogen/environments/python_environment.py +125 -0
- autogen/environments/system_python_environment.py +85 -0
- autogen/environments/venv_python_environment.py +220 -0
- autogen/environments/working_directory.py +74 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1016 -0
- autogen/events/base_event.py +100 -0
- autogen/events/client_events.py +168 -0
- autogen/events/helpers.py +44 -0
- autogen/events/print_event.py +45 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +75 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +206 -0
- autogen/fast_depends/core/model.py +527 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +30 -0
- autogen/fast_depends/dependencies/provider.py +40 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +272 -0
- autogen/fast_depends/utils.py +177 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +173 -0
- autogen/import_utils.py +539 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
- autogen/interop/langchain/langchain_tool.py +78 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +178 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
- autogen/interop/registry.py +70 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +61 -0
- autogen/io/run_response.py +294 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +214 -0
- autogen/json_utils.py +42 -0
- autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
- autogen/llm_clients/__init__.py +77 -0
- autogen/llm_clients/client_v2.py +122 -0
- autogen/llm_clients/models/__init__.py +55 -0
- autogen/llm_clients/models/content_blocks.py +389 -0
- autogen/llm_clients/models/unified_message.py +145 -0
- autogen/llm_clients/models/unified_response.py +83 -0
- autogen/llm_clients/openai_completions_client.py +444 -0
- autogen/llm_config/__init__.py +11 -0
- autogen/llm_config/client.py +59 -0
- autogen/llm_config/config.py +461 -0
- autogen/llm_config/entry.py +169 -0
- autogen/llm_config/types.py +37 -0
- autogen/llm_config/utils.py +223 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +129 -0
- autogen/logger/file_logger.py +262 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +524 -0
- autogen/math_utils.py +338 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/helpers.py +45 -0
- autogen/mcp/mcp_client.py +349 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +399 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +946 -0
- autogen/messages/base_message.py +108 -0
- autogen/messages/client_messages.py +172 -0
- autogen/messages/print_message.py +48 -0
- autogen/oai/__init__.py +61 -0
- autogen/oai/anthropic.py +1516 -0
- autogen/oai/bedrock.py +800 -0
- autogen/oai/cerebras.py +302 -0
- autogen/oai/client.py +1658 -0
- autogen/oai/client_utils.py +196 -0
- autogen/oai/cohere.py +494 -0
- autogen/oai/gemini.py +1045 -0
- autogen/oai/gemini_types.py +156 -0
- autogen/oai/groq.py +319 -0
- autogen/oai/mistral.py +311 -0
- autogen/oai/oai_models/__init__.py +23 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +86 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +97 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
- autogen/oai/oai_models/completion_usage.py +59 -0
- autogen/oai/ollama.py +657 -0
- autogen/oai/openai_responses.py +451 -0
- autogen/oai/openai_utils.py +897 -0
- autogen/oai/together.py +387 -0
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +197 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/retrieve_utils.py +490 -0
- autogen/runtime_logging.py +161 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/token_count_utils.py +280 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +40 -0
- autogen/tools/dependency_injection.py +249 -0
- autogen/tools/experimental/__init__.py +54 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +154 -0
- autogen/tools/experimental/code_execution/__init__.py +7 -0
- autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +329 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
- autogen/tools/experimental/firecrawl/__init__.py +7 -0
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1311 -0
- autogen/tools/experimental/searxng/__init__.py +7 -0
- autogen/tools/experimental/searxng/searxng_search.py +142 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +176 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
- autogen/tools/function_utils.py +412 -0
- autogen/tools/tool.py +188 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +72 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
|
@@ -0,0 +1,790 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
import hashlib
|
|
8
|
+
import importlib
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
import re
|
|
12
|
+
import subprocess as sp
|
|
13
|
+
import time
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
from termcolor import colored
|
|
17
|
+
|
|
18
|
+
from .... import AssistantAgent, ConversableAgent, OpenAIWrapper, UserProxyAgent
|
|
19
|
+
from ....code_utils import CODE_BLOCK_PATTERN
|
|
20
|
+
from ....doc_utils import export_module
|
|
21
|
+
from ....llm_config import LLMConfig
|
|
22
|
+
|
|
23
|
+
__all__ = ["AgentBuilder"]
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def _config_check(config: dict):
|
|
29
|
+
# check config loading
|
|
30
|
+
assert config.get("coding") is not None, 'Missing "coding" in your config.'
|
|
31
|
+
assert config.get("default_llm_config") is not None, 'Missing "default_llm_config" in your config.'
|
|
32
|
+
assert config.get("code_execution_config") is not None, 'Missing "code_execution_config" in your config.'
|
|
33
|
+
|
|
34
|
+
for agent_config in config["agent_configs"]:
|
|
35
|
+
assert agent_config.get("name", None) is not None, 'Missing agent "name" in your agent_configs.'
|
|
36
|
+
assert agent_config.get("system_message", None) is not None, (
|
|
37
|
+
'Missing agent "system_message" in your agent_configs.'
|
|
38
|
+
)
|
|
39
|
+
assert agent_config.get("description", None) is not None, 'Missing agent "description" in your agent_configs.'
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _retrieve_json(text):
    """Pull the first fenced code block out of *text*.

    Scans *text* with ``CODE_BLOCK_PATTERN`` (which captures a
    ``(language, code)`` pair per block). Returns the code of the first
    block found, or *text* unchanged when no block is present.
    """
    blocks = re.findall(CODE_BLOCK_PATTERN, text, flags=re.DOTALL)
    if not blocks:
        return text
    # Each match is a (language, code) tuple; only the code of the
    # first block is of interest.
    return blocks[0][1]
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
@export_module("autogen.agentchat.contrib.captainagent")
|
|
53
|
+
class AgentBuilder:
|
|
54
|
+
"""AgentBuilder can help user build an automatic task solving process powered by multi-agent system.
|
|
55
|
+
Specifically, our building pipeline includes initialize and build.
|
|
56
|
+
"""
|
|
57
|
+
|
|
58
|
+
online_server_name = "online"
|
|
59
|
+
|
|
60
|
+
DEFAULT_PROXY_AUTO_REPLY = 'There is no code from the last 1 message for me to execute. Group chat manager should let other participants to continue the conversation. If the group chat manager want to end the conversation, you should let other participant reply me only with "TERMINATE"'
|
|
61
|
+
|
|
62
|
+
GROUP_CHAT_DESCRIPTION = """ # Group chat instruction
|
|
63
|
+
You are now working in a group chat with different expert and a group chat manager.
|
|
64
|
+
You should refer to the previous message from other participant members or yourself, follow their topic and reply to them.
|
|
65
|
+
|
|
66
|
+
**Your role is**: {name}
|
|
67
|
+
Group chat members: {members}{user_proxy_desc}
|
|
68
|
+
|
|
69
|
+
When the task is complete and the result has been carefully verified, after obtaining agreement from the other members, you can end the conversation by replying only with "TERMINATE".
|
|
70
|
+
|
|
71
|
+
# Your profile
|
|
72
|
+
{sys_msg}
|
|
73
|
+
"""
|
|
74
|
+
|
|
75
|
+
DEFAULT_DESCRIPTION = """## Your role
|
|
76
|
+
[Complete this part with expert's name and skill description]
|
|
77
|
+
|
|
78
|
+
## Task and skill instructions
|
|
79
|
+
- [Complete this part with task description]
|
|
80
|
+
- [Complete this part with skill description]
|
|
81
|
+
- [(Optional) Complete this part with other information]
|
|
82
|
+
"""
|
|
83
|
+
|
|
84
|
+
CODING_AND_TASK_SKILL_INSTRUCTION = """## Useful instructions for task-solving
|
|
85
|
+
- Solve the task step by step if you need to.
|
|
86
|
+
- When you find an answer, verify the answer carefully. Include verifiable evidence with possible test case in your response if possible.
|
|
87
|
+
- All your reply should be based on the provided facts.
|
|
88
|
+
|
|
89
|
+
## How to verify?
|
|
90
|
+
**You have to keep believing that everyone else's answers are wrong until they provide clear enough evidence.**
|
|
91
|
+
- Verifying with step-by-step backward reasoning.
|
|
92
|
+
- Write test cases according to the general task.
|
|
93
|
+
|
|
94
|
+
## How to use code?
|
|
95
|
+
- Suggest python code (in a python coding block) or shell script (in a sh coding block) for the Computer_terminal to execute.
|
|
96
|
+
- If missing python packages, you can install the package by suggesting a `pip install` code in the ```sh ... ``` block.
|
|
97
|
+
- When using code, you must indicate the script type in the coding block.
|
|
98
|
+
- Do not the coding block which requires users to modify.
|
|
99
|
+
- Do not suggest a coding block if it's not intended to be executed by the Computer_terminal.
|
|
100
|
+
- The Computer_terminal cannot modify your code.
|
|
101
|
+
- **Use 'print' function for the output when relevant**.
|
|
102
|
+
- Check the execution result returned by the Computer_terminal.
|
|
103
|
+
- Do not ask Computer_terminal to copy and paste the result.
|
|
104
|
+
- If the result indicates there is an error, fix the error and output the code again. """
|
|
105
|
+
|
|
106
|
+
CODING_PROMPT = """Does the following task need programming (i.e., access external API or tool by coding) to solve,
|
|
107
|
+
or coding may help the following task become easier?
|
|
108
|
+
|
|
109
|
+
TASK: {task}
|
|
110
|
+
|
|
111
|
+
Answer only YES or NO.
|
|
112
|
+
"""
|
|
113
|
+
|
|
114
|
+
AGENT_NAME_PROMPT = """# Your task
|
|
115
|
+
Suggest no more than {max_agents} experts with their name according to the following user requirement.
|
|
116
|
+
|
|
117
|
+
## User requirement
|
|
118
|
+
{task}
|
|
119
|
+
|
|
120
|
+
# Task requirement
|
|
121
|
+
- Expert's name should follow the format: [skill]_Expert.
|
|
122
|
+
- Only reply the names of the experts, separated by ",".
|
|
123
|
+
- If coding skills are required, they should be limited to Python and Shell.
|
|
124
|
+
For example: Python_Expert, Math_Expert, ... """
|
|
125
|
+
|
|
126
|
+
AGENT_SYS_MSG_PROMPT = """# Your goal
|
|
127
|
+
- According to the task and expert name, write a high-quality description for the expert by filling the given template.
|
|
128
|
+
- Ensure that your description are clear and unambiguous, and include all necessary information.
|
|
129
|
+
|
|
130
|
+
# Task
|
|
131
|
+
{task}
|
|
132
|
+
|
|
133
|
+
# Expert name
|
|
134
|
+
{position}
|
|
135
|
+
|
|
136
|
+
# Template
|
|
137
|
+
{default_sys_msg}
|
|
138
|
+
"""
|
|
139
|
+
|
|
140
|
+
AGENT_DESCRIPTION_PROMPT = """# Your goal
|
|
141
|
+
Summarize the following expert's description in a sentence.
|
|
142
|
+
|
|
143
|
+
# Expert name
|
|
144
|
+
{position}
|
|
145
|
+
|
|
146
|
+
# Expert's description
|
|
147
|
+
{sys_msg}
|
|
148
|
+
"""
|
|
149
|
+
|
|
150
|
+
AGENT_SEARCHING_PROMPT = """# Your goal
|
|
151
|
+
Considering the following task, what experts should be involved to the task?
|
|
152
|
+
|
|
153
|
+
# TASK
|
|
154
|
+
{task}
|
|
155
|
+
|
|
156
|
+
# EXPERT LIST
|
|
157
|
+
{agent_list}
|
|
158
|
+
|
|
159
|
+
# Requirement
|
|
160
|
+
- You should consider if the experts' name and profile match the task.
|
|
161
|
+
- Considering the effort, you should select less then {max_agents} experts; less is better.
|
|
162
|
+
- Separate expert names by commas and use "_" instead of space. For example, Product_manager,Programmer
|
|
163
|
+
- Only return the list of expert names.
|
|
164
|
+
"""
|
|
165
|
+
|
|
166
|
+
AGENT_SELECTION_PROMPT = """# Your goal
|
|
167
|
+
Match roles in the role set to each expert in expert set.
|
|
168
|
+
|
|
169
|
+
# Skill set
|
|
170
|
+
{skills}
|
|
171
|
+
|
|
172
|
+
# Expert pool (formatting with name: description)
|
|
173
|
+
{expert_pool}
|
|
174
|
+
|
|
175
|
+
# Answer format
|
|
176
|
+
```json
|
|
177
|
+
{{
|
|
178
|
+
"skill_1 description": "expert_name: expert_description", // if there exists an expert that suitable for skill_1
|
|
179
|
+
"skill_2 description": "None", // if there is no experts that suitable for skill_2
|
|
180
|
+
...
|
|
181
|
+
}}
|
|
182
|
+
```
|
|
183
|
+
"""
|
|
184
|
+
|
|
185
|
+
def __init__(
    self,
    config_file_or_env: str | None = "OAI_CONFIG_LIST",
    config_file_location: str | None = "",
    llm_config: LLMConfig | dict[str, Any] | None = None,
    builder_model: str | list | None = None,
    agent_model: str | list | None = None,
    builder_model_tags: list | None = None,
    agent_model_tags: list | None = None,
    max_agents: int | None = 5,
):
    """(These APIs are experimental and may change in the future.)

    Args:
        config_file_or_env (Optional[str], optional): Path to the config file or name of the environment
            variable containing the OpenAI API configurations. Defaults to "OAI_CONFIG_LIST".
        config_file_location (Optional[str], optional): Location of the config file if not in the
            current directory. Defaults to "".
        llm_config (Optional[Union[LLMConfig, dict[str, Any]]], optional): Specific configs for LLM
        builder_model (Optional[Union[str, list]], optional): Model identifier(s) to use as the
            builder/manager model that coordinates agent creation. Can be a string or list of strings.
            Filters the config list to match these models. Defaults to None (no model filtering).
        agent_model (Optional[Union[str, list]], optional): Model identifier(s) to use for the
            generated participant agents. Can be a string or list of strings. Defaults to None.
        builder_model_tags (Optional[list], optional): Tags to filter which models from the config
            can be used as builder models. Defaults to None (no tag filtering).
        agent_model_tags (Optional[list], optional): Tags to filter which models from the config
            can be used as agent models. Defaults to None (no tag filtering).
        max_agents (Optional[int], optional): Maximum number of agents to create for each task.
            Defaults to 5.
    """
    # None defaults avoid Python's shared-mutable-default-argument pitfall;
    # normalize to fresh lists here (behavior is identical to the old `= []`).
    builder_model = [] if builder_model is None else builder_model
    agent_model = [] if agent_model is None else agent_model
    builder_model_tags = [] if builder_model_tags is None else builder_model_tags
    agent_model_tags = [] if agent_model_tags is None else agent_model_tags

    builder_model = builder_model if isinstance(builder_model, list) else [builder_model]
    builder_filter_dict = {}
    if len(builder_model) != 0:
        builder_filter_dict.update({"model": builder_model})
    if len(builder_model_tags) != 0:
        builder_filter_dict.update({"tags": builder_model_tags})

    # Load the builder's LLM config from file/env unless one was supplied.
    llm_config = (
        LLMConfig.from_json(env=config_file_or_env, file_location=config_file_location).where(**builder_filter_dict)
        if llm_config is None
        else llm_config
    )
    builder_config_list = llm_config.config_list

    if len(builder_config_list) == 0:
        raise RuntimeError(
            f"Fail to initialize build manager: {builder_model}{builder_model_tags} does not exist in {config_file_or_env}. "
            'If you want to change this model, please specify the "builder_model" in the constructor.'
        )
    self.builder_model = OpenAIWrapper(config_list=builder_config_list)

    self.agent_model = agent_model if isinstance(agent_model, list) else [agent_model]
    self.agent_model_tags = agent_model_tags
    self.config_file_or_env = config_file_or_env
    self.config_file_location = config_file_location
    self.llm_config = llm_config

    # Per-build state; populated by build()/build_from_library().
    self.building_task: str | None = None
    self.agent_configs: list[dict[str, Any]] = []
    self.open_ports: list[str] = []
    self.agent_procs: dict[str, tuple[sp.Popen, str]] = {}
    self.agent_procs_assign: dict[str, tuple[ConversableAgent, str]] = {}
    self.cached_configs: dict = {}

    self.max_agents = max_agents
|
|
251
|
+
|
|
252
|
+
def set_builder_model(self, model: str) -> None:
    """Override the builder/manager model.

    NOTE(review): ``__init__`` stores an ``OpenAIWrapper`` in
    ``self.builder_model``, while this setter stores a bare model-name
    string — confirm downstream callers handle both.
    """
    self.builder_model = model
|
|
254
|
+
|
|
255
|
+
def set_agent_model(self, model: str) -> None:
    """Override the model used for generated participant agents.

    NOTE(review): ``__init__`` stores a list in ``self.agent_model``;
    this setter stores a single string — confirm callers expect that.
    """
    self.agent_model = model
|
|
257
|
+
|
|
258
|
+
def _create_agent(
    self,
    agent_config: dict[str, Any],
    member_name: list[str],
    llm_config: LLMConfig | dict[str, Any],
    use_oai_assistant: bool | None = False,
) -> AssistantAgent:
    """Create a group chat participant agent.

    If the agent rely on an open-source model, this function will automatically set up an endpoint for that agent.
    The API address of that endpoint will be "localhost:{free port}".

    Args:
        agent_config: agent's config. It should include the following information:
            1. model_name: backbone model of an agent, e.g., gpt-4-1106-preview, meta/Llama-2-70b-chat
            2. agent_name: use to identify an agent in the group chat.
            3. system_message: including persona, task solving instruction, etc.
            4. description: brief description of an agent that help group chat manager to pick the speaker.
        member_name: a list of agent names in the group chat.
        llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...).
        use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.

    Returns:
        agent: a set-up agent.
    """
    # Normalize "model" to a list so it can be used as a filter below.
    model_name_or_hf_repo = agent_config.get("model", [])
    model_name_or_hf_repo = (
        model_name_or_hf_repo if isinstance(model_name_or_hf_repo, list) else [model_name_or_hf_repo]
    )
    model_tags = agent_config.get("tags", [])
    agent_name = agent_config["name"]
    system_message = agent_config["system_message"]
    description = agent_config["description"]

    # Path to the customize **ConversableAgent** class.
    agent_path = agent_config.get("agent_path")
    filter_dict = {}
    if len(model_name_or_hf_repo) > 0:
        filter_dict.update({"model": model_name_or_hf_repo})
    if len(model_tags) > 0:
        filter_dict.update({"tags": model_tags})
    # An explicit llm_config given at construction time wins over the
    # file/env-based lookup filtered by model name/tags.
    config_list = (
        LLMConfig.from_json(env=self.config_file_or_env, file_location=self.config_file_location)
        .where(**filter_dict)
        .config_list
        if self.llm_config is None
        else self.llm_config.config_list
    )
    if len(config_list) == 0:
        raise RuntimeError(
            f"Fail to initialize agent {agent_name}: {model_name_or_hf_repo}{model_tags} does not exist in {self.config_file_or_env}.\n"
            f'If you would like to change this model, please specify the "agent_model" in the constructor.\n'
            f"If you load configs from json, make sure the model in agent_configs is in the {self.config_file_or_env}."
        )
    server_id = self.online_server_name
    current_config = llm_config.copy()
    current_config.update({"config_list": config_list})
    if use_oai_assistant:
        # Imported lazily so the OpenAI-assistant dependency is only needed
        # when this path is taken.
        from ..gpt_assistant_agent import GPTAssistantAgent

        agent = GPTAssistantAgent(
            name=agent_name,
            llm_config={**current_config, "assistant_id": None},
            instructions=system_message,
            overwrite_instructions=False,
        )
    else:
        user_proxy_desc = ""
        if self.cached_configs["coding"] is True:
            user_proxy_desc = (
                "\nThe group also include a Computer_terminal to help you run the python and shell code."
            )

        # Optionally resolve a custom agent class from a dotted/slash path;
        # fall back to AssistantAgent if it is not a ConversableAgent subclass.
        model_class = AssistantAgent
        if agent_path:
            module_path, model_class_name = agent_path.replace("/", ".").rsplit(".", 1)
            module = importlib.import_module(module_path)
            model_class = getattr(module, model_class_name)
            if not issubclass(model_class, ConversableAgent):
                logger.error(f"{model_class} is not a ConversableAgent. Use AssistantAgent as default")
                model_class = AssistantAgent

        # Forward any extra config keys to the agent constructor untouched.
        additional_config = {
            k: v
            for k, v in agent_config.items()
            if k not in ["model", "name", "system_message", "description", "agent_path", "tags"]
        }
        agent = model_class(
            name=agent_name, llm_config=current_config.copy(), description=description, **additional_config
        )
        # Empty system_message means "keep the class default"; otherwise
        # append the coding/task-skill instructions.
        if system_message == "":
            system_message = agent.system_message
        else:
            system_message = f"{system_message}\n\n{self.CODING_AND_TASK_SKILL_INSTRUCTION}"

        enhanced_sys_msg = self.GROUP_CHAT_DESCRIPTION.format(
            name=agent_name, members=member_name, user_proxy_desc=user_proxy_desc, sys_msg=system_message
        )
        agent.update_system_message(enhanced_sys_msg)
    self.agent_procs_assign[agent_name] = (agent, server_id)
    return agent
|
|
359
|
+
|
|
360
|
+
def clear_agent(self, agent_name: str, recycle_endpoint: bool | None = True):
|
|
361
|
+
"""Clear a specific agent by name.
|
|
362
|
+
|
|
363
|
+
Args:
|
|
364
|
+
agent_name: the name of agent.
|
|
365
|
+
recycle_endpoint: trigger for recycle the endpoint server. If true, the endpoint will be recycled
|
|
366
|
+
when there is no agent depending on.
|
|
367
|
+
"""
|
|
368
|
+
_, server_id = self.agent_procs_assign[agent_name]
|
|
369
|
+
del self.agent_procs_assign[agent_name]
|
|
370
|
+
if recycle_endpoint:
|
|
371
|
+
if server_id == self.online_server_name:
|
|
372
|
+
return
|
|
373
|
+
else:
|
|
374
|
+
for _, iter_sid in self.agent_procs_assign.values():
|
|
375
|
+
if server_id == iter_sid:
|
|
376
|
+
return
|
|
377
|
+
self.agent_procs[server_id][0].terminate()
|
|
378
|
+
self.open_ports.append(server_id.split("_")[-1])
|
|
379
|
+
print(colored(f"Agent {agent_name} has been cleared.", "yellow"), flush=True)
|
|
380
|
+
|
|
381
|
+
def clear_all_agents(self, recycle_endpoint: bool | None = True):
    """Remove every cached agent, optionally recycling their endpoints."""
    # Snapshot the keys first: clear_agent mutates the dict while we iterate.
    pending = [*self.agent_procs_assign]
    for name in pending:
        self.clear_agent(name, recycle_endpoint)
    print(colored("All agents have been cleared.", "yellow"), flush=True)
|
|
386
|
+
|
|
387
|
+
def build(
    self,
    building_task: str,
    default_llm_config: LLMConfig | dict[str, Any],
    coding: bool | None = None,
    code_execution_config: dict[str, Any] | None = None,
    use_oai_assistant: bool | None = False,
    user_proxy: ConversableAgent | None = None,
    max_agents: int | None = None,
    **kwargs: Any,
) -> tuple[list[ConversableAgent], dict[str, Any]]:
    """Auto build agents based on the building task.

    Args:
        building_task: instruction that helps build manager (gpt-4) to decide what agent should be built.
        default_llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...).
        coding: use to identify if the user proxy (a code interpreter) should be added.
        code_execution_config: specific configs for user proxy (e.g., last_n_messages, work_dir, ...).
        use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
        user_proxy: user proxy's class that can be used to replace the default user proxy.
        max_agents (Optional[int], default=None): Maximum number of agents to create for the task. If None, uses the value from self.max_agents.
        **kwargs (Any): Additional arguments to pass to _build_agents.
            - agent_configs: Optional list of predefined agent configurations to use.

    Returns:
        agent_list: a list of agents.
        cached_configs: cached configs.
    """
    if code_execution_config is None:
        code_execution_config = {
            "last_n_messages": 1,
            "work_dir": "groupchat",
            "use_docker": False,
            "timeout": 10,
        }

    if max_agents is None:
        max_agents = self.max_agents

    # Copy: generated configs are appended below, and a caller-supplied
    # ``agent_configs`` list must never be mutated in place.
    agent_configs = list(kwargs.get("agent_configs", []))
    self.building_task = building_task

    # Step 1: ask the builder model for a comma-separated list of expert names.
    print(colored("==> Generating agents...", "green"), flush=True)
    resp_agent_name = (
        self.builder_model.create(
            messages=[
                {
                    "role": "user",
                    "content": self.AGENT_NAME_PROMPT.format(task=building_task, max_agents=max_agents),
                }
            ]
        )
        .choices[0]
        .message.content
    )
    agent_name_list = [agent_name.strip().replace(" ", "_") for agent_name in resp_agent_name.split(",")]
    print(f"{agent_name_list} are generated.", flush=True)

    # Step 2: generate a system message per agent.
    print(colored("==> Generating system message...", "green"), flush=True)
    agent_sys_msg_list = []
    for name in agent_name_list:
        print(f"Preparing system message for {name}", flush=True)
        resp_agent_sys_msg = (
            self.builder_model.create(
                messages=[
                    {
                        "role": "user",
                        "content": self.AGENT_SYS_MSG_PROMPT.format(
                            task=building_task,
                            position=name,
                            default_sys_msg=self.DEFAULT_DESCRIPTION,
                        ),
                    }
                ]
            )
            .choices[0]
            .message.content
        )
        agent_sys_msg_list.append(resp_agent_sys_msg)

    # Step 3: condense each system message into a short description.
    print(colored("==> Generating description...", "green"), flush=True)
    agent_description_list = []
    for name, sys_msg in list(zip(agent_name_list, agent_sys_msg_list)):
        print(f"Preparing description for {name}", flush=True)
        resp_agent_description = (
            self.builder_model.create(
                messages=[
                    {
                        "role": "user",
                        "content": self.AGENT_DESCRIPTION_PROMPT.format(position=name, sys_msg=sys_msg),
                    }
                ]
            )
            .choices[0]
            .message.content
        )
        agent_description_list.append(resp_agent_description)

    for name, sys_msg, description in list(zip(agent_name_list, agent_sys_msg_list, agent_description_list)):
        agent_configs.append({
            "name": name,
            "model": self.agent_model,
            "tags": self.agent_model_tags,
            "system_message": sys_msg,
            "description": description,
        })

    # Step 4: if not specified, let the builder model decide whether a code
    # interpreter (user proxy) is needed.
    if coding is None:
        resp = (
            self.builder_model.create(
                messages=[{"role": "user", "content": self.CODING_PROMPT.format(task=building_task)}]
            )
            .choices[0]
            .message.content
        )
        coding = resp == "YES"

    self.cached_configs.update({
        "building_task": building_task,
        "agent_configs": agent_configs,
        "coding": coding,
        "default_llm_config": default_llm_config,
        "code_execution_config": code_execution_config,
    })
    _config_check(self.cached_configs)
    return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)
|
|
513
|
+
|
|
514
|
+
def build_from_library(
    self,
    building_task: str,
    library_path_or_json: str,
    default_llm_config: LLMConfig | dict[str, Any],
    top_k: int = 3,
    coding: bool | None = None,
    code_execution_config: dict[str, Any] | None = None,
    use_oai_assistant: bool | None = False,
    embedding_model: str | None = "all-mpnet-base-v2",
    user_proxy: ConversableAgent | None = None,
    **kwargs: Any,
) -> tuple[list[ConversableAgent], dict[str, Any]]:
    """Build agents from a library.
    The library is a list of agent configs, which contains the name and system_message for each agent.
    We use a build manager to decide what agent in that library should be involved to the task.

    Args:
        building_task: instruction that helps build manager (gpt-4) to decide what agent should be built.
        library_path_or_json: path or JSON string config of agent library.
        default_llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...).
        top_k: number of results to return.
        coding: use to identify if the user proxy (a code interpreter) should be added.
        code_execution_config: specific configs for user proxy (e.g., last_n_messages, work_dir, ...).
        use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
        embedding_model: a Sentence-Transformers model use for embedding similarity to select agents from library.
            As reference, chromadb use "all-mpnet-base-v2" as default.
        user_proxy: user proxy's class that can be used to replace the default user proxy.
        **kwargs: Additional arguments to pass to _build_agents.

    Returns:
        agent_list: a list of agents.
        cached_configs: cached configs.
    """
    import chromadb
    from chromadb.utils import embedding_functions

    if code_execution_config is None:
        code_execution_config = {
            "last_n_messages": 1,
            "work_dir": "groupchat",
            "use_docker": False,
            "timeout": 120,
        }

    # The argument may be a JSON string or a path to a JSON file.
    # (The former `except Exception as e: raise e` clause was a no-op and
    # has been removed; unexpected errors still propagate.)
    try:
        agent_library = json.loads(library_path_or_json)
    except json.decoder.JSONDecodeError:
        with open(library_path_or_json) as f:
            agent_library = json.load(f)

    print(colored("==> Looking for suitable agents in the library...", "green"), flush=True)
    skills = building_task.replace(":", " ").split("\n")
    if len(skills) == 0:
        skills = [building_task]

    # Embed the library descriptions and recall top_k candidates per skill.
    chroma_client = chromadb.Client()
    collection = chroma_client.create_collection(
        name="agent_list",
        embedding_function=embedding_functions.SentenceTransformerEmbeddingFunction(model_name=embedding_model),
    )
    collection.add(
        documents=[agent["description"] for agent in agent_library],
        metadatas=[{"source": "agent_profile"} for _ in range(len(agent_library))],
        ids=[f"agent_{i}" for i in range(len(agent_library))],
    )
    agent_desc_list = set()
    for skill in skills:
        recall = set(collection.query(query_texts=[skill], n_results=top_k)["documents"][0])
        agent_desc_list = agent_desc_list.union(recall)

    agent_config_list = []
    for description in list(agent_desc_list):
        for agent in agent_library:
            if agent["description"] == description:
                agent_config_list.append(agent.copy())
                break
    chroma_client.delete_collection(collection.name)

    # double recall from the searching result
    expert_pool = [f"{agent['name']}: {agent['description']}" for agent in agent_config_list]
    while True:
        skill_agent_pair_json = (
            self.builder_model.create(
                messages=[
                    {
                        "role": "user",
                        "content": self.AGENT_SELECTION_PROMPT.format(
                            skills=building_task, expert_pool=expert_pool, max_agents=self.max_agents
                        ),
                    }
                ]
            )
            .choices[0]
            .message.content
        )
        try:
            skill_agent_pair_json = _retrieve_json(skill_agent_pair_json)
            skill_agent_pair = json.loads(skill_agent_pair_json)
            break
        except Exception as e:
            # Malformed JSON from the model: wait and retry.
            print(e, flush=True)
            time.sleep(5)
            continue

    recalled_agent_config_list = []
    recalled_name_desc = []
    for skill, agent_profile in skill_agent_pair.items():
        # If no suitable agent, generate an agent
        if agent_profile == "None":
            _, agent_config_temp = self.build(
                building_task=skill,
                default_llm_config=default_llm_config.copy(),
                coding=False,
                use_oai_assistant=use_oai_assistant,
                max_agents=1,
            )
            self.clear_agent(agent_config_temp["agent_configs"][0]["name"])
            recalled_agent_config_list.append(agent_config_temp["agent_configs"][0])
        else:
            if agent_profile in recalled_name_desc:
                # prevent identical agents
                continue
            recalled_name_desc.append(agent_profile)
            # Split on the FIRST colon only: descriptions routinely contain
            # colons themselves, and a profile without a colon must not crash.
            name, _sep, desc = agent_profile.partition(":")
            name = name.strip()
            desc = desc.strip()
            for agent in agent_config_list:
                if name == agent["name"] and desc == agent["description"]:
                    recalled_agent_config_list.append(agent.copy())
                    break

    print(f"{[agent['name'] for agent in recalled_agent_config_list]} are selected.", flush=True)

    if coding is None:
        resp = (
            self.builder_model.create(
                messages=[{"role": "user", "content": self.CODING_PROMPT.format(task=building_task)}]
            )
            .choices[0]
            .message.content
        )
        coding = resp == "YES"

    self.cached_configs.update({
        "building_task": building_task,
        "agent_configs": recalled_agent_config_list,
        "coding": coding,
        "default_llm_config": default_llm_config,
        "code_execution_config": code_execution_config,
    })
    _config_check(self.cached_configs)

    return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)
|
|
669
|
+
|
|
670
|
+
def _build_agents(
    self, use_oai_assistant: bool | None = False, user_proxy: ConversableAgent | None = None, **kwargs
) -> tuple[list[ConversableAgent], dict[str, Any]]:
    """Instantiate every agent described in the cached configs.

    Args:
        use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
        user_proxy: user proxy's class that can be used to replace the default user proxy.
        **kwargs: Additional keyword arguments.

    Returns:
        agent_list: a list of agents.
        cached_configs: cached configs.
    """
    cached = self.cached_configs
    agent_configs = cached["agent_configs"]
    default_llm_config = cached["default_llm_config"]
    coding = cached["coding"]
    code_execution_config = cached["code_execution_config"]

    print(colored("==> Creating agents...", "green"), flush=True)
    # The full member roster is loop-invariant; compute it once.
    member_names = [cfg["name"] for cfg in agent_configs]
    for cfg in agent_configs:
        print(f"Creating agent {cfg['name']}...", flush=True)
        self._create_agent(
            agent_config=cfg.copy(),
            member_name=member_names,
            llm_config=default_llm_config,
            use_oai_assistant=use_oai_assistant,
        )
    agent_list = [assigned[0] for assigned in self.agent_procs_assign.values()]

    if coding is True:
        print("Adding user console proxy...", flush=True)
        if user_proxy is None:
            user_proxy = UserProxyAgent(
                name="Computer_terminal",
                is_termination_msg=lambda x: x == "TERMINATE" or x == "TERMINATE.",
                code_execution_config=code_execution_config,
                human_input_mode="NEVER",
                default_auto_reply=self.DEFAULT_PROXY_AUTO_REPLY,
            )
        agent_list = agent_list + [user_proxy]

    return agent_list, self.cached_configs.copy()
|
|
713
|
+
|
|
714
|
+
def save(self, filepath: str | None = None) -> str:
    """Persist the cached building configs to disk as JSON.

    When no ``filepath`` is given, a deterministic filename is derived from
    the MD5 digest of ``self.building_task`` with a ``save_config_`` prefix
    and the file is written to the current directory.

    Args:
        filepath: save path.

    Return:
        filepath: path save.
    """
    if filepath is None:
        digest = hashlib.md5(self.building_task.encode("utf-8")).hexdigest()
        filepath = f"./save_config_{digest}.json"
    with open(filepath, "w") as save_file:
        json.dump(self.cached_configs, save_file, indent=4)
    print(colored(f"Building config saved to {filepath}", "green"), flush=True)

    return filepath
|
|
731
|
+
|
|
732
|
+
def load(
    self,
    filepath: str | None = None,
    config_json: str | None = None,
    use_oai_assistant: bool | None = False,
    **kwargs: Any,
) -> tuple[list[ConversableAgent], dict[str, Any]]:
    """Load building configs and call the build function to complete building without calling online LLMs' api.

    Args:
        filepath: filepath or JSON string for the save config.
        config_json: JSON string for the save config.
        use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
        **kwargs (Any): Additional arguments to pass to _build_agents:
            - code_execution_config (Optional[dict[str, Any]]): If provided, overrides the
                code execution configuration from the loaded config.

    Returns:
        agent_list: a list of agents.
        cached_configs: cached configs.

    Raises:
        ValueError: if neither ``filepath`` nor ``config_json`` is provided.
    """
    # Fail fast with a clear message instead of the UnboundLocalError the
    # old code raised when both sources were missing.
    if filepath is None and config_json is None:
        raise ValueError("Either 'filepath' or 'config_json' must be provided to load a building config.")

    # load json string.
    if config_json is not None:
        print(colored("Loading config from JSON...", "green"), flush=True)
        cached_configs = json.loads(config_json)

    # load from path (takes precedence when both are given, as before).
    if filepath is not None:
        print(colored(f"Loading config from {filepath}", "green"), flush=True)
        with open(filepath) as f:
            cached_configs = json.load(f)

    _config_check(cached_configs)

    agent_configs = cached_configs["agent_configs"]
    default_llm_config = cached_configs["default_llm_config"]
    coding = cached_configs["coding"]

    # A caller-supplied code_execution_config overrides the saved one; it is
    # consumed here so it is not forwarded to _build_agents.
    code_execution_config = kwargs.pop("code_execution_config", None)
    if code_execution_config is None:
        code_execution_config = cached_configs["code_execution_config"]

    self.cached_configs.update({
        "building_task": cached_configs["building_task"],
        "agent_configs": agent_configs,
        "coding": coding,
        "default_llm_config": default_llm_config,
        "code_execution_config": code_execution_config,
    })
    return self._build_agents(use_oai_assistant, **kwargs)
|