ag2 0.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ag2-0.10.2.dist-info/METADATA +819 -0
- ag2-0.10.2.dist-info/RECORD +423 -0
- ag2-0.10.2.dist-info/WHEEL +4 -0
- ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
- ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
- autogen/__init__.py +88 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +426 -0
- autogen/_website/generate_mkdocs.py +1216 -0
- autogen/_website/notebook_processor.py +475 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +413 -0
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +86 -0
- autogen/a2a/client.py +357 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +207 -0
- autogen/agentchat/__init__.py +47 -0
- autogen/agentchat/agent.py +180 -0
- autogen/agentchat/assistant_agent.py +86 -0
- autogen/agentchat/chat.py +325 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +432 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +578 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +189 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
- autogen/agentchat/contrib/rag/query_engine.py +76 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
- autogen/agentchat/contrib/swarm_agent.py +1404 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +224 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +304 -0
- autogen/agentchat/conversable_agent.py +4307 -0
- autogen/agentchat/group/__init__.py +67 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +39 -0
- autogen/agentchat/group/context_variables.py +182 -0
- autogen/agentchat/group/events/transition_events.py +111 -0
- autogen/agentchat/group/group_tool_executor.py +324 -0
- autogen/agentchat/group/group_utils.py +659 -0
- autogen/agentchat/group/guardrails.py +179 -0
- autogen/agentchat/group/handoffs.py +303 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +291 -0
- autogen/agentchat/group/on_condition.py +55 -0
- autogen/agentchat/group/on_context_condition.py +51 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +160 -0
- autogen/agentchat/group/patterns/manual.py +177 -0
- autogen/agentchat/group/patterns/pattern.py +295 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +24 -0
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +140 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/function_target.py +245 -0
- autogen/agentchat/group/targets/group_chat_target.py +133 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +424 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1832 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
- autogen/agentchat/realtime/experimental/function_observer.py +84 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +114 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +74 -0
- autogen/agents/contrib/time/time_tool_agent.py +52 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
- autogen/agents/experimental/document_agent/document_agent.py +643 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +376 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
- autogen/agents/experimental/document_agent/parser_utils.py +134 -0
- autogen/agents/experimental/document_agent/url_utils.py +417 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +76 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +70 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +71 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +97 -0
- autogen/cache/in_memory_cache.py +54 -0
- autogen/cache/redis_cache.py +119 -0
- autogen/code_utils.py +598 -0
- autogen/coding/__init__.py +30 -0
- autogen/coding/base.py +120 -0
- autogen/coding/docker_commandline_code_executor.py +283 -0
- autogen/coding/factory.py +56 -0
- autogen/coding/func_with_reqs.py +203 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +160 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +224 -0
- autogen/coding/jupyter/jupyter_code_executor.py +154 -0
- autogen/coding/jupyter/local_jupyter_server.py +164 -0
- autogen/coding/local_commandline_code_executor.py +341 -0
- autogen/coding/markdown_code_extractor.py +44 -0
- autogen/coding/utils.py +55 -0
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/doc_utils.py +35 -0
- autogen/environments/__init__.py +10 -0
- autogen/environments/docker_python_environment.py +365 -0
- autogen/environments/python_environment.py +125 -0
- autogen/environments/system_python_environment.py +85 -0
- autogen/environments/venv_python_environment.py +220 -0
- autogen/environments/working_directory.py +74 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1016 -0
- autogen/events/base_event.py +100 -0
- autogen/events/client_events.py +168 -0
- autogen/events/helpers.py +44 -0
- autogen/events/print_event.py +45 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +75 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +206 -0
- autogen/fast_depends/core/model.py +527 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +30 -0
- autogen/fast_depends/dependencies/provider.py +40 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +272 -0
- autogen/fast_depends/utils.py +177 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +173 -0
- autogen/import_utils.py +539 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
- autogen/interop/langchain/langchain_tool.py +78 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +178 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
- autogen/interop/registry.py +70 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +61 -0
- autogen/io/run_response.py +294 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +214 -0
- autogen/json_utils.py +42 -0
- autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
- autogen/llm_clients/__init__.py +77 -0
- autogen/llm_clients/client_v2.py +122 -0
- autogen/llm_clients/models/__init__.py +55 -0
- autogen/llm_clients/models/content_blocks.py +389 -0
- autogen/llm_clients/models/unified_message.py +145 -0
- autogen/llm_clients/models/unified_response.py +83 -0
- autogen/llm_clients/openai_completions_client.py +444 -0
- autogen/llm_config/__init__.py +11 -0
- autogen/llm_config/client.py +59 -0
- autogen/llm_config/config.py +461 -0
- autogen/llm_config/entry.py +169 -0
- autogen/llm_config/types.py +37 -0
- autogen/llm_config/utils.py +223 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +129 -0
- autogen/logger/file_logger.py +262 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +524 -0
- autogen/math_utils.py +338 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/helpers.py +45 -0
- autogen/mcp/mcp_client.py +349 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +399 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +946 -0
- autogen/messages/base_message.py +108 -0
- autogen/messages/client_messages.py +172 -0
- autogen/messages/print_message.py +48 -0
- autogen/oai/__init__.py +61 -0
- autogen/oai/anthropic.py +1516 -0
- autogen/oai/bedrock.py +800 -0
- autogen/oai/cerebras.py +302 -0
- autogen/oai/client.py +1658 -0
- autogen/oai/client_utils.py +196 -0
- autogen/oai/cohere.py +494 -0
- autogen/oai/gemini.py +1045 -0
- autogen/oai/gemini_types.py +156 -0
- autogen/oai/groq.py +319 -0
- autogen/oai/mistral.py +311 -0
- autogen/oai/oai_models/__init__.py +23 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +86 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +97 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
- autogen/oai/oai_models/completion_usage.py +59 -0
- autogen/oai/ollama.py +657 -0
- autogen/oai/openai_responses.py +451 -0
- autogen/oai/openai_utils.py +897 -0
- autogen/oai/together.py +387 -0
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +197 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/retrieve_utils.py +490 -0
- autogen/runtime_logging.py +161 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/token_count_utils.py +280 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +40 -0
- autogen/tools/dependency_injection.py +249 -0
- autogen/tools/experimental/__init__.py +54 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +154 -0
- autogen/tools/experimental/code_execution/__init__.py +7 -0
- autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +329 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
- autogen/tools/experimental/firecrawl/__init__.py +7 -0
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1311 -0
- autogen/tools/experimental/searxng/__init__.py +7 -0
- autogen/tools/experimental/searxng/searxng_search.py +142 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +176 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
- autogen/tools/function_utils.py +412 -0
- autogen/tools/tool.py +188 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +72 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
from collections.abc import Sequence
|
|
8
|
+
from typing import Any, Protocol
|
|
9
|
+
|
|
10
|
+
from ..doc_utils import export_module
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@export_module("autogen")
class ModelClient(Protocol):
    """Structural protocol for custom model clients used by OpenAIWrapper.

    A client class must implement the following methods:
    - create must return a response object that implements the ModelClientResponseProtocol
    - cost must return the cost of the response
    - get_usage must return a dict with the following keys:
        - prompt_tokens
        - completion_tokens
        - total_tokens
        - cost
        - model

    This class is used to create a client that can be used by OpenAIWrapper.
    The response returned from create must adhere to the ModelClientResponseProtocol but can be extended however needed.
    The message_retrieval method must be implemented to return a list of str or a list of messages from the response.
    """

    # Keys every get_usage() implementation is expected to report.
    RESPONSE_USAGE_KEYS: list[str] = ["prompt_tokens", "completion_tokens", "total_tokens", "cost", "model"]

    class ModelClientResponseProtocol(Protocol):
        # Minimal structural shape of a create() response; mirrors OpenAI's
        # ChatCompletion layout (choices -> message -> content).
        class Choice(Protocol):
            class Message(Protocol):
                # Message payload: plain text, a single content dict, a list of
                # content parts, or None.
                content: str | dict[str, Any] | list[dict[str, Any]] | None

            message: Message

        choices: Sequence[Choice]
        model: str

    def create(self, params: dict[str, Any]) -> ModelClientResponseProtocol: ...  # pragma: no cover

    def message_retrieval(
        self, response: ModelClientResponseProtocol
    ) -> list[str] | list["ModelClient.ModelClientResponseProtocol.Choice.Message"]:
        """Retrieve and return a list of strings or a list of Choice.Message from the response.

        NOTE: if a list of Choice.Message is returned, it currently needs to contain the fields of OpenAI's ChatCompletion Message object,
        since that is expected for function or tool calling in the rest of the codebase at the moment, unless a custom agent is being used.
        """
        ...  # pragma: no cover

    def cost(self, response: ModelClientResponseProtocol) -> float: ...  # pragma: no cover

    @staticmethod
    def get_usage(response: ModelClientResponseProtocol) -> dict[str, Any]:
        """Return usage summary of the response using RESPONSE_USAGE_KEYS."""
        ...  # pragma: no cover
|
@@ -0,0 +1,461 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
import functools
|
|
6
|
+
import json
|
|
7
|
+
import re
|
|
8
|
+
import warnings
|
|
9
|
+
from collections.abc import Iterable
|
|
10
|
+
from contextvars import ContextVar
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from typing import Annotated, Any, Literal, TypeAlias
|
|
13
|
+
|
|
14
|
+
from pydantic import BaseModel, ConfigDict, Field
|
|
15
|
+
from typing_extensions import Self, deprecated
|
|
16
|
+
|
|
17
|
+
from autogen.doc_utils import export_module
|
|
18
|
+
|
|
19
|
+
from .entry import ApplicationConfig, LLMConfigEntry
|
|
20
|
+
from .types import ConfigEntries
|
|
21
|
+
from .utils import config_list_from_json, filter_config
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# Meta class to allow LLMConfig.current and LLMConfig.default to be used as class properties
class MetaLLMConfig(type):
    """Metaclass that exposes ``current`` and ``default`` as class-level properties.

    Both properties are deprecated; they resolve the LLMConfig bound to the
    active context (set via a context block) rather than any instance state.
    """

    def __init__(cls, *args: Any, **kwargs: Any) -> None:
        # No metaclass-level initialization required; defined only to accept
        # the standard (name, bases, namespace) arguments.
        pass

    @property
    @deprecated(
        "`LLMConfig.current / .default` properties are deprecated. "
        "Pass config object to usage explicitly instead. "
        "Scheduled for removal in 0.11.0 version."
    )
    def current(cls) -> "LLMConfig":
        """Return the LLMConfig active in the current context.

        Raises:
            ValueError: if no LLMConfig has been set in the current context.
        """
        active = LLMConfig.get_current_llm_config(llm_config=None)
        if active is None:
            raise ValueError("No current LLMConfig set. Are you inside a context block?")
        return active  # type: ignore[return-value]

    @property
    @deprecated(
        "`LLMConfig.current / .default` properties are deprecated. "
        "Pass config object to usage explicitly instead. "
        "Scheduled for removal in 0.11.0 version."
    )
    def default(cls) -> "LLMConfig":
        """Alias for ``current``; kept for backward compatibility."""
        return cls.current
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
# Anything accepted as a single LLM config entry: a typed LLMConfigEntry, a
# member of the ConfigEntries union, or a raw dict validated downstream.
ConfigItem: TypeAlias = LLMConfigEntry | ConfigEntries | dict[str, Any]
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@export_module("autogen")
class LLMConfig(metaclass=MetaLLMConfig):
    """Public container for LLM configuration.

    Thin wrapper around an internal pydantic model (`_LLMConfig`) that adds
    dict-like access (`config["model"]`, `get`, `items`/`keys`/`values`),
    attribute delegation, copying, and a (deprecated) context-manager
    protocol backed by a `ContextVar`.
    """

    # Backing storage for the deprecated `with llm_config:` context manager
    # and the `LLMConfig.current` metaclass property.
    _current_llm_config: ContextVar["LLMConfig"] = ContextVar("current_llm_config")

    # Declared for static type checkers only; the real value lives on
    # `self._model` and is reached through `__getattr__` delegation below.
    config_list: list[ConfigEntries]

    def __init__(
        self,
        *configs: ConfigItem,
        top_p: float | None = None,
        temperature: float | None = None,
        max_tokens: int | None = None,
        check_every_ms: int | None = None,
        allow_format_str_template: bool | None = None,
        response_format: str | dict[str, Any] | BaseModel | type[BaseModel] | None = None,
        timeout: int | None = None,
        seed: int | None = None,
        cache_seed: int | None = None,
        parallel_tool_calls: bool | None = None,
        tools: Iterable[Any] = (),
        functions: Iterable[Any] = (),
        routing_method: Literal["fixed_order", "round_robin"] | None = None,
        config_list: Annotated[
            Iterable[ConfigItem] | dict[str, Any],
            deprecated(
                "`LLMConfig(config_list=[{'model': ..., 'api_key': ...}, ...])` syntax is deprecated. "
                "Use `LLMConfig({'api_key': ..., 'model': ...}, ...)` instead. "
                "Scheduled for removal in 0.11.0 version."
            ),
        ] = (),
        **kwargs: Annotated[
            Any,
            deprecated(
                "`LLMConfig(api_key=..., model=...)` syntax is deprecated. "
                "Use `LLMConfig({'api_key': ..., 'model': ...})` instead. "
                "Scheduled for removal in 0.11.0 version."
            ),
        ],
    ) -> None:
        r"""Initializes the LLMConfig object.

        Args:
            *configs: A list of LLM configuration entries or dictionaries.
            config_list: A list of LLM configuration entries or dictionaries (deprecated).
            temperature: The sampling temperature for LLM generation.
            check_every_ms: The interval (in milliseconds) to check for updates
            allow_format_str_template: Whether to allow format string templates.
            response_format: The format of the response (e.g., JSON, text).
            timeout: The timeout for LLM requests in seconds.
            seed: The random seed for reproducible results.
            cache_seed: The seed for caching LLM responses.
            parallel_tool_calls: Whether to enable parallel tool calls.
            tools: A list of tools available for the LLM.
            functions: A list of functions available for the LLM.
            max_tokens: The maximum number of tokens to generate.
            top_p: The nucleus sampling probability.
            routing_method: The method used to route requests (e.g., fixed_order, round_robin).
            **kwargs: Additional keyword arguments (deprecated).

        Examples:
            ```python
            # Example 1: create config from one model dictionary
            config = LLMConfig({
                "model": "gpt-5-mini",
                "api_key": os.environ["OPENAI_API_KEY"],
            })

            # Example 2: create config from list of dictionaries
            config = LLMConfig(
                {
                    "model": "gpt-5-mini",
                    "api_key": os.environ["OPENAI_API_KEY"],
                },
                {
                    "model": "gpt-4",
                    "api_key": os.environ["OPENAI_API_KEY"],
                },
            )

            # Example 3 (deprecated): create config from `kwargs` options
            config = LLMConfig(
                model="gpt-5-mini",
                api_key=os.environ["OPENAI_API_KEY"],
            )

            # Example 4 (deprecated): create config from `config_list` dictionary
            config = LLMConfig(
                config_list={
                    "model": "gpt-5-mini",
                    "api_key": os.environ["OPENAI_API_KEY"],
                }
            )

            # Example 5 (deprecated): create config from `config_list` list
            config = LLMConfig(
                config_list=[
                    {
                        "model": "gpt-5-mini",
                        "api_key": os.environ["OPENAI_API_KEY"],
                    },
                    {
                        "model": "gpt-5",
                        "api_key": os.environ["OPENAI_API_KEY"],
                    },
                ]
            )
            ```
        """
        # Backport: a bare dict passed as config_list means a single entry.
        if isinstance(config_list, dict):
            config_list = [config_list]

        if kwargs:
            warnings.warn(
                (
                    "`LLMConfig(api_key=..., model=...)` syntax is deprecated. "
                    "Use `LLMConfig({'api_key': ..., 'model': ...})` instead. "
                    "Scheduled for removal in 0.11.0 version."
                ),
                DeprecationWarning,
            )

        if config_list:
            warnings.warn(
                (
                    "`LLMConfig(config_list=[{'model': ..., 'api_key': ...}, ...])` syntax is deprecated. "
                    "Use `LLMConfig({'api_key': ..., 'model': ...}, ...)` instead. "
                    "Scheduled for removal in 0.11.0 version."
                ),
                DeprecationWarning,
            )

        # Application-level generation options shared by every entry.
        app_config = ApplicationConfig(
            max_tokens=max_tokens,
            top_p=top_p,
            temperature=temperature,
        )

        application_level_options = app_config.model_dump(exclude_none=True)

        # Merge positional configs, deprecated config_list, and deprecated
        # kwargs (in that order); `filter(bool, ...)` drops empty items.
        final_config_list: list[LLMConfigEntry | dict[str, Any]] = []
        for c in filter(bool, (*configs, *config_list, kwargs)):
            if isinstance(c, LLMConfigEntry):
                # Typed entries merge application-level options themselves.
                final_config_list.append(c.apply_application_config(app_config))
                continue

            else:
                # Plain dicts: entry-level keys override application-level ones.
                final_config_list.append({
                    "api_type": "openai",  # default api_type
                    **application_level_options,
                    **c,
                })

        # All validation happens inside the wrapped pydantic model.
        self._model = _LLMConfig(
            **application_level_options,
            config_list=final_config_list,
            check_every_ms=check_every_ms,
            seed=seed,
            allow_format_str_template=allow_format_str_template,
            response_format=response_format,
            timeout=timeout,
            cache_seed=cache_seed,
            tools=tools or [],
            functions=functions or [],
            parallel_tool_calls=parallel_tool_calls,
            routing_method=routing_method,
        )

    @classmethod
    def ensure_config(cls, config: "LLMConfig | ConfigItem | Iterable[ConfigItem]", /) -> "LLMConfig":
        """Transforms passed objects to LLMConfig object.

        Method to use for `Agent(llm_config={...})` cases.

        >>> LLMConfig.ensure_config(LLMConfig(...))
        LLMConfig(...)

        >>> LLMConfig.ensure_config(LLMConfigEntry(...))
        LLMConfig(LLMConfigEntry(...))

        >>> LLMConfig.ensure_config({"model": "gpt-o3"})
        LLMConfig(OpenAILLMConfigEntry(model="o3"))

        >>> LLMConfig.ensure_config([{"model": "gpt-o3"}, ...])
        LLMConfig(OpenAILLMConfigEntry(model="o3"), ...)

        >>> (deprecated) LLMConfig.ensure_config({"config_list": [{ "model": "gpt-o3" }, ...]})
        LLMConfig(OpenAILLMConfigEntry(model="o3"), ...)
        """
        if isinstance(config, LLMConfig):
            # Copy so callers never share mutable state with the argument.
            return config.copy()

        if isinstance(config, LLMConfigEntry):
            return LLMConfig(config)

        if isinstance(config, dict):
            if "config_list" in config:  # backport compatibility
                return LLMConfig(**config)
            return LLMConfig(config)

        # Any other iterable of config items.
        return LLMConfig(*config)

    @deprecated(
        "`with llm_config: ...` context manager is deprecated. "
        "Pass config object to usage explicitly instead. "
        "Scheduled for removal in 0.11.0 version."
    )
    def __enter__(self) -> "LLMConfig":
        """Activate this config for the current context (deprecated)."""
        warnings.warn(
            (
                "`with llm_config: ...` context manager is deprecated. "
                "Pass config object to usage explicitly instead. "
                "Scheduled for removal in 0.11.0 version."
            ),
            DeprecationWarning,
        )

        # NOTE(review): `_token` is not `_model`, so this write is delegated to
        # the wrapped pydantic model by `__setattr__` below — confirm intended.
        self._token = LLMConfig._current_llm_config.set(self)
        return self

    def __exit__(self, exc_type: type[Exception], exc_val: Exception, exc_tb: Any) -> None:
        # Restore whatever config was active before `__enter__`.
        LLMConfig._current_llm_config.reset(self._token)

    @classmethod
    @deprecated(
        "`LLMConfig.current / .default` properties are deprecated. "
        "Pass config object to usage explicitly instead. "
        "Scheduled for removal in 0.11.0 version."
    )
    def get_current_llm_config(cls, llm_config: "LLMConfig | None" = None) -> "LLMConfig | None":
        """Return `llm_config` if given, else a copy of the context-active config (or None)."""
        warnings.warn(
            (
                "`LLMConfig.current / .default` properties are deprecated. "
                "Pass config object to usage explicitly instead. "
                "Scheduled for removal in 0.11.0 version."
            ),
            DeprecationWarning,
        )

        if llm_config is not None:
            return llm_config
        try:
            # Copy so the caller cannot mutate the context-stored config.
            return (LLMConfig._current_llm_config.get()).copy()
        except LookupError:
            # No context block is active.
            return None

    @classmethod
    def from_json(
        cls,
        *,
        env: str | None = None,
        path: str | Path | None = None,
        file_location: str | None = None,
        filter_dict: dict[str, list[str | None] | set[str | None]] | None = None,
        **kwargs: Any,
    ) -> Self:
        """Load a config list from a JSON file or an environment variable.

        Exactly one of `env` (environment variable name) or `path` (file path)
        must be provided; `filter_dict` optionally narrows the loaded entries.

        Raises:
            ValueError: If neither or both of `env` and `path` are given.
        """
        if env is None and path is None:
            raise ValueError("Either 'env' or 'path' must be provided")

        if env is not None and path is not None:
            raise ValueError("Only one of 'env' or 'path' can be provided")

        config_list = config_list_from_json(
            env_or_file=env if env is not None else str(path),
            file_location=file_location,
            filter_dict=filter_dict,
        )

        return cls(*config_list, **kwargs)

    def where(self, *, exclude: bool = False, **kwargs: Any) -> "LLMConfig":
        """Return a new LLMConfig whose config_list is filtered by `kwargs`.

        With `exclude=True` the matching entries are dropped instead of kept.

        Raises:
            ValueError: If no entry satisfies the filter criteria.
        """
        filtered_config_list = filter_config(
            config_list=[c.model_dump() for c in self.config_list],
            filter_dict=kwargs,
            exclude=exclude,
        )

        if len(filtered_config_list) == 0:
            raise ValueError(f"No config found that satisfies the filter criteria: {kwargs}")

        # Rebuild with the same top-level options but the filtered entries.
        kwargs = self.model_dump()
        del kwargs["config_list"]

        return LLMConfig(*filtered_config_list, **kwargs)

    def model_dump(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> dict[str, Any]:
        """Dump the wrapped model, omitting None values and empty lists."""
        d = self._model.model_dump(*args, exclude_none=exclude_none, **kwargs)
        return {k: v for k, v in d.items() if not (isinstance(v, list) and len(v) == 0)}

    def model_dump_json(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> str:
        """JSON form of `model_dump` (goes through it to keep the same filtering)."""
        d = self.model_dump(*args, exclude_none=exclude_none, **kwargs)
        return json.dumps(d)

    def model_validate(self, *args: Any, **kwargs: Any) -> Any:
        # Delegates to the wrapped pydantic model's validator.
        return self._model.model_validate(*args, **kwargs)

    @functools.wraps(BaseModel.model_validate_json)
    def model_validate_json(self, *args: Any, **kwargs: Any) -> Any:
        return self._model.model_validate_json(*args, **kwargs)

    @functools.wraps(BaseModel.model_validate_strings)
    def model_validate_strings(self, *args: Any, **kwargs: Any) -> Any:
        return self._model.model_validate_strings(*args, **kwargs)

    def __eq__(self, value: Any) -> bool:
        # Equality is defined by the wrapped models' equality.
        if not isinstance(value, LLMConfig):
            return NotImplemented
        return self._model == value._model

    def _getattr(self, o: object, name: str) -> Any:
        # Single indirection point for attribute delegation.
        val = getattr(o, name)
        return val

    def get(self, key: str, default: Any | None = None) -> Any:
        """Dict-style `get` over the wrapped model's attributes."""
        val = getattr(self._model, key, default)
        return val

    def __getitem__(self, key: str) -> Any:
        """Dict-style item access; missing keys raise KeyError."""
        try:
            return self._getattr(self._model, key)
        except AttributeError:
            raise KeyError(f"Key '{key}' not found in {self.__class__.__name__}")

    def __setitem__(self, key: str, value: Any) -> None:
        """Dict-style item assignment; unknown fields raise ValueError."""
        try:
            setattr(self._model, key, value)
        except ValueError:
            raise ValueError(f"'{self.__class__.__name__}' object has no field '{key}'")

    def __getattr__(self, name: Any) -> Any:
        # Only called when normal lookup fails, i.e. for everything except
        # `_model`; forwards to the wrapped pydantic model.
        try:
            return self._getattr(self._model, name)
        except AttributeError:
            raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")

    def __setattr__(self, name: str, value: Any) -> None:
        # `_model` is the only attribute stored on the wrapper itself; every
        # other write is forwarded to the wrapped model.
        if name == "_model":
            object.__setattr__(self, name, value)
        else:
            setattr(self._model, name, value)

    def __contains__(self, key: str) -> bool:
        return hasattr(self._model, key)

    def __repr__(self) -> str:
        d = self.model_dump()
        r = [f"{k}={repr(v)}" for k, v in d.items()]

        s = f"LLMConfig({', '.join(r)})"
        # Replace any keys ending with 'key' or 'token' values with stars for security
        s = re.sub(
            r"(['\"])(\w*(key|token))\1:\s*(['\"])([^'\"]*)(?:\4)", r"\1\2\1: \4**********\4", s, flags=re.IGNORECASE
        )
        return s

    def __copy__(self) -> "LLMConfig":
        # Rebuild from dumped options; entries are revalidated by __init__.
        options = self._model.model_dump(exclude={"config_list"})
        return LLMConfig(*self._model.config_list, **options)

    def __deepcopy__(self, memo: dict[int, Any] | None = None) -> "LLMConfig":
        # Copy is already rebuild-from-scratch, so deep copy is the same.
        return self.__copy__()

    def copy(self) -> "LLMConfig":
        """Return a copy of this config."""
        return self.__copy__()

    def deepcopy(self, memo: dict[int, Any] | None = None) -> "LLMConfig":
        """Return a deep copy of this config."""
        return self.__deepcopy__(memo)

    def __str__(self) -> str:
        return repr(self)

    def items(self) -> Iterable[tuple[str, Any]]:
        """Dict-style `items()` over the dumped config."""
        d = self.model_dump()
        return d.items()

    def keys(self) -> Iterable[str]:
        """Dict-style `keys()` over the dumped config."""
        d = self.model_dump()
        return d.keys()

    def values(self) -> Iterable[Any]:
        """Dict-style `values()` over the dumped config."""
        d = self.model_dump()
        return d.values()
|
|
435
|
+
|
|
436
|
+
# Cache of dynamically built pydantic model classes, keyed by the tuple of
# LLMConfigEntry subclasses they were created from.
# NOTE(review): assumed to be module-level (shared across all LLMConfig
# instances) based on its position between the two classes — confirm.
_base_model_classes: dict[tuple[type["LLMConfigEntry"], ...], type[BaseModel]] = {}
|
|
437
|
+
|
|
438
|
+
|
|
439
|
+
class _LLMConfig(ApplicationConfig):
    """Internal pydantic model backing the public `LLMConfig` wrapper.

    All fields are required but nullable: `LLMConfig.__init__` always passes
    every option explicitly (possibly None). Inherits `max_tokens`, `top_p`
    and `temperature` from `ApplicationConfig`.
    """

    check_every_ms: int | None
    seed: int | None
    allow_format_str_template: bool | None
    response_format: str | dict[str, Any] | BaseModel | type[BaseModel] | None
    timeout: int | None
    cache_seed: int | None
    parallel_tool_calls: bool | None

    tools: list[Any]
    functions: list[Any]

    # Entries are validated against the provider-specific union, discriminated
    # by `api_type`; at least one entry is required.
    config_list: list[
        Annotated[
            ConfigEntries,
            Field(discriminator="api_type"),
        ],
    ] = Field(..., min_length=1)

    routing_method: Literal["fixed_order", "round_robin"] | None

    # Following field is configuration for pydantic to disallow extra fields
    model_config = ConfigDict(extra="forbid")
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
import re
|
|
6
|
+
from abc import ABC, abstractmethod
|
|
7
|
+
from collections.abc import Iterable, Mapping
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from httpx import Client as httpxClient
|
|
11
|
+
from pydantic import BaseModel, ConfigDict, Field, HttpUrl, SecretStr, ValidationInfo, field_serializer, field_validator
|
|
12
|
+
from typing_extensions import Required, Self, TypedDict
|
|
13
|
+
|
|
14
|
+
from .client import ModelClient
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class LLMConfigEntryDict(TypedDict, total=False):
    """Dict shape of a single config-list entry, before validation.

    Only `api_type` is required (`total=False` makes everything else
    optional); it selects the provider-specific entry class.
    """

    api_type: Required[str]
    model: str
    max_tokens: int | None
    top_p: float | None
    temperature: float | None

    api_key: SecretStr | str | None
    api_version: str | None
    base_url: HttpUrl | str | None
    voice: str | None
    http_client: httpxClient | None
    model_client_cls: str | None
    response_format: str | dict[str, Any] | BaseModel | type[BaseModel] | None
    default_headers: Mapping[str, Any] | None
    tags: list[str]
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class ApplicationConfig(BaseModel):
    """Generation options that can be set once at the application level.

    These are merged into each config-list entry that does not set its own
    value (see `LLMConfigEntry.apply_application_config`).
    """

    max_tokens: int | None = Field(
        default=None,
        ge=0,
        description="The maximum number of tokens to generate before stopping.",
    )

    top_p: float | None = Field(
        default=None,
        ge=0,
        le=1,
        description=(
            "An alternative to sampling with temperature, called nucleus sampling, "
            "where the model considers the results of the tokens with top_p probability mass."
            "So 0.1 means only the tokens comprising the top 10% probability mass are considered."
            "You should either alter `temperature` or `top_p`, but not both."
        ),
    )

    temperature: float | None = Field(
        default=None,
        ge=0,
        le=1,
        description=(
            "Amount of randomness injected into the response. "
            "Use `temperature` closer to `0.0` for analytical / multiple choice, and closer to a model's "
            "maximum `temperature` for creative and generative tasks. "
            "Note that even with `temperature` of `0.0`, the results will not be fully deterministic."
        ),
    )
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class LLMConfigEntry(ApplicationConfig, ABC):
    """One provider-specific entry of a config list.

    Concrete subclasses fix `api_type` (the discriminator of the entry union)
    and implement `create_client()`. Inherits the shared generation options
    (`max_tokens`, `top_p`, `temperature`) from `ApplicationConfig`.
    """

    api_type: str
    model: str = Field(..., min_length=1)

    api_key: SecretStr | None = None
    api_version: str | None = None

    base_url: HttpUrl | None = None
    http_client: httpxClient | None = None

    voice: str | None = None
    model_client_cls: str | None = None
    response_format: str | dict[str, Any] | BaseModel | type[BaseModel] | None = None
    default_headers: Mapping[str, Any] | None = None
    tags: list[str] = Field(default_factory=list)

    # Pydantic configuration: extra (provider-specific) fields are allowed to
    # pass through, and arbitrary types (e.g. httpx.Client) are permitted.
    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)

    def apply_application_config(self, application_config: ApplicationConfig) -> Self:
        """Return a copy of this entry with application-level defaults filled in.

        Entry-level values take precedence; an application-level value is used
        only where the entry leaves the corresponding field unset (None).
        """
        new_entry = self.model_copy()
        # Use explicit `is None` checks (not `or`) so that legitimate falsy
        # values set on the entry — temperature=0.0, top_p=0.0, max_tokens=0,
        # all valid per the ApplicationConfig field constraints (ge=0) — are
        # not silently overridden by the application-level defaults.
        if new_entry.max_tokens is None:
            new_entry.max_tokens = application_config.max_tokens
        if new_entry.top_p is None:
            new_entry.top_p = application_config.top_p
        if new_entry.temperature is None:
            new_entry.temperature = application_config.temperature
        return new_entry

    @abstractmethod
    def create_client(self) -> "ModelClient":
        """Build and return the provider client described by this entry."""
        ...

    @field_validator("base_url", mode="before")
    @classmethod
    def check_base_url(cls, v: HttpUrl | str | None, info: ValidationInfo) -> str | None:
        """Normalize `base_url`, defaulting to an http:// scheme when absent."""
        if v is None:  # Handle None case explicitly
            return None
        # Allow WebSocket URLs as well as HTTP(S)
        if not str(v).startswith(("https://", "http://", "wss://", "ws://")):
            return f"http://{str(v)}"
        return str(v)

    @field_serializer("base_url", when_used="unless-none")  # Ensure serializer also respects None
    def serialize_base_url(self, v: HttpUrl | None) -> str | None:
        return str(v) if v is not None else None

    @field_serializer("api_key", when_used="unless-none")
    def serialize_api_key(self, v: SecretStr) -> str:
        # Reveal the secret only at serialization time so dumps are usable.
        return v.get_secret_value()

    def model_dump(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> dict[str, Any]:
        """`model_dump` with `exclude_none=True` by default."""
        return BaseModel.model_dump(self, *args, exclude_none=exclude_none, **kwargs)

    def model_dump_json(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> str:
        """`model_dump_json` with `exclude_none=True` by default."""
        return BaseModel.model_dump_json(self, *args, exclude_none=exclude_none, **kwargs)

    def get(self, key: str, default: Any | None = None) -> Any:
        """Dict-style `get`; unwraps `SecretStr` values."""
        val = getattr(self, key, default)
        if isinstance(val, SecretStr):
            return val.get_secret_value()
        return val

    def __getitem__(self, key: str) -> Any:
        """Dict-style item access; unwraps `SecretStr`, raises KeyError if missing."""
        try:
            val = getattr(self, key)
            if isinstance(val, SecretStr):
                return val.get_secret_value()
            return val
        except AttributeError:
            raise KeyError(f"Key '{key}' not found in {self.__class__.__name__}")

    def __setitem__(self, key: str, value: Any) -> None:
        setattr(self, key, value)

    def __contains__(self, key: str) -> bool:
        return hasattr(self, key)

    def items(self) -> Iterable[tuple[str, Any]]:
        """Dict-style `items()` over the dumped entry."""
        d = self.model_dump()
        return d.items()

    def keys(self) -> Iterable[str]:
        """Dict-style `keys()` over the dumped entry."""
        d = self.model_dump()
        return d.keys()

    def values(self) -> Iterable[Any]:
        """Dict-style `values()` over the dumped entry."""
        d = self.model_dump()
        return d.values()

    def __repr__(self) -> str:
        # Override to eliminate none values from the repr
        d = self.model_dump()
        r = [f"{k}={repr(v)}" for k, v in d.items()]

        s = f"{self.__class__.__name__}({', '.join(r)})"

        # Replace any keys ending with '_key' or '_token' values with stars for security
        # This regex will match any key ending with '_key' or '_token' and its value, and replace the value with stars
        # It also captures the type of quote used (single or double) and reuses it in the replacement
        s = re.sub(r'(\w+_(key|token)\s*=\s*)([\'"]).*?\3', r"\1\3**********\3", s, flags=re.IGNORECASE)

        return s

    def __str__(self) -> str:
        return repr(self)
|