ag2-0.10.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ag2-0.10.2.dist-info/METADATA +819 -0
- ag2-0.10.2.dist-info/RECORD +423 -0
- ag2-0.10.2.dist-info/WHEEL +4 -0
- ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
- ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
- autogen/__init__.py +88 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +426 -0
- autogen/_website/generate_mkdocs.py +1216 -0
- autogen/_website/notebook_processor.py +475 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +413 -0
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +86 -0
- autogen/a2a/client.py +357 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +207 -0
- autogen/agentchat/__init__.py +47 -0
- autogen/agentchat/agent.py +180 -0
- autogen/agentchat/assistant_agent.py +86 -0
- autogen/agentchat/chat.py +325 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +432 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +578 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +189 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
- autogen/agentchat/contrib/rag/query_engine.py +76 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
- autogen/agentchat/contrib/swarm_agent.py +1404 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +224 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +304 -0
- autogen/agentchat/conversable_agent.py +4307 -0
- autogen/agentchat/group/__init__.py +67 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +39 -0
- autogen/agentchat/group/context_variables.py +182 -0
- autogen/agentchat/group/events/transition_events.py +111 -0
- autogen/agentchat/group/group_tool_executor.py +324 -0
- autogen/agentchat/group/group_utils.py +659 -0
- autogen/agentchat/group/guardrails.py +179 -0
- autogen/agentchat/group/handoffs.py +303 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +291 -0
- autogen/agentchat/group/on_condition.py +55 -0
- autogen/agentchat/group/on_context_condition.py +51 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +160 -0
- autogen/agentchat/group/patterns/manual.py +177 -0
- autogen/agentchat/group/patterns/pattern.py +295 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +24 -0
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +140 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/function_target.py +245 -0
- autogen/agentchat/group/targets/group_chat_target.py +133 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +424 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1832 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
- autogen/agentchat/realtime/experimental/function_observer.py +84 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +114 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +74 -0
- autogen/agents/contrib/time/time_tool_agent.py +52 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
- autogen/agents/experimental/document_agent/document_agent.py +643 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +376 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
- autogen/agents/experimental/document_agent/parser_utils.py +134 -0
- autogen/agents/experimental/document_agent/url_utils.py +417 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +76 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +70 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +71 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +97 -0
- autogen/cache/in_memory_cache.py +54 -0
- autogen/cache/redis_cache.py +119 -0
- autogen/code_utils.py +598 -0
- autogen/coding/__init__.py +30 -0
- autogen/coding/base.py +120 -0
- autogen/coding/docker_commandline_code_executor.py +283 -0
- autogen/coding/factory.py +56 -0
- autogen/coding/func_with_reqs.py +203 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +160 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +224 -0
- autogen/coding/jupyter/jupyter_code_executor.py +154 -0
- autogen/coding/jupyter/local_jupyter_server.py +164 -0
- autogen/coding/local_commandline_code_executor.py +341 -0
- autogen/coding/markdown_code_extractor.py +44 -0
- autogen/coding/utils.py +55 -0
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/doc_utils.py +35 -0
- autogen/environments/__init__.py +10 -0
- autogen/environments/docker_python_environment.py +365 -0
- autogen/environments/python_environment.py +125 -0
- autogen/environments/system_python_environment.py +85 -0
- autogen/environments/venv_python_environment.py +220 -0
- autogen/environments/working_directory.py +74 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1016 -0
- autogen/events/base_event.py +100 -0
- autogen/events/client_events.py +168 -0
- autogen/events/helpers.py +44 -0
- autogen/events/print_event.py +45 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +75 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +206 -0
- autogen/fast_depends/core/model.py +527 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +30 -0
- autogen/fast_depends/dependencies/provider.py +40 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +272 -0
- autogen/fast_depends/utils.py +177 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +173 -0
- autogen/import_utils.py +539 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
- autogen/interop/langchain/langchain_tool.py +78 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +178 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
- autogen/interop/registry.py +70 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +61 -0
- autogen/io/run_response.py +294 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +214 -0
- autogen/json_utils.py +42 -0
- autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
- autogen/llm_clients/__init__.py +77 -0
- autogen/llm_clients/client_v2.py +122 -0
- autogen/llm_clients/models/__init__.py +55 -0
- autogen/llm_clients/models/content_blocks.py +389 -0
- autogen/llm_clients/models/unified_message.py +145 -0
- autogen/llm_clients/models/unified_response.py +83 -0
- autogen/llm_clients/openai_completions_client.py +444 -0
- autogen/llm_config/__init__.py +11 -0
- autogen/llm_config/client.py +59 -0
- autogen/llm_config/config.py +461 -0
- autogen/llm_config/entry.py +169 -0
- autogen/llm_config/types.py +37 -0
- autogen/llm_config/utils.py +223 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +129 -0
- autogen/logger/file_logger.py +262 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +524 -0
- autogen/math_utils.py +338 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/helpers.py +45 -0
- autogen/mcp/mcp_client.py +349 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +399 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +946 -0
- autogen/messages/base_message.py +108 -0
- autogen/messages/client_messages.py +172 -0
- autogen/messages/print_message.py +48 -0
- autogen/oai/__init__.py +61 -0
- autogen/oai/anthropic.py +1516 -0
- autogen/oai/bedrock.py +800 -0
- autogen/oai/cerebras.py +302 -0
- autogen/oai/client.py +1658 -0
- autogen/oai/client_utils.py +196 -0
- autogen/oai/cohere.py +494 -0
- autogen/oai/gemini.py +1045 -0
- autogen/oai/gemini_types.py +156 -0
- autogen/oai/groq.py +319 -0
- autogen/oai/mistral.py +311 -0
- autogen/oai/oai_models/__init__.py +23 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +86 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +97 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
- autogen/oai/oai_models/completion_usage.py +59 -0
- autogen/oai/ollama.py +657 -0
- autogen/oai/openai_responses.py +451 -0
- autogen/oai/openai_utils.py +897 -0
- autogen/oai/together.py +387 -0
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +197 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/retrieve_utils.py +490 -0
- autogen/runtime_logging.py +161 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/token_count_utils.py +280 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +40 -0
- autogen/tools/dependency_injection.py +249 -0
- autogen/tools/experimental/__init__.py +54 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +154 -0
- autogen/tools/experimental/code_execution/__init__.py +7 -0
- autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +329 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
- autogen/tools/experimental/firecrawl/__init__.py +7 -0
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1311 -0
- autogen/tools/experimental/searxng/__init__.py +7 -0
- autogen/tools/experimental/searxng/searxng_search.py +142 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +176 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
- autogen/tools/function_utils.py +412 -0
- autogen/tools/tool.py +188 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +72 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
|
@@ -0,0 +1,897 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
|
|
8
|
+
import importlib
|
|
9
|
+
import importlib.metadata
|
|
10
|
+
import json
|
|
11
|
+
import logging
|
|
12
|
+
import os
|
|
13
|
+
import re
|
|
14
|
+
import tempfile
|
|
15
|
+
import time
|
|
16
|
+
import warnings
|
|
17
|
+
from copy import deepcopy
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
from typing import TYPE_CHECKING, Any, Union
|
|
20
|
+
|
|
21
|
+
from dotenv import find_dotenv, load_dotenv
|
|
22
|
+
from packaging.version import parse
|
|
23
|
+
from pydantic_core import to_jsonable_python
|
|
24
|
+
from typing_extensions import deprecated
|
|
25
|
+
|
|
26
|
+
from ..llm_config.utils import config_list_from_json as latest_config_list_from_json
|
|
27
|
+
from ..llm_config.utils import filter_config as latest_filter
|
|
28
|
+
|
|
29
|
+
if TYPE_CHECKING:
|
|
30
|
+
from openai import OpenAI
|
|
31
|
+
from openai.types.beta.assistant import Assistant
|
|
32
|
+
|
|
33
|
+
from ..llm_config import LLMConfig
|
|
34
|
+
|
|
35
|
+
from ..doc_utils import export_module
|
|
36
|
+
|
|
37
|
+
NON_CACHE_KEY = [
|
|
38
|
+
"api_key",
|
|
39
|
+
"base_url",
|
|
40
|
+
"api_type",
|
|
41
|
+
"api_version",
|
|
42
|
+
"azure_ad_token",
|
|
43
|
+
"azure_ad_token_provider",
|
|
44
|
+
"credentials",
|
|
45
|
+
]
|
|
46
|
+
DEFAULT_AZURE_API_VERSION = "2024-02-01"
|
|
47
|
+
|
|
48
|
+
# The below pricing is for 1K tokens. Whenever there is an update in the LLM's pricing,
|
|
49
|
+
# Please convert it to 1K tokens and update in the below dictionary in the format: (input_token_price, output_token_price).
|
|
50
|
+
OAI_PRICE1K = {
|
|
51
|
+
# https://openai.com/api/pricing/
|
|
52
|
+
# o1
|
|
53
|
+
"o1-preview-2024-09-12": (0.0015, 0.0060),
|
|
54
|
+
"o1-preview": (0.0015, 0.0060),
|
|
55
|
+
"o1-mini-2024-09-12": (0.0003, 0.0012),
|
|
56
|
+
"o1-mini": (0.0003, 0.0012),
|
|
57
|
+
"o1": (0.0015, 0.0060),
|
|
58
|
+
"o1-2024-12-17": (0.0015, 0.0060),
|
|
59
|
+
# o1 pro
|
|
60
|
+
"o1-pro": (0.15, 0.6), # $150 / $600!
|
|
61
|
+
"o1-pro-2025-03-19": (0.15, 0.6),
|
|
62
|
+
# o3
|
|
63
|
+
"o3": (0.0011, 0.0044),
|
|
64
|
+
"o3-mini-2025-01-31": (0.0011, 0.0044),
|
|
65
|
+
# gpt-5.1
|
|
66
|
+
"gpt-5.1": (0.00125, 0.01),
|
|
67
|
+
"gpt-5.1-2025-11-13": (0.00125, 0.01),
|
|
68
|
+
"gpt-5.1-chat-latest": (0.00125, 0.01),
|
|
69
|
+
"gpt-5.1-codex": (0.00125, 0.01),
|
|
70
|
+
"gpt-5.1-codex-mini": (0.00025, 0.002),
|
|
71
|
+
# gpt-5
|
|
72
|
+
"gpt-5": (0.00125, 0.01),
|
|
73
|
+
"gpt-5-2025-08-07": (0.00125, 0.01),
|
|
74
|
+
"gpt-5-chat-latest": (0.00125, 0.01),
|
|
75
|
+
"gpt-5-codex": (0.00125, 0.01),
|
|
76
|
+
"gpt-5-search-api": (0.00125, 0.01),
|
|
77
|
+
# gpt-5-mini
|
|
78
|
+
"gpt-5-mini": (0.00025, 0.002),
|
|
79
|
+
"gpt-5-mini-2025-08-07": (0.00025, 0.002),
|
|
80
|
+
# gpt-5-nano
|
|
81
|
+
"gpt-5-nano": (0.00005, 0.0004),
|
|
82
|
+
"gpt-5-nano-2025-08-07": (0.00005, 0.0004),
|
|
83
|
+
# gpt-5-pro
|
|
84
|
+
"gpt-5-pro": (0.015, 0.12),
|
|
85
|
+
# codex
|
|
86
|
+
"codex-mini-latest": (0.0015, 0.006),
|
|
87
|
+
# gpt-4o
|
|
88
|
+
"gpt-4o": (0.005, 0.015),
|
|
89
|
+
"gpt-4o-2024-05-13": (0.005, 0.015),
|
|
90
|
+
"gpt-4o-2024-08-06": (0.0025, 0.01),
|
|
91
|
+
"gpt-4o-2024-11-20": (0.0025, 0.01),
|
|
92
|
+
# gpt-4o-mini
|
|
93
|
+
"gpt-4o-mini": (0.000150, 0.000600),
|
|
94
|
+
"gpt-4o-mini-2024-07-18": (0.000150, 0.000600),
|
|
95
|
+
# gpt-4-turbo
|
|
96
|
+
"gpt-4-turbo-2024-04-09": (0.01, 0.03),
|
|
97
|
+
# gpt-4
|
|
98
|
+
"gpt-4": (0.03, 0.06),
|
|
99
|
+
"gpt-4-32k": (0.06, 0.12),
|
|
100
|
+
# gpt-4.1
|
|
101
|
+
"gpt-4.1": (0.002, 0.008),
|
|
102
|
+
"gpt-4.1-2025-04-14": (0.002, 0.008),
|
|
103
|
+
# gpt-4.1 mini
|
|
104
|
+
"gpt-4.1-mini": (0.0004, 0.0016),
|
|
105
|
+
"gpt-4.1-mini-2025-04-14": (0.0004, 0.0016),
|
|
106
|
+
# gpt-4.1 nano
|
|
107
|
+
"gpt-4.1-nano": (0.0001, 0.0004),
|
|
108
|
+
"gpt-4.1-nano-2025-04-14": (0.0001, 0.0004),
|
|
109
|
+
# gpt-3.5 turbo
|
|
110
|
+
"gpt-3.5-turbo": (0.0005, 0.0015), # default is 0125
|
|
111
|
+
"gpt-3.5-turbo-0125": (0.0005, 0.0015), # 16k
|
|
112
|
+
"gpt-3.5-turbo-instruct": (0.0015, 0.002),
|
|
113
|
+
# base model
|
|
114
|
+
"davinci-002": 0.002,
|
|
115
|
+
"babbage-002": 0.0004,
|
|
116
|
+
# old model
|
|
117
|
+
"gpt-4-0125-preview": (0.01, 0.03),
|
|
118
|
+
"gpt-4-1106-preview": (0.01, 0.03),
|
|
119
|
+
"gpt-4-1106-vision-preview": (0.01, 0.03), # TODO: support vision pricing of images
|
|
120
|
+
"gpt-3.5-turbo-1106": (0.001, 0.002),
|
|
121
|
+
"gpt-3.5-turbo-0613": (0.0015, 0.002),
|
|
122
|
+
# "gpt-3.5-turbo-16k": (0.003, 0.004),
|
|
123
|
+
"gpt-3.5-turbo-16k-0613": (0.003, 0.004),
|
|
124
|
+
"gpt-3.5-turbo-0301": (0.0015, 0.002),
|
|
125
|
+
"text-ada-001": 0.0004,
|
|
126
|
+
"text-babbage-001": 0.0005,
|
|
127
|
+
"text-curie-001": 0.002,
|
|
128
|
+
"code-cushman-001": 0.024,
|
|
129
|
+
"code-davinci-002": 0.1,
|
|
130
|
+
"text-davinci-002": 0.02,
|
|
131
|
+
"text-davinci-003": 0.02,
|
|
132
|
+
"gpt-4-0314": (0.03, 0.06), # deprecate in Sep
|
|
133
|
+
"gpt-4-32k-0314": (0.06, 0.12), # deprecate in Sep
|
|
134
|
+
"gpt-4-0613": (0.03, 0.06),
|
|
135
|
+
"gpt-4-32k-0613": (0.06, 0.12),
|
|
136
|
+
"gpt-4-turbo-preview": (0.01, 0.03),
|
|
137
|
+
# https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/#pricing
|
|
138
|
+
"gpt-35-turbo": (0.0005, 0.0015), # what's the default? using 0125 here.
|
|
139
|
+
"gpt-35-turbo-0125": (0.0005, 0.0015),
|
|
140
|
+
"gpt-35-turbo-instruct": (0.0015, 0.002),
|
|
141
|
+
"gpt-35-turbo-1106": (0.001, 0.002),
|
|
142
|
+
"gpt-35-turbo-0613": (0.0015, 0.002),
|
|
143
|
+
"gpt-35-turbo-0301": (0.0015, 0.002),
|
|
144
|
+
"gpt-35-turbo-16k": (0.003, 0.004),
|
|
145
|
+
"gpt-35-turbo-16k-0613": (0.003, 0.004),
|
|
146
|
+
# deepseek
|
|
147
|
+
"deepseek-chat": (0.00027, 0.0011),
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def get_key(config: dict[str, Any]) -> str:
|
|
152
|
+
"""Get a unique identifier of a configuration.
|
|
153
|
+
|
|
154
|
+
Args:
|
|
155
|
+
config (dict or list): A configuration.
|
|
156
|
+
|
|
157
|
+
Returns:
|
|
158
|
+
tuple: A unique identifier which can be used as a key for a dict.
|
|
159
|
+
"""
|
|
160
|
+
copied = False
|
|
161
|
+
for key in NON_CACHE_KEY:
|
|
162
|
+
if key in config:
|
|
163
|
+
config, copied = config.copy() if not copied else config, True
|
|
164
|
+
config.pop(key)
|
|
165
|
+
return to_jsonable_python(config) # type: ignore [no-any-return]
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def is_valid_api_key(api_key: str) -> bool:
|
|
169
|
+
"""Determine if input is valid OpenAI API key. As of 2024-09-24 there's no official definition of the key structure
|
|
170
|
+
so we will allow anything starting with "sk-" and having at least 48 alphanumeric (plus underscore and dash) characters.
|
|
171
|
+
Keys are known to start with "sk-", "sk-proj", "sk-None", and "sk-svcaat"
|
|
172
|
+
|
|
173
|
+
Args:
|
|
174
|
+
api_key (str): An input string to be validated.
|
|
175
|
+
|
|
176
|
+
Returns:
|
|
177
|
+
bool: A boolean that indicates if input is valid OpenAI API key.
|
|
178
|
+
"""
|
|
179
|
+
api_key_re = re.compile(r"^sk-[A-Za-z0-9_-]{48,}$")
|
|
180
|
+
return bool(re.fullmatch(api_key_re, api_key))
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
@export_module("autogen")
|
|
184
|
+
def get_config_list(
|
|
185
|
+
api_keys: list[str],
|
|
186
|
+
base_urls: list[str] | None = None,
|
|
187
|
+
api_type: str | None = None,
|
|
188
|
+
api_version: str | None = None,
|
|
189
|
+
) -> list[dict[str, Any]]:
|
|
190
|
+
"""Get a list of configs for OpenAI API client.
|
|
191
|
+
|
|
192
|
+
Args:
|
|
193
|
+
api_keys (list): The api keys for openai api calls.
|
|
194
|
+
base_urls (list, optional): The api bases for openai api calls. If provided, should match the length of api_keys.
|
|
195
|
+
api_type (str, optional): The api type for openai api calls.
|
|
196
|
+
api_version (str, optional): The api version for openai api calls.
|
|
197
|
+
|
|
198
|
+
Returns:
|
|
199
|
+
list: A list of configs for OepnAI API calls.
|
|
200
|
+
|
|
201
|
+
Example:
|
|
202
|
+
```python
|
|
203
|
+
# Define a list of API keys
|
|
204
|
+
api_keys = ["key1", "key2", "key3"]
|
|
205
|
+
|
|
206
|
+
# Optionally, define a list of base URLs corresponding to each API key
|
|
207
|
+
base_urls = ["https://api.service1.com", "https://api.service2.com", "https://api.service3.com"]
|
|
208
|
+
|
|
209
|
+
# Optionally, define the API type and version if they are common for all keys
|
|
210
|
+
api_type = "azure"
|
|
211
|
+
api_version = "2024-02-01"
|
|
212
|
+
|
|
213
|
+
# Call the get_config_list function to get a list of configuration dictionaries
|
|
214
|
+
config_list = get_config_list(api_keys, base_urls, api_type, api_version)
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
"""
|
|
218
|
+
if base_urls is not None:
|
|
219
|
+
assert len(api_keys) == len(base_urls), "The length of api_keys must match the length of base_urls"
|
|
220
|
+
config_list = []
|
|
221
|
+
for i, api_key in enumerate(api_keys):
|
|
222
|
+
if not api_key.strip():
|
|
223
|
+
continue
|
|
224
|
+
config = {"api_key": api_key}
|
|
225
|
+
if base_urls:
|
|
226
|
+
config["base_url"] = base_urls[i]
|
|
227
|
+
if api_type:
|
|
228
|
+
config["api_type"] = api_type
|
|
229
|
+
if api_version:
|
|
230
|
+
config["api_version"] = api_version
|
|
231
|
+
config_list.append(config)
|
|
232
|
+
return config_list
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
@export_module("autogen")
|
|
236
|
+
def get_first_llm_config(
|
|
237
|
+
llm_config: Union["LLMConfig", dict[str, Any]],
|
|
238
|
+
) -> dict[str, Any]:
|
|
239
|
+
"""Get the first LLM config from the given LLM config.
|
|
240
|
+
|
|
241
|
+
Args:
|
|
242
|
+
llm_config (dict): The LLM config.
|
|
243
|
+
|
|
244
|
+
Returns:
|
|
245
|
+
dict: The first LLM config.
|
|
246
|
+
|
|
247
|
+
Raises:
|
|
248
|
+
ValueError: If the LLM config is invalid.
|
|
249
|
+
"""
|
|
250
|
+
llm_config = deepcopy(llm_config)
|
|
251
|
+
if "config_list" not in llm_config:
|
|
252
|
+
if "model" in llm_config:
|
|
253
|
+
return llm_config # type: ignore [return-value]
|
|
254
|
+
raise ValueError("llm_config must be a valid config dictionary.")
|
|
255
|
+
|
|
256
|
+
if len(llm_config["config_list"]) == 0:
|
|
257
|
+
raise ValueError("Config list must contain at least one config.")
|
|
258
|
+
|
|
259
|
+
to_return = llm_config["config_list"][0]
|
|
260
|
+
return to_return if isinstance(to_return, dict) else to_return.model_dump() # type: ignore [no-any-return]
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
@export_module("autogen")
|
|
264
|
+
def config_list_openai_aoai(
|
|
265
|
+
key_file_path: str | None = ".",
|
|
266
|
+
openai_api_key_file: str | None = "key_openai.txt",
|
|
267
|
+
aoai_api_key_file: str | None = "key_aoai.txt",
|
|
268
|
+
openai_api_base_file: str | None = "base_openai.txt",
|
|
269
|
+
aoai_api_base_file: str | None = "base_aoai.txt",
|
|
270
|
+
exclude: str | None = None,
|
|
271
|
+
) -> list[dict[str, Any]]:
|
|
272
|
+
"""Get a list of configs for OpenAI API client (including Azure or local model deployments that support OpenAI's chat completion API).
|
|
273
|
+
|
|
274
|
+
This function constructs configurations by reading API keys and base URLs from environment variables or text files.
|
|
275
|
+
It supports configurations for both OpenAI and Azure OpenAI services, allowing for the exclusion of one or the other.
|
|
276
|
+
When text files are used, the environment variables will be overwritten.
|
|
277
|
+
To prevent text files from being used, set the corresponding file name to None.
|
|
278
|
+
Or set key_file_path to None to disallow reading from text files.
|
|
279
|
+
|
|
280
|
+
Args:
|
|
281
|
+
key_file_path (str, optional): The directory path where the API key files are located. Defaults to the current directory.
|
|
282
|
+
openai_api_key_file (str, optional): The filename containing the OpenAI API key. Defaults to 'key_openai.txt'.
|
|
283
|
+
aoai_api_key_file (str, optional): The filename containing the Azure OpenAI API key. Defaults to 'key_aoai.txt'.
|
|
284
|
+
openai_api_base_file (str, optional): The filename containing the OpenAI API base URL. Defaults to 'base_openai.txt'.
|
|
285
|
+
aoai_api_base_file (str, optional): The filename containing the Azure OpenAI API base URL. Defaults to 'base_aoai.txt'.
|
|
286
|
+
exclude (str, optional): The API type to exclude from the configuration list. Can be 'openai' or 'aoai'. Defaults to None.
|
|
287
|
+
|
|
288
|
+
Returns:
|
|
289
|
+
List[Dict]: A list of configuration dictionaries. Each dictionary contains keys for 'api_key',
|
|
290
|
+
and optionally 'base_url', 'api_type', and 'api_version'.
|
|
291
|
+
|
|
292
|
+
Raises:
|
|
293
|
+
FileNotFoundError: If the specified key files are not found and the corresponding API key is not set in the environment variables.
|
|
294
|
+
|
|
295
|
+
Example:
|
|
296
|
+
# To generate configurations excluding Azure OpenAI:
|
|
297
|
+
configs = config_list_openai_aoai(exclude='aoai')
|
|
298
|
+
|
|
299
|
+
File samples:
|
|
300
|
+
- key_aoai.txt
|
|
301
|
+
|
|
302
|
+
```
|
|
303
|
+
aoai-12345abcdef67890ghijklmnopqr
|
|
304
|
+
aoai-09876zyxwvuts54321fedcba
|
|
305
|
+
```
|
|
306
|
+
|
|
307
|
+
- base_aoai.txt
|
|
308
|
+
|
|
309
|
+
```
|
|
310
|
+
https://api.azure.com/v1
|
|
311
|
+
https://api.azure2.com/v1
|
|
312
|
+
```
|
|
313
|
+
|
|
314
|
+
Notes:
|
|
315
|
+
- The function checks for API keys and base URLs in the following environment variables: 'OPENAI_API_KEY', 'AZURE_OPENAI_API_KEY',
|
|
316
|
+
'OPENAI_API_BASE' and 'AZURE_OPENAI_API_BASE'. If these are not found, it attempts to read from the specified files in the
|
|
317
|
+
'key_file_path' directory.
|
|
318
|
+
- The API version for Azure configurations is set to DEFAULT_AZURE_API_VERSION by default.
|
|
319
|
+
- If 'exclude' is set to 'openai', only Azure OpenAI configurations are returned, and vice versa.
|
|
320
|
+
- The function assumes that the API keys and base URLs in the environment variables are separated by new lines if there are
|
|
321
|
+
multiple entries.
|
|
322
|
+
"""
|
|
323
|
+
if exclude != "openai" and key_file_path is not None:
|
|
324
|
+
# skip if key_file_path is None
|
|
325
|
+
if openai_api_key_file is not None:
|
|
326
|
+
# skip if openai_api_key_file is None
|
|
327
|
+
try:
|
|
328
|
+
with open(f"{key_file_path}/{openai_api_key_file}") as key_file:
|
|
329
|
+
os.environ["OPENAI_API_KEY"] = key_file.read().strip()
|
|
330
|
+
except FileNotFoundError:
|
|
331
|
+
logging.info(
|
|
332
|
+
"OPENAI_API_KEY is not found in os.environ "
|
|
333
|
+
"and key_openai.txt is not found in the specified path. You can specify the api_key in the config_list."
|
|
334
|
+
)
|
|
335
|
+
if openai_api_base_file is not None:
|
|
336
|
+
# skip if openai_api_base_file is None
|
|
337
|
+
try:
|
|
338
|
+
with open(f"{key_file_path}/{openai_api_base_file}") as key_file:
|
|
339
|
+
os.environ["OPENAI_API_BASE"] = key_file.read().strip()
|
|
340
|
+
except FileNotFoundError:
|
|
341
|
+
logging.info(
|
|
342
|
+
"OPENAI_API_BASE is not found in os.environ "
|
|
343
|
+
"and base_openai.txt is not found in the specified path. You can specify the base_url in the config_list."
|
|
344
|
+
)
|
|
345
|
+
if exclude != "aoai" and key_file_path is not None:
|
|
346
|
+
# skip if key_file_path is None
|
|
347
|
+
if aoai_api_key_file is not None:
|
|
348
|
+
try:
|
|
349
|
+
with open(f"{key_file_path}/{aoai_api_key_file}") as key_file:
|
|
350
|
+
os.environ["AZURE_OPENAI_API_KEY"] = key_file.read().strip()
|
|
351
|
+
except FileNotFoundError:
|
|
352
|
+
logging.info(
|
|
353
|
+
"AZURE_OPENAI_API_KEY is not found in os.environ "
|
|
354
|
+
"and key_aoai.txt is not found in the specified path. You can specify the api_key in the config_list."
|
|
355
|
+
)
|
|
356
|
+
if aoai_api_base_file is not None:
|
|
357
|
+
try:
|
|
358
|
+
with open(f"{key_file_path}/{aoai_api_base_file}") as key_file:
|
|
359
|
+
os.environ["AZURE_OPENAI_API_BASE"] = key_file.read().strip()
|
|
360
|
+
except FileNotFoundError:
|
|
361
|
+
logging.info(
|
|
362
|
+
"AZURE_OPENAI_API_BASE is not found in os.environ "
|
|
363
|
+
"and base_aoai.txt is not found in the specified path. You can specify the base_url in the config_list."
|
|
364
|
+
)
|
|
365
|
+
aoai_config = (
|
|
366
|
+
get_config_list(
|
|
367
|
+
# Assuming Azure OpenAI api keys in os.environ["AZURE_OPENAI_API_KEY"], in separated lines
|
|
368
|
+
api_keys=os.environ.get("AZURE_OPENAI_API_KEY", "").split("\n"),
|
|
369
|
+
# Assuming Azure OpenAI api bases in os.environ["AZURE_OPENAI_API_BASE"], in separated lines
|
|
370
|
+
base_urls=os.environ.get("AZURE_OPENAI_API_BASE", "").split("\n"),
|
|
371
|
+
api_type="azure",
|
|
372
|
+
api_version=DEFAULT_AZURE_API_VERSION,
|
|
373
|
+
)
|
|
374
|
+
if exclude != "aoai"
|
|
375
|
+
else []
|
|
376
|
+
)
|
|
377
|
+
# process openai base urls
|
|
378
|
+
base_urls_env_var = os.environ.get("OPENAI_API_BASE", None)
|
|
379
|
+
base_urls = base_urls_env_var if base_urls_env_var is None else base_urls_env_var.split("\n")
|
|
380
|
+
openai_config = (
|
|
381
|
+
get_config_list(
|
|
382
|
+
# Assuming OpenAI API_KEY in os.environ["OPENAI_API_KEY"]
|
|
383
|
+
api_keys=os.environ.get("OPENAI_API_KEY", "").split("\n"),
|
|
384
|
+
base_urls=base_urls,
|
|
385
|
+
)
|
|
386
|
+
if exclude != "openai"
|
|
387
|
+
else []
|
|
388
|
+
)
|
|
389
|
+
config_list = openai_config + aoai_config
|
|
390
|
+
return config_list
|
|
391
|
+
|
|
392
|
+
|
|
393
|
+
@export_module("autogen")
|
|
394
|
+
def config_list_from_models(
|
|
395
|
+
key_file_path: str | None = ".",
|
|
396
|
+
openai_api_key_file: str | None = "key_openai.txt",
|
|
397
|
+
aoai_api_key_file: str | None = "key_aoai.txt",
|
|
398
|
+
aoai_api_base_file: str | None = "base_aoai.txt",
|
|
399
|
+
exclude: str | None = None,
|
|
400
|
+
model_list: list[str] | None = None,
|
|
401
|
+
) -> list[dict[str, Any]]:
|
|
402
|
+
"""Get a list of configs for API calls with models specified in the model list.
|
|
403
|
+
|
|
404
|
+
This function extends `config_list_openai_aoai` by allowing to clone its' out for each of the models provided.
|
|
405
|
+
Each configuration will have a 'model' key with the model name as its value. This is particularly useful when
|
|
406
|
+
all endpoints have same set of models.
|
|
407
|
+
|
|
408
|
+
Args:
|
|
409
|
+
key_file_path (str, optional): The path to the key files.
|
|
410
|
+
openai_api_key_file (str, optional): The file name of the OpenAI API key.
|
|
411
|
+
aoai_api_key_file (str, optional): The file name of the Azure OpenAI API key.
|
|
412
|
+
aoai_api_base_file (str, optional): The file name of the Azure OpenAI API base.
|
|
413
|
+
exclude (str, optional): The API type to exclude, "openai" or "aoai".
|
|
414
|
+
model_list (list, optional): The list of model names to include in the configs.
|
|
415
|
+
|
|
416
|
+
Returns:
|
|
417
|
+
list: A list of configs for OpenAI API calls, each including model information.
|
|
418
|
+
|
|
419
|
+
Example:
|
|
420
|
+
```python
|
|
421
|
+
# Define the path where the API key files are located
|
|
422
|
+
key_file_path = "/path/to/key/files"
|
|
423
|
+
|
|
424
|
+
# Define the file names for the OpenAI and Azure OpenAI API keys and bases
|
|
425
|
+
openai_api_key_file = "key_openai.txt"
|
|
426
|
+
aoai_api_key_file = "key_aoai.txt"
|
|
427
|
+
aoai_api_base_file = "base_aoai.txt"
|
|
428
|
+
|
|
429
|
+
# Define the list of models for which to create configurations
|
|
430
|
+
model_list = ["gpt-4", "gpt-3.5-turbo"]
|
|
431
|
+
|
|
432
|
+
# Call the function to get a list of configuration dictionaries
|
|
433
|
+
config_list = config_list_from_models(
|
|
434
|
+
key_file_path=key_file_path,
|
|
435
|
+
openai_api_key_file=openai_api_key_file,
|
|
436
|
+
aoai_api_key_file=aoai_api_key_file,
|
|
437
|
+
aoai_api_base_file=aoai_api_base_file,
|
|
438
|
+
model_list=model_list,
|
|
439
|
+
)
|
|
440
|
+
|
|
441
|
+
# The `config_list` will contain configurations for the specified models, for example:
|
|
442
|
+
# [
|
|
443
|
+
# {'api_key': '...', 'base_url': 'https://api.openai.com', 'model': 'gpt-4'},
|
|
444
|
+
# {'api_key': '...', 'base_url': 'https://api.openai.com', 'model': 'gpt-3.5-turbo'}
|
|
445
|
+
# ]
|
|
446
|
+
```
|
|
447
|
+
"""
|
|
448
|
+
config_list = config_list_openai_aoai(
|
|
449
|
+
key_file_path=key_file_path,
|
|
450
|
+
openai_api_key_file=openai_api_key_file,
|
|
451
|
+
aoai_api_key_file=aoai_api_key_file,
|
|
452
|
+
aoai_api_base_file=aoai_api_base_file,
|
|
453
|
+
exclude=exclude,
|
|
454
|
+
)
|
|
455
|
+
if model_list:
|
|
456
|
+
config_list = [{**config, "model": model} for model in model_list for config in config_list]
|
|
457
|
+
return config_list
|
|
458
|
+
|
|
459
|
+
|
|
460
|
+
@export_module("autogen")
|
|
461
|
+
def config_list_gpt4_gpt35(
|
|
462
|
+
key_file_path: str | None = ".",
|
|
463
|
+
openai_api_key_file: str | None = "key_openai.txt",
|
|
464
|
+
aoai_api_key_file: str | None = "key_aoai.txt",
|
|
465
|
+
aoai_api_base_file: str | None = "base_aoai.txt",
|
|
466
|
+
exclude: str | None = None,
|
|
467
|
+
) -> list[dict[str, Any]]:
|
|
468
|
+
"""Get a list of configs for 'gpt-4' followed by 'gpt-3.5-turbo' API calls.
|
|
469
|
+
|
|
470
|
+
Args:
|
|
471
|
+
key_file_path (str, optional): The path to the key files.
|
|
472
|
+
openai_api_key_file (str, optional): The file name of the openai api key.
|
|
473
|
+
aoai_api_key_file (str, optional): The file name of the azure openai api key.
|
|
474
|
+
aoai_api_base_file (str, optional): The file name of the azure openai api base.
|
|
475
|
+
exclude (str, optional): The api type to exclude, "openai" or "aoai".
|
|
476
|
+
|
|
477
|
+
Returns:
|
|
478
|
+
list: A list of configs for openai api calls.
|
|
479
|
+
"""
|
|
480
|
+
return config_list_from_models(
|
|
481
|
+
key_file_path,
|
|
482
|
+
openai_api_key_file,
|
|
483
|
+
aoai_api_key_file,
|
|
484
|
+
aoai_api_base_file,
|
|
485
|
+
exclude,
|
|
486
|
+
model_list=["gpt-4", "gpt-3.5-turbo"],
|
|
487
|
+
)
|
|
488
|
+
|
|
489
|
+
|
|
490
|
+
@export_module("autogen")
|
|
491
|
+
@deprecated(
|
|
492
|
+
"`autogen.filter_config(...)` is deprecated. "
|
|
493
|
+
'Please use the "autogen.LLMConfig.from_json(path="OAI_CONFIG_LIST").where(model="gpt-4o")" method instead. '
|
|
494
|
+
"Scheduled for removal in 0.11.0 version."
|
|
495
|
+
)
|
|
496
|
+
def filter_config(
|
|
497
|
+
config_list: list[dict[str, Any]],
|
|
498
|
+
filter_dict: dict[str, list[str | None] | set[str | None]] | None,
|
|
499
|
+
exclude: bool = False,
|
|
500
|
+
) -> list[dict[str, Any]]:
|
|
501
|
+
"""Filter configuration dictionaries based on specified criteria.
|
|
502
|
+
|
|
503
|
+
This function filters a list of configuration dictionaries by applying ALL criteria specified in `filter_dict`.
|
|
504
|
+
A configuration is included in the result if it satisfies every key-value constraint in the filter dictionary.
|
|
505
|
+
For each filter key, the configuration's corresponding field value must match at least one of the acceptable
|
|
506
|
+
values (OR logic within each criteria, AND logic between different criteria).
|
|
507
|
+
|
|
508
|
+
Args:
|
|
509
|
+
config_list (list of dict): A list of configuration dictionaries to be filtered.
|
|
510
|
+
|
|
511
|
+
filter_dict (dict, optional): A dictionary specifying filter criteria where:
|
|
512
|
+
- Keys are field names to check in each configuration dictionary
|
|
513
|
+
- Values can be:
|
|
514
|
+
* a single string value (e.g., {"model": "gpt-4o"})
|
|
515
|
+
* a list or set of acceptable values for that field (e.g., {"model": ["gpt-4o", "gpt-4o-mini"]})
|
|
516
|
+
- A configuration matches if ALL filter keys are satisfied AND for each key,
|
|
517
|
+
the config's field value matches at least one acceptable value
|
|
518
|
+
- If a filter value includes None, configurations missing that field will match
|
|
519
|
+
- If None, no filtering is applied
|
|
520
|
+
|
|
521
|
+
exclude (bool, optional): If False (default), return configurations that match the filter.
|
|
522
|
+
If True, return configurations that do NOT match the filter.
|
|
523
|
+
|
|
524
|
+
Returns:
|
|
525
|
+
list of dict: Filtered list of configuration dictionaries.
|
|
526
|
+
|
|
527
|
+
Matching Logic:
|
|
528
|
+
- **Between different filter keys**: AND logic (all criteria must be satisfied)
|
|
529
|
+
- **Within each filter key's values**: OR logic (any acceptable value can match)
|
|
530
|
+
- **For list-type config values**: Match if there's any intersection with acceptable values
|
|
531
|
+
- **For scalar config values**: Match if the value is in the list of acceptable values
|
|
532
|
+
- **Missing fields**: Only match if None is included in the acceptable values for that field
|
|
533
|
+
|
|
534
|
+
Examples:
|
|
535
|
+
```python
|
|
536
|
+
configs = [
|
|
537
|
+
{"model": "gpt-3.5-turbo", "api_type": "openai"},
|
|
538
|
+
{"model": "gpt-4", "api_type": "openai"},
|
|
539
|
+
{"model": "gpt-3.5-turbo", "api_type": "azure", "api_version": "2024-02-01"},
|
|
540
|
+
{"model": "gpt-4", "tags": ["premium", "latest"]},
|
|
541
|
+
]
|
|
542
|
+
|
|
543
|
+
# Example 1: Single criterion with single string
|
|
544
|
+
filter_dict = {"model": "gpt-4o"}
|
|
545
|
+
result = filter_config(configs, filter_dict)
|
|
546
|
+
# Returns: [{"model": "gpt-4o", "api_type": "openai"}] if present
|
|
547
|
+
|
|
548
|
+
# Example 2: Single criterion - matches any model in the list
|
|
549
|
+
filter_dict = {"model": ["gpt-4", "gpt-4o"]}
|
|
550
|
+
result = filter_config(configs, filter_dict)
|
|
551
|
+
# Returns: [{"model": "gpt-4", "api_type": "openai"}, {"model": "gpt-4", "tags": ["premium", "latest"]}]
|
|
552
|
+
|
|
553
|
+
# Example 3: Multiple criteria - must satisfy ALL the conditions
|
|
554
|
+
filter_dict = {"model": ["gpt-3.5-turbo"], "api_type": ["azure"]}
|
|
555
|
+
result = filter_config(configs, filter_dict)
|
|
556
|
+
# Returns: [{"model": "gpt-3.5-turbo", "api_type": "azure", "api_version": "2024-02-01"}]
|
|
557
|
+
|
|
558
|
+
# Example 4: Tag filtering with list intersection
|
|
559
|
+
filter_dict = {"tags": ["premium"]}
|
|
560
|
+
result = filter_config(configs, filter_dict)
|
|
561
|
+
# Returns: [{"model": "gpt-4", "tags": ["premium", "latest"]}]
|
|
562
|
+
|
|
563
|
+
# Example 5: Exclude matching configurations
|
|
564
|
+
filter_dict = {"api_type": ["openai"]}
|
|
565
|
+
result = filter_config(configs, filter_dict, exclude=True)
|
|
566
|
+
# Returns configs that do NOT have api_type="openai"
|
|
567
|
+
```
|
|
568
|
+
Note:
|
|
569
|
+
- If `filter_dict` is empty or None, no filtering is applied and `config_list` is returned as is.
|
|
570
|
+
- If a configuration dictionary in `config_list` does not contain a key specified in `filter_dict`,
|
|
571
|
+
it is considered a non-match and is excluded from the result.
|
|
572
|
+
|
|
573
|
+
"""
|
|
574
|
+
warnings.warn(
|
|
575
|
+
"`autogen.filter_config(...)` is deprecated. "
|
|
576
|
+
'Please use the "autogen.LLMConfig.from_json(path="OAI_CONFIG_LIST").where(model="gpt-4o")" method instead. '
|
|
577
|
+
"Scheduled for removal in 0.11.0 version.",
|
|
578
|
+
DeprecationWarning,
|
|
579
|
+
)
|
|
580
|
+
|
|
581
|
+
return latest_filter(config_list=config_list, filter_dict=filter_dict, exclude=exclude)
|
|
582
|
+
|
|
583
|
+
|
|
584
|
+
@export_module("autogen")
|
|
585
|
+
@deprecated(
|
|
586
|
+
"`autogen.config_list_from_json(...)` is deprecated. "
|
|
587
|
+
'Please use the "autogen.LLMConfig.from_json(path="OAI_CONFIG_LIST")" method instead. '
|
|
588
|
+
"Scheduled for removal in 0.11.0 version."
|
|
589
|
+
)
|
|
590
|
+
def config_list_from_json(
|
|
591
|
+
env_or_file: str,
|
|
592
|
+
file_location: str | None = "",
|
|
593
|
+
filter_dict: dict[str, list[str | None] | set[str | None]] | None = None,
|
|
594
|
+
) -> list[dict[str, Any]]:
|
|
595
|
+
"""Retrieves a list of API configurations from a JSON stored in an environment variable or a file.
|
|
596
|
+
|
|
597
|
+
This function attempts to parse JSON data from the given `env_or_file` parameter. If `env_or_file` is an
|
|
598
|
+
environment variable containing JSON data, it will be used directly. Otherwise, it is assumed to be a filename,
|
|
599
|
+
and the function will attempt to read the file from the specified `file_location`.
|
|
600
|
+
|
|
601
|
+
The `filter_dict` parameter allows for filtering the configurations based on specified criteria. Each key in the
|
|
602
|
+
`filter_dict` corresponds to a field in the configuration dictionaries, and the associated value is a list or set
|
|
603
|
+
of acceptable values for that field. If a field is missing in a configuration and `None` is included in the list
|
|
604
|
+
of acceptable values for that field, the configuration will still be considered a match.
|
|
605
|
+
|
|
606
|
+
Args:
|
|
607
|
+
env_or_file (str): The name of the environment variable, the filename, or the environment variable of the filename
|
|
608
|
+
that containing the JSON data.
|
|
609
|
+
file_location (str, optional): The directory path where the file is located, if `env_or_file` is a filename.
|
|
610
|
+
filter_dict (dict, optional): A dictionary specifying the filtering criteria for the configurations, with
|
|
611
|
+
keys representing field names and values being lists or sets of acceptable values for those fields.
|
|
612
|
+
|
|
613
|
+
Example:
|
|
614
|
+
```python
|
|
615
|
+
# Suppose we have an environment variable 'CONFIG_JSON' with the following content:
|
|
616
|
+
# '[{"model": "gpt-3.5-turbo", "api_type": "azure"}, {"model": "gpt-4"}]'
|
|
617
|
+
|
|
618
|
+
# We can retrieve a filtered list of configurations like this:
|
|
619
|
+
filter_criteria = {"model": ["gpt-3.5-turbo"]}
|
|
620
|
+
configs = config_list_from_json("CONFIG_JSON", filter_dict=filter_criteria)
|
|
621
|
+
# The 'configs' variable will now contain only the configurations that match the filter criteria.
|
|
622
|
+
```
|
|
623
|
+
|
|
624
|
+
Returns:
|
|
625
|
+
List[Dict]: A list of configuration dictionaries that match the filtering criteria specified in `filter_dict`.
|
|
626
|
+
|
|
627
|
+
Raises:
|
|
628
|
+
FileNotFoundError: if env_or_file is neither found as an environment variable nor a file
|
|
629
|
+
"""
|
|
630
|
+
warnings.warn(
|
|
631
|
+
"`autogen.config_list_from_json(...)` is deprecated. "
|
|
632
|
+
'Please use the "autogen.LLMConfig.from_json(path="OAI_CONFIG_LIST")" method instead. '
|
|
633
|
+
"Scheduled for removal in 0.11.0 version.",
|
|
634
|
+
DeprecationWarning,
|
|
635
|
+
)
|
|
636
|
+
|
|
637
|
+
return latest_config_list_from_json(
|
|
638
|
+
env_or_file=env_or_file,
|
|
639
|
+
file_location=file_location,
|
|
640
|
+
filter_dict=filter_dict,
|
|
641
|
+
)
|
|
642
|
+
|
|
643
|
+
|
|
644
|
+
def get_config(
|
|
645
|
+
api_key: str | None,
|
|
646
|
+
base_url: str | None = None,
|
|
647
|
+
api_type: str | None = None,
|
|
648
|
+
api_version: str | None = None,
|
|
649
|
+
) -> dict[str, Any]:
|
|
650
|
+
"""Constructs a configuration dictionary for a single model with the provided API configurations.
|
|
651
|
+
|
|
652
|
+
Example:
|
|
653
|
+
```python
|
|
654
|
+
config = get_config(api_key="sk-abcdef1234567890", base_url="https://api.openai.com", api_version="v1")
|
|
655
|
+
# The 'config' variable will now contain:
|
|
656
|
+
# {
|
|
657
|
+
# "api_key": "sk-abcdef1234567890",
|
|
658
|
+
# "base_url": "https://api.openai.com",
|
|
659
|
+
# "api_version": "v1"
|
|
660
|
+
# }
|
|
661
|
+
```
|
|
662
|
+
|
|
663
|
+
Args:
|
|
664
|
+
api_key (str): The API key for authenticating API requests.
|
|
665
|
+
base_url (Optional[str]): The base URL of the API. If not provided, defaults to None.
|
|
666
|
+
api_type (Optional[str]): The type of API. If not provided, defaults to None.
|
|
667
|
+
api_version (Optional[str]): The version of the API. If not provided, defaults to None.
|
|
668
|
+
|
|
669
|
+
Returns:
|
|
670
|
+
Dict: A dictionary containing the provided API configurations.
|
|
671
|
+
"""
|
|
672
|
+
config = {"api_key": api_key}
|
|
673
|
+
if base_url:
|
|
674
|
+
config["base_url"] = os.getenv(base_url, default=base_url)
|
|
675
|
+
if api_type:
|
|
676
|
+
config["api_type"] = os.getenv(api_type, default=api_type)
|
|
677
|
+
if api_version:
|
|
678
|
+
config["api_version"] = os.getenv(api_version, default=api_version)
|
|
679
|
+
return config
|
|
680
|
+
|
|
681
|
+
|
|
682
|
+
@export_module("autogen")
|
|
683
|
+
def config_list_from_dotenv(
|
|
684
|
+
dotenv_file_path: str | None = None,
|
|
685
|
+
model_api_key_map: dict[str, Any] | None = None,
|
|
686
|
+
filter_dict: dict[str, list[str | None] | set[str | None]] | None = None,
|
|
687
|
+
) -> list[dict[str, str | set[str]]]:
|
|
688
|
+
"""Load API configurations from a specified .env file or environment variables and construct a list of configurations.
|
|
689
|
+
|
|
690
|
+
This function will:
|
|
691
|
+
- Load API keys from a provided .env file or from existing environment variables.
|
|
692
|
+
- Create a configuration dictionary for each model using the API keys and additional configurations.
|
|
693
|
+
- Filter and return the configurations based on provided filters.
|
|
694
|
+
|
|
695
|
+
model_api_key_map will default to `{"gpt-4": "OPENAI_API_KEY", "gpt-3.5-turbo": "OPENAI_API_KEY"}` if none
|
|
696
|
+
|
|
697
|
+
Args:
|
|
698
|
+
dotenv_file_path (str, optional): The path to the .env file. Defaults to None.
|
|
699
|
+
model_api_key_map (str/dict, optional): A dictionary mapping models to their API key configurations.
|
|
700
|
+
If a string is provided as configuration, it is considered as an environment
|
|
701
|
+
variable name storing the API key.
|
|
702
|
+
If a dict is provided, it should contain at least 'api_key_env_var' key,
|
|
703
|
+
and optionally other API configurations like 'base_url', 'api_type', and 'api_version'.
|
|
704
|
+
Defaults to a basic map with 'gpt-4' and 'gpt-3.5-turbo' mapped to 'OPENAI_API_KEY'.
|
|
705
|
+
filter_dict (dict, optional): A dictionary containing the models to be loaded.
|
|
706
|
+
Containing a 'model' key mapped to a set of model names to be loaded.
|
|
707
|
+
Defaults to None, which loads all found configurations.
|
|
708
|
+
|
|
709
|
+
Returns:
|
|
710
|
+
List[Dict[str, Union[str, Set[str]]]]: A list of configuration dictionaries for each model.
|
|
711
|
+
|
|
712
|
+
Raises:
|
|
713
|
+
FileNotFoundError: If the specified .env file does not exist.
|
|
714
|
+
TypeError: If an unsupported type of configuration is provided in model_api_key_map.
|
|
715
|
+
"""
|
|
716
|
+
if dotenv_file_path:
|
|
717
|
+
dotenv_path = Path(dotenv_file_path)
|
|
718
|
+
if dotenv_path.exists():
|
|
719
|
+
load_dotenv(dotenv_path)
|
|
720
|
+
else:
|
|
721
|
+
logging.warning(f"The specified .env file {dotenv_path} does not exist.")
|
|
722
|
+
else:
|
|
723
|
+
dotenv_path_str = find_dotenv()
|
|
724
|
+
if not dotenv_path_str:
|
|
725
|
+
logging.warning("No .env file found. Loading configurations from environment variables.")
|
|
726
|
+
dotenv_path = Path(dotenv_path_str)
|
|
727
|
+
load_dotenv(dotenv_path)
|
|
728
|
+
|
|
729
|
+
# Ensure the model_api_key_map is not None to prevent TypeErrors during key assignment.
|
|
730
|
+
model_api_key_map = model_api_key_map or {}
|
|
731
|
+
|
|
732
|
+
# Ensure default models are always considered
|
|
733
|
+
default_models = ["gpt-4", "gpt-3.5-turbo"]
|
|
734
|
+
|
|
735
|
+
for model in default_models:
|
|
736
|
+
# Only assign default API key if the model is not present in the map.
|
|
737
|
+
# If model is present but set to invalid/empty, do not overwrite.
|
|
738
|
+
if model not in model_api_key_map:
|
|
739
|
+
model_api_key_map[model] = "OPENAI_API_KEY"
|
|
740
|
+
|
|
741
|
+
env_var = []
|
|
742
|
+
# Loop over the models and create configuration dictionaries
|
|
743
|
+
for model, config in model_api_key_map.items():
|
|
744
|
+
if isinstance(config, str):
|
|
745
|
+
api_key_env_var = config
|
|
746
|
+
config_dict = get_config(api_key=os.getenv(api_key_env_var))
|
|
747
|
+
elif isinstance(config, dict):
|
|
748
|
+
api_key = os.getenv(config.get("api_key_env_var", "OPENAI_API_KEY"))
|
|
749
|
+
config_without_key_var = {k: v for k, v in config.items() if k != "api_key_env_var"}
|
|
750
|
+
config_dict = get_config(api_key=api_key, **config_without_key_var)
|
|
751
|
+
else:
|
|
752
|
+
logging.warning(
|
|
753
|
+
"Unsupported configuration type encountered for a model. Please check your model_api_key_map."
|
|
754
|
+
)
|
|
755
|
+
|
|
756
|
+
if not config_dict["api_key"] or config_dict["api_key"].strip() == "":
|
|
757
|
+
logging.warning("API key not found or empty for a model. Please ensure path to .env file is correct.")
|
|
758
|
+
continue # Skip this configuration and continue with the next
|
|
759
|
+
|
|
760
|
+
# Add model to the configuration and append to the list
|
|
761
|
+
config_dict["model"] = model
|
|
762
|
+
env_var.append(config_dict)
|
|
763
|
+
|
|
764
|
+
fd, temp_name = tempfile.mkstemp()
|
|
765
|
+
try:
|
|
766
|
+
with os.fdopen(fd, "w+") as temp:
|
|
767
|
+
env_var_str = json.dumps(env_var)
|
|
768
|
+
temp.write(env_var_str)
|
|
769
|
+
temp.flush()
|
|
770
|
+
|
|
771
|
+
# Assuming config_list_from_json is a valid function from your code
|
|
772
|
+
config_list = config_list_from_json(env_or_file=temp_name, filter_dict=filter_dict)
|
|
773
|
+
finally:
|
|
774
|
+
# The file is deleted after using its name (to prevent windows build from breaking)
|
|
775
|
+
os.remove(temp_name)
|
|
776
|
+
|
|
777
|
+
if len(config_list) == 0:
|
|
778
|
+
logging.error("No configurations loaded.")
|
|
779
|
+
return []
|
|
780
|
+
|
|
781
|
+
logging.info(f"Models available: {[config['model'] for config in config_list]}")
|
|
782
|
+
return config_list
|
|
783
|
+
|
|
784
|
+
|
|
785
|
+
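A usage sketch for `config_list_from_dotenv`, assuming a hypothetical `.env` file that defines `OPENAI_API_KEY` and `OTHER_API_KEY`; the `local-llm` entry and its endpoint are illustrative only:

```python
# .env (hypothetical):
#   OPENAI_API_KEY=sk-...
#   OTHER_API_KEY=ak-...

config_list = config_list_from_dotenv(
    dotenv_file_path=".env",
    model_api_key_map={
        # string value: name of the environment variable holding the API key
        "gpt-4": "OPENAI_API_KEY",
        # dict value: 'api_key_env_var' plus optional base_url/api_type/api_version
        "local-llm": {"api_key_env_var": "OTHER_API_KEY", "base_url": "http://localhost:8000/v1"},
    },
    filter_dict={"model": {"gpt-4"}},
)
# Only the gpt-4 entry passes the filter; the local-llm entry is filtered out.
```
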
def retrieve_assistants_by_name(client: "OpenAI", name: str) -> list["Assistant"]:
    """Return the assistants with the given name from OAI assistant API"""
    assistants = client.beta.assistants.list()
    candidate_assistants = []
    for assistant in assistants.data:
        if assistant.name == name:
            candidate_assistants.append(assistant)
    return candidate_assistants

def detect_gpt_assistant_api_version() -> str:
    """Detect the openai assistant API version"""
    oai_version = importlib.metadata.version("openai")
    return "v1" if parse(oai_version) < parse("1.21") else "v2"

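The cutoff above treats installed `openai` releases before 1.21 as the v1 Assistants API and later releases as v2. A small illustration, assuming `parse` here is `packaging.version.parse`:

```python
from packaging.version import parse  # assumed source of `parse`

assert parse("1.20.0") < parse("1.21")       # older client -> "v1"
assert not parse("1.30.1") < parse("1.21")   # newer client -> "v2"
```
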
def create_gpt_vector_store(client: "OpenAI", name: str, fild_ids: list[str]) -> Any:
    """Create an OpenAI vector store for a GPT assistant"""
    try:
        vector_store = client.vector_stores.create(name=name)
    except Exception as e:
        raise AttributeError(f"Failed to create vector store, please install the latest OpenAI python package: {e}")

    # poll the status of the file batch for completion.
    batch = client.vector_stores.file_batches.create_and_poll(vector_store_id=vector_store.id, file_ids=fild_ids)

    if batch.status == "in_progress":
        time.sleep(1)
        logging.debug(f"file batch status: {batch.file_counts}")
        batch = client.vector_stores.file_batches.poll(vector_store_id=vector_store.id, batch_id=batch.id)

    if batch.status == "completed":
        return vector_store

    raise ValueError(f"Failed to upload files to vector store {vector_store.id}:{batch.status}")

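A usage sketch for `create_gpt_vector_store`; the upload step and file name are illustrative, and it assumes an `openai` client new enough to expose `client.vector_stores` as used above:

```python
from openai import OpenAI

client = OpenAI()
# Upload a file for assistants first (illustrative file name), then attach it to a new store.
uploaded = client.files.create(file=open("manual.pdf", "rb"), purpose="assistants")
store = create_gpt_vector_store(client, "docs-store", [uploaded.id])
print(store.id)
```
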
def create_gpt_assistant(
    client: "OpenAI", name: str, instructions: str, model: str, assistant_config: dict[str, Any]
) -> "Assistant":
    """Create an OpenAI GPT assistant"""
    assistant_create_kwargs = {}
    gpt_assistant_api_version = detect_gpt_assistant_api_version()
    tools = assistant_config.get("tools", [])

    if gpt_assistant_api_version == "v2":
        tool_resources = assistant_config.get("tool_resources", {})
        file_ids = assistant_config.get("file_ids")
        if tool_resources.get("file_search") is not None and file_ids is not None:
            raise ValueError(
                "Cannot specify both `tool_resources['file_search']` and `file_ids` in the assistant config."
            )

        # Designed for backwards compatibility for the V1 API
        # Instead of V1 AssistantFile, files are attached to Assistants using the tool_resources object.
        for tool in tools:
            if tool["type"] == "retrieval":
                tool["type"] = "file_search"
                if file_ids is not None:
                    # create a vector store for the file search tool
                    vs = create_gpt_vector_store(client, f"{name}-vectorestore", file_ids)
                    tool_resources["file_search"] = {
                        "vector_store_ids": [vs.id],
                    }
            elif tool["type"] == "code_interpreter" and file_ids is not None:
                tool_resources["code_interpreter"] = {
                    "file_ids": file_ids,
                }

        assistant_create_kwargs["tools"] = tools
        if len(tool_resources) > 0:
            assistant_create_kwargs["tool_resources"] = tool_resources
    else:
        # forward compatibility is not supported
        if "tool_resources" in assistant_config:
            raise ValueError("The `tool_resources` argument is not supported in the openai assistant V1 API.")
        if any(tool["type"] == "file_search" for tool in tools):
            raise ValueError(
                "The `file_search` tool is not supported in the openai assistant V1 API, please use `retrieval`."
            )
        assistant_create_kwargs["tools"] = tools
        assistant_create_kwargs["file_ids"] = assistant_config.get("file_ids", [])

    logging.info(f"Creating assistant with config: {assistant_create_kwargs}")
    return client.beta.assistants.create(name=name, instructions=instructions, model=model, **assistant_create_kwargs)

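A sketch of an `assistant_config` handled by the v2 branch above: a v1-style `retrieval` tool plus `file_ids` is rewritten into a `file_search` tool backed by a freshly created vector store (the names, model, and file ID below are illustrative):

```python
assistant_config = {
    "tools": [{"type": "retrieval"}, {"type": "code_interpreter"}],
    "file_ids": ["file-abc123"],  # illustrative file ID
}
assistant = create_gpt_assistant(
    client,
    name="doc-helper",
    instructions="Answer questions using the attached files.",
    model="gpt-4o",
    assistant_config=assistant_config,
)
# Under the v2 API this yields tools=[{"type": "file_search"}, {"type": "code_interpreter"}] and
# tool_resources pointing at a new vector store plus the code_interpreter file IDs.
```
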
def update_gpt_assistant(client: "OpenAI", assistant_id: str, assistant_config: dict[str, Any]) -> "Assistant":
    """Update an OpenAI GPT assistant"""
    gpt_assistant_api_version = detect_gpt_assistant_api_version()
    assistant_update_kwargs = {}

    if assistant_config.get("tools") is not None:
        assistant_update_kwargs["tools"] = assistant_config["tools"]

    if assistant_config.get("instructions") is not None:
        assistant_update_kwargs["instructions"] = assistant_config["instructions"]

    if gpt_assistant_api_version == "v2":
        if assistant_config.get("tool_resources") is not None:
            assistant_update_kwargs["tool_resources"] = assistant_config["tool_resources"]
    else:
        if assistant_config.get("file_ids") is not None:
            assistant_update_kwargs["file_ids"] = assistant_config["file_ids"]

    return client.beta.assistants.update(assistant_id=assistant_id, **assistant_update_kwargs)

def _satisfies(config_value: Any, acceptable_values: Any) -> bool:
    if isinstance(config_value, list):
        return bool(set(config_value) & set(acceptable_values))  # Non-empty intersection
    else:
        return config_value in acceptable_values
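
`_satisfies` appears to be the matching rule applied when filtering configurations (e.g., via `filter_dict`): list-valued config fields match on a non-empty intersection with the acceptable values, while scalar fields match on plain membership. For example:

```python
assert _satisfies("gpt-4", {"gpt-4", "gpt-3.5-turbo"})      # scalar: membership test
assert _satisfies(["azure", "openai"], ["azure"])           # list: non-empty intersection
assert not _satisfies("gpt-4o", {"gpt-4", "gpt-3.5-turbo"})
```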