ag2 0.9.1a1__py3-none-any.whl → 0.9.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ag2 has been flagged as potentially problematic; see the release advisory for more details.
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/METADATA +272 -75
- ag2-0.9.2.dist-info/RECORD +406 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/WHEEL +1 -2
- autogen/__init__.py +89 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +427 -0
- autogen/_website/generate_mkdocs.py +1174 -0
- autogen/_website/notebook_processor.py +476 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +412 -0
- autogen/agentchat/__init__.py +44 -0
- autogen/agentchat/agent.py +182 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +309 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +429 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +566 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +187 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
- autogen/agentchat/contrib/rag/query_engine.py +74 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
- autogen/agentchat/contrib/swarm_agent.py +1425 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +232 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +303 -0
- autogen/agentchat/conversable_agent.py +4023 -0
- autogen/agentchat/group/__init__.py +64 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +41 -0
- autogen/agentchat/group/context_variables.py +192 -0
- autogen/agentchat/group/group_tool_executor.py +202 -0
- autogen/agentchat/group/group_utils.py +591 -0
- autogen/agentchat/group/handoffs.py +244 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +237 -0
- autogen/agentchat/group/on_condition.py +58 -0
- autogen/agentchat/group/on_context_condition.py +54 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +159 -0
- autogen/agentchat/group/patterns/manual.py +176 -0
- autogen/agentchat/group/patterns/pattern.py +288 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +26 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/group_chat_target.py +132 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +413 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1694 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
- autogen/agentchat/realtime/experimental/function_observer.py +85 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +111 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +73 -0
- autogen/agents/contrib/time/time_tool_agent.py +51 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
- autogen/agents/experimental/document_agent/document_agent.py +461 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +380 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
- autogen/agents/experimental/document_agent/parser_utils.py +130 -0
- autogen/agents/experimental/document_agent/url_utils.py +426 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +77 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +62 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +75 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +102 -0
- autogen/cache/in_memory_cache.py +58 -0
- autogen/cache/redis_cache.py +123 -0
- autogen/code_utils.py +596 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +119 -0
- autogen/coding/docker_commandline_code_executor.py +268 -0
- autogen/coding/factory.py +47 -0
- autogen/coding/func_with_reqs.py +202 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +167 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +231 -0
- autogen/coding/jupyter/jupyter_code_executor.py +160 -0
- autogen/coding/jupyter/local_jupyter_server.py +172 -0
- autogen/coding/local_commandline_code_executor.py +405 -0
- autogen/coding/markdown_code_extractor.py +45 -0
- autogen/coding/utils.py +56 -0
- autogen/doc_utils.py +34 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1013 -0
- autogen/events/base_event.py +99 -0
- autogen/events/client_events.py +167 -0
- autogen/events/helpers.py +36 -0
- autogen/events/print_event.py +46 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +80 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +225 -0
- autogen/fast_depends/core/model.py +576 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +29 -0
- autogen/fast_depends/dependencies/provider.py +39 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +280 -0
- autogen/fast_depends/utils.py +187 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +178 -0
- autogen/import_utils.py +526 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
- autogen/interop/langchain/langchain_tool.py +82 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +179 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
- autogen/interop/registry.py +69 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +56 -0
- autogen/io/run_response.py +293 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +213 -0
- autogen/json_utils.py +43 -0
- autogen/llm_config.py +382 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +128 -0
- autogen/logger/file_logger.py +261 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +523 -0
- autogen/math_utils.py +339 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/mcp_client.py +208 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +63 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +581 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +158 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +114 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +400 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +948 -0
- autogen/messages/base_message.py +107 -0
- autogen/messages/client_messages.py +171 -0
- autogen/messages/print_message.py +49 -0
- autogen/oai/__init__.py +53 -0
- autogen/oai/anthropic.py +714 -0
- autogen/oai/bedrock.py +628 -0
- autogen/oai/cerebras.py +299 -0
- autogen/oai/client.py +1444 -0
- autogen/oai/client_utils.py +169 -0
- autogen/oai/cohere.py +479 -0
- autogen/oai/gemini.py +998 -0
- autogen/oai/gemini_types.py +155 -0
- autogen/oai/groq.py +305 -0
- autogen/oai/mistral.py +303 -0
- autogen/oai/oai_models/__init__.py +11 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +87 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +86 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
- autogen/oai/oai_models/completion_usage.py +60 -0
- autogen/oai/ollama.py +643 -0
- autogen/oai/openai_utils.py +881 -0
- autogen/oai/together.py +370 -0
- autogen/retrieve_utils.py +491 -0
- autogen/runtime_logging.py +160 -0
- autogen/token_count_utils.py +267 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +41 -0
- autogen/tools/dependency_injection.py +254 -0
- autogen/tools/experimental/__init__.py +48 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +161 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +328 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1316 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +183 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
- autogen/tools/function_utils.py +411 -0
- autogen/tools/tool.py +187 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +69 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
- ag2-0.9.1a1.dist-info/RECORD +0 -6
- ag2-0.9.1a1.dist-info/top_level.txt +0 -1
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/LICENSE +0 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/NOTICE.md +0 -0
autogen/oai/together.py
ADDED
|
@@ -0,0 +1,370 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
"""Create an OpenAI-compatible client using Together.AI's API.
|
|
8
|
+
|
|
9
|
+
Example:
|
|
10
|
+
```python
|
|
11
|
+
llm_config = {
|
|
12
|
+
"config_list": [
|
|
13
|
+
{
|
|
14
|
+
"api_type": "together",
|
|
15
|
+
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
|
16
|
+
"api_key": os.environ.get("TOGETHER_API_KEY"),
|
|
17
|
+
}
|
|
18
|
+
]
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
agent = autogen.AssistantAgent("my_agent", llm_config=llm_config)
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
Install Together.AI python library using: pip install --upgrade together
|
|
25
|
+
|
|
26
|
+
Resources:
|
|
27
|
+
- https://docs.together.ai/docs/inference-python
|
|
28
|
+
"""
|
|
29
|
+
|
|
30
|
+
from __future__ import annotations
|
|
31
|
+
|
|
32
|
+
import copy
|
|
33
|
+
import os
|
|
34
|
+
import time
|
|
35
|
+
import warnings
|
|
36
|
+
from typing import Any, Literal, Optional, Union
|
|
37
|
+
|
|
38
|
+
from pydantic import Field
|
|
39
|
+
|
|
40
|
+
from ..import_utils import optional_import_block, require_optional_import
|
|
41
|
+
from ..llm_config import LLMConfigEntry, register_llm_config
|
|
42
|
+
from .client_utils import should_hide_tools, validate_parameter
|
|
43
|
+
from .oai_models import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall, Choice, CompletionUsage
|
|
44
|
+
|
|
45
|
+
with optional_import_block():
|
|
46
|
+
from together import Together
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
@register_llm_config
class TogetherLLMConfigEntry(LLMConfigEntry):
    # Pydantic config entry for Together.AI. Registering it makes config-list
    # entries with "api_type": "together" validate against this model.
    api_type: Literal["together"] = "together"
    # Maximum number of tokens to generate; Together.AI's own default of 512 is mirrored here.
    max_tokens: int = Field(default=512, ge=0)
    # Whether to stream the response (tools and streaming are mutually exclusive in TogetherClient).
    stream: bool = False
    # Sampling controls; None means "let the API use its default".
    temperature: Optional[float] = Field(default=None)
    top_p: Optional[float] = Field(default=None)
    top_k: Optional[int] = Field(default=None)
    repetition_penalty: Optional[float] = Field(default=None)
    # OpenAI-style penalties, constrained to the API's [-2, 2] range.
    presence_penalty: Optional[float] = Field(default=None, ge=-2, le=2)
    frequency_penalty: Optional[float] = Field(default=None, ge=-2, le=2)
    min_p: Optional[float] = Field(default=None, ge=0, le=1)
    # Optional moderation model name; not validated against a fixed list (see parse_params).
    safety_model: Optional[str] = None
    # Controls when tool definitions are withheld from the request (see should_hide_tools).
    hide_tools: Literal["if_all_run", "if_any_run", "never"] = "never"
    # Optional [input_price, output_price] per 1k tokens; exactly two entries when given.
    price: Optional[list[float]] = Field(default=None, min_length=2, max_length=2)
    tool_choice: Optional[Union[str, dict[str, Union[str, dict[str, str]]]]] = (
        None  # dict is the tool to call: {"type": "function", "function": {"name": "my_function"}}
    )

    def create_client(self):
        # Clients are constructed by TogetherClient directly; this factory hook
        # from LLMConfigEntry is intentionally unimplemented for Together.AI.
        raise NotImplementedError("TogetherLLMConfigEntry.create_client is not implemented.")
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class TogetherClient:
    """Client for Together.AI's API.

    Implements the AG2 model-client protocol (``message_retrieval``, ``cost``,
    ``get_usage``, ``create``) on top of the ``together`` Python SDK, returning
    OpenAI-compatible ``ChatCompletion`` objects.
    """

    def __init__(self, **kwargs):
        """Requires api_key or environment variable to be set

        Args:
            **kwargs: Additional keyword arguments to pass to the client.
        """
        # Ensure we have the api_key upon instantiation
        self.api_key = kwargs.get("api_key")
        if not self.api_key:
            self.api_key = os.getenv("TOGETHER_API_KEY")

        # Structured output is not available through Together.AI's API; warn rather than fail.
        if "response_format" in kwargs and kwargs["response_format"] is not None:
            warnings.warn("response_format is not supported for Together.AI, it will be ignored.", UserWarning)

        assert self.api_key, (
            "Please include the api_key in your config list entry for Together.AI or set the TOGETHER_API_KEY env variable."
        )

    def message_retrieval(self, response) -> list:
        """Retrieve and return a list of strings or a list of Choice.Message from the response.

        NOTE: if a list of Choice.Message is returned, it currently needs to contain the fields of OpenAI's ChatCompletion Message object,
        since that is expected for function or tool calling in the rest of the codebase at the moment, unless a custom agent is being used.
        """
        return [choice.message for choice in response.choices]

    def cost(self, response) -> float:
        """Return the cost attached to the ChatCompletion built by ``create``."""
        return response.cost

    @staticmethod
    def get_usage(response) -> dict:
        """Return usage summary of the response using RESPONSE_USAGE_KEYS."""
        return {
            "prompt_tokens": response.usage.prompt_tokens,
            "completion_tokens": response.usage.completion_tokens,
            "total_tokens": response.usage.total_tokens,
            "cost": response.cost,
            "model": response.model,
        }

    def parse_params(self, params: dict[str, Any]) -> dict[str, Any]:
        """Loads the parameters for Together.AI API from the passed in parameters and returns a validated set. Checks types, ranges, and sets defaults"""
        together_params = {}

        # Check that we have what we need to use Together.AI's API
        together_params["model"] = params.get("model")
        assert together_params["model"], (
            "Please specify the 'model' in your config list entry to nominate the Together.AI model to use."
        )

        # Validate allowed Together.AI parameters
        # https://github.com/togethercomputer/together-python/blob/94ffb30daf0ac3e078be986af7228f85f79bde99/src/together/resources/completions.py#L44
        together_params["max_tokens"] = validate_parameter(params, "max_tokens", int, True, 512, (0, None), None)
        together_params["stream"] = validate_parameter(params, "stream", bool, False, False, None, None)
        together_params["temperature"] = validate_parameter(params, "temperature", (int, float), True, None, None, None)
        together_params["top_p"] = validate_parameter(params, "top_p", (int, float), True, None, None, None)
        together_params["top_k"] = validate_parameter(params, "top_k", int, True, None, None, None)
        together_params["repetition_penalty"] = validate_parameter(
            params, "repetition_penalty", float, True, None, None, None
        )
        together_params["presence_penalty"] = validate_parameter(
            params, "presence_penalty", (int, float), True, None, (-2, 2), None
        )
        together_params["frequency_penalty"] = validate_parameter(
            params, "frequency_penalty", (int, float), True, None, (-2, 2), None
        )
        together_params["min_p"] = validate_parameter(params, "min_p", (int, float), True, None, (0, 1), None)
        together_params["safety_model"] = validate_parameter(
            params, "safety_model", str, True, None, None, None
        )  # We won't enforce the available models as they are likely to change

        # Check if they want to stream and use tools, which isn't currently supported (TODO)
        if together_params["stream"] and "tools" in params:
            warnings.warn(
                "Streaming is not supported when using tools, streaming will be disabled.",
                UserWarning,
            )

            together_params["stream"] = False

        if "tool_choice" in params:
            together_params["tool_choice"] = params["tool_choice"]

        return together_params

    @require_optional_import("together", "together")
    def create(self, params: dict) -> ChatCompletion:
        """Create a chat completion via Together.AI and return it as an OpenAI-style ChatCompletion.

        Args:
            params: AG2 request parameters ("messages", "model", optional "tools", sampling params, ...).

        Returns:
            ChatCompletion: OpenAI-compatible response with cost attached.
        """
        messages = params.get("messages", [])

        # Convert AG2 messages to Together.AI messages
        together_messages = oai_messages_to_together_messages(messages)

        # Parse parameters to Together.AI API's parameters
        together_params = self.parse_params(params)

        # Add tools to the call if we have them and aren't hiding them
        if "tools" in params:
            hide_tools = validate_parameter(
                params, "hide_tools", str, False, "never", None, ["if_all_run", "if_any_run", "never"]
            )
            if not should_hide_tools(together_messages, params["tools"], hide_tools):
                together_params["tools"] = params["tools"]

        together_params["messages"] = together_messages

        # We use chat model by default
        client = Together(api_key=self.api_key)

        # Token counts will be returned
        prompt_tokens = 0
        completion_tokens = 0
        total_tokens = 0

        response = client.chat.completions.create(**together_params)
        if together_params["stream"]:
            # BUGFIX: with stream=True the SDK returns a one-shot iterator of
            # chunks, not a completion object — everything we need (text, id,
            # finish reason, usage) must be captured while iterating. The old
            # code later read `response.choices[0]`, which crashes on the
            # iterator, and discarded the accumulated text.
            ans = ""
            response_id = ""
            finish_reason = None
            for chunk in response:
                ans = ans + (chunk.choices[0].delta.content or "")
                response_id = chunk.id
                if chunk.choices[0].finish_reason is not None:
                    finish_reason = chunk.choices[0].finish_reason
                # Usage may only be present on the final chunk — TODO confirm against SDK.
                if getattr(chunk, "usage", None) is not None:
                    prompt_tokens = chunk.usage.prompt_tokens
                    completion_tokens = chunk.usage.completion_tokens
                    total_tokens = chunk.usage.total_tokens

            # Tools are disabled whenever streaming is on (see parse_params),
            # so a streamed response never carries tool calls.
            together_finish = finish_reason if finish_reason == "tool_calls" else "stop"
            tool_calls = None
            response_content = ans
        else:
            response_content = response.choices[0].message.content
            response_id = response.id

            prompt_tokens = response.usage.prompt_tokens
            completion_tokens = response.usage.completion_tokens
            total_tokens = response.usage.total_tokens

            if response.choices[0].finish_reason == "tool_calls":
                together_finish = "tool_calls"
                tool_calls = []
                for tool_call in response.choices[0].message.tool_calls:
                    tool_calls.append(
                        ChatCompletionMessageToolCall(
                            id=tool_call.id,
                            function={"name": tool_call.function.name, "arguments": tool_call.function.arguments},
                            type="function",
                        )
                    )
            else:
                together_finish = "stop"
                tool_calls = None

        # Convert the output into an OpenAI-compatible ChatCompletion
        message = ChatCompletionMessage(
            role="assistant",
            content=response_content,
            function_call=None,
            tool_calls=tool_calls,
        )
        choices = [Choice(finish_reason=together_finish, index=0, message=message)]

        response_oai = ChatCompletion(
            id=response_id,
            model=together_params["model"],
            created=int(time.time()),
            object="chat.completion",
            choices=choices,
            usage=CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens,
            ),
            cost=calculate_together_cost(prompt_tokens, completion_tokens, together_params["model"]),
        )

        return response_oai
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def oai_messages_to_together_messages(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Convert messages from OAI format to Together.AI format.

    We correct for any specific role orders and types.

    Args:
        messages: Messages in OpenAI chat-completion format.

    Returns:
        A deep copy of the messages with Together.AI-compatible roles;
        the input list is never mutated.
    """
    together_messages = copy.deepcopy(messages)

    # If we have a message with role='tool', which occurs when a function is executed, change it to 'user'
    for msg in together_messages:
        # .get() handles messages without a 'role' key in a single lookup
        if msg.get("role") == "tool":
            msg["role"] = "user"

    return together_messages
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
# MODELS AND COSTS

# Parameter count (in billions) for chat / language / code models.
chat_lang_code_model_sizes = {
    "zero-one-ai/Yi-34B-Chat": 34,
    "allenai/OLMo-7B-Instruct": 7,
    "allenai/OLMo-7B-Twin-2T": 7,
    "allenai/OLMo-7B": 7,
    "Austism/chronos-hermes-13b": 13,
    "deepseek-ai/deepseek-coder-33b-instruct": 33,
    "deepseek-ai/deepseek-llm-67b-chat": 67,
    "garage-bAInd/Platypus2-70B-instruct": 70,
    "google/gemma-2b-it": 2,
    "google/gemma-7b-it": 7,
    "Gryphe/MythoMax-L2-13b": 13,
    "lmsys/vicuna-13b-v1.5": 13,
    "lmsys/vicuna-7b-v1.5": 7,
    "codellama/CodeLlama-13b-Instruct-hf": 13,
    "codellama/CodeLlama-34b-Instruct-hf": 34,
    "codellama/CodeLlama-70b-Instruct-hf": 70,
    "codellama/CodeLlama-7b-Instruct-hf": 7,
    "meta-llama/Llama-2-70b-chat-hf": 70,
    "meta-llama/Llama-2-13b-chat-hf": 13,
    "meta-llama/Llama-2-7b-chat-hf": 7,
    "meta-llama/Llama-3-8b-chat-hf": 8,
    "meta-llama/Llama-3-70b-chat-hf": 70,
    "mistralai/Mistral-7B-Instruct-v0.1": 7,
    "mistralai/Mistral-7B-Instruct-v0.2": 7,
    "mistralai/Mistral-7B-Instruct-v0.3": 7,
    "NousResearch/Nous-Capybara-7B-V1p9": 7,
    "NousResearch/Nous-Hermes-llama-2-7b": 7,
    "NousResearch/Nous-Hermes-Llama2-13b": 13,
    "NousResearch/Nous-Hermes-2-Yi-34B": 34,
    "openchat/openchat-3.5-1210": 7,
    "Open-Orca/Mistral-7B-OpenOrca": 7,
    "Qwen/Qwen1.5-0.5B-Chat": 0.5,
    "Qwen/Qwen1.5-1.8B-Chat": 1.8,
    "Qwen/Qwen1.5-4B-Chat": 4,
    "Qwen/Qwen1.5-7B-Chat": 7,
    "Qwen/Qwen1.5-14B-Chat": 14,
    "Qwen/Qwen1.5-32B-Chat": 32,
    "Qwen/Qwen1.5-72B-Chat": 72,
    "Qwen/Qwen1.5-110B-Chat": 110,
    "Qwen/Qwen2-72B-Instruct": 72,
    "snorkelai/Snorkel-Mistral-PairRM-DPO": 7,
    "togethercomputer/alpaca-7b": 7,
    "teknium/OpenHermes-2-Mistral-7B": 7,
    "teknium/OpenHermes-2p5-Mistral-7B": 7,
    "togethercomputer/Llama-2-7B-32K-Instruct": 7,
    "togethercomputer/RedPajama-INCITE-Chat-3B-v1": 3,
    "togethercomputer/RedPajama-INCITE-7B-Chat": 7,
    "togethercomputer/StripedHyena-Nous-7B": 7,
    "Undi95/ReMM-SLERP-L2-13B": 13,
    "Undi95/Toppy-M-7B": 7,
    "WizardLM/WizardLM-13B-V1.2": 13,
    "upstage/SOLAR-10.7B-Instruct-v1.0": 11,
}

# Cost per million tokens based on up to X Billion parameters, e.g. up 4B is $0.1/million
chat_lang_code_model_costs = {4: 0.1, 8: 0.2, 21: 0.3, 41: 0.8, 80: 0.9, 110: 1.8}

# Total parameter count (in billions) for mixture-of-experts models.
mixture_model_sizes = {
    "cognitivecomputations/dolphin-2.5-mixtral-8x7b": 56,
    "databricks/dbrx-instruct": 132,
    "mistralai/Mixtral-8x7B-Instruct-v0.1": 47,
    "mistralai/Mixtral-8x22B-Instruct-v0.1": 141,
    "NousResearch/Nous-Hermes-2-Mistral-7B-DPO": 7,
    "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": 47,
    "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT": 47,
    "Snowflake/snowflake-arctic-instruct": 480,
}

# Cost per million tokens based on up to X Billion parameters, e.g. up 56B is $0.6/million
mixture_costs = {56: 0.6, 176: 1.2, 480: 2.4}


def _cost_per_million_tokens(size_in_b: float, cost_tiers: dict[int, float]) -> float:
    """Return the $/million-token rate for the smallest tier that fits `size_in_b`.

    Iterates the tiers in ascending size order (sorted, so the lookup does not
    depend on the dict's declaration order) and returns the first tier whose
    upper bound covers the model size. Returns 0 when no tier is large enough.
    """
    for top_size in sorted(cost_tiers):
        if size_in_b <= top_size:
            return cost_tiers[top_size]
    return 0


def calculate_together_cost(input_tokens: int, output_tokens: int, model_name: str) -> float:
    """Cost calculation for inference

    Args:
        input_tokens: Number of prompt tokens used.
        output_tokens: Number of completion tokens generated.
        model_name: Together.AI model identifier.

    Returns:
        Estimated cost in USD for the combined token count. Returns 0 (with a
        UserWarning) when the model is unknown or its size exceeds every
        pricing tier.
    """
    if model_name in chat_lang_code_model_sizes:
        # Chat, Language, Code models
        cost_per_mil = _cost_per_million_tokens(chat_lang_code_model_sizes[model_name], chat_lang_code_model_costs)
    elif model_name in mixture_model_sizes:
        # Mixture-of-experts
        cost_per_mil = _cost_per_million_tokens(mixture_model_sizes[model_name], mixture_costs)
    else:
        # Model is not in our list of models, can't determine the cost
        warnings.warn(
            "The model isn't catered for costing, to apply costs you can use the 'price' key on your config_list.",
            UserWarning,
        )
        return 0

    if cost_per_mil == 0:
        warnings.warn("Model size doesn't align with cost structure.", UserWarning)

    return cost_per_mil * ((input_tokens + output_tokens) / 1e6)
|