ag2 0.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ag2-0.10.2.dist-info/METADATA +819 -0
- ag2-0.10.2.dist-info/RECORD +423 -0
- ag2-0.10.2.dist-info/WHEEL +4 -0
- ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
- ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
- autogen/__init__.py +88 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +426 -0
- autogen/_website/generate_mkdocs.py +1216 -0
- autogen/_website/notebook_processor.py +475 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +413 -0
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +86 -0
- autogen/a2a/client.py +357 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +207 -0
- autogen/agentchat/__init__.py +47 -0
- autogen/agentchat/agent.py +180 -0
- autogen/agentchat/assistant_agent.py +86 -0
- autogen/agentchat/chat.py +325 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +432 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +578 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +189 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
- autogen/agentchat/contrib/rag/query_engine.py +76 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
- autogen/agentchat/contrib/swarm_agent.py +1404 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +224 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +304 -0
- autogen/agentchat/conversable_agent.py +4307 -0
- autogen/agentchat/group/__init__.py +67 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +39 -0
- autogen/agentchat/group/context_variables.py +182 -0
- autogen/agentchat/group/events/transition_events.py +111 -0
- autogen/agentchat/group/group_tool_executor.py +324 -0
- autogen/agentchat/group/group_utils.py +659 -0
- autogen/agentchat/group/guardrails.py +179 -0
- autogen/agentchat/group/handoffs.py +303 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +291 -0
- autogen/agentchat/group/on_condition.py +55 -0
- autogen/agentchat/group/on_context_condition.py +51 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +160 -0
- autogen/agentchat/group/patterns/manual.py +177 -0
- autogen/agentchat/group/patterns/pattern.py +295 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +24 -0
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +140 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/function_target.py +245 -0
- autogen/agentchat/group/targets/group_chat_target.py +133 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +424 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1832 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
- autogen/agentchat/realtime/experimental/function_observer.py +84 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +114 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +74 -0
- autogen/agents/contrib/time/time_tool_agent.py +52 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
- autogen/agents/experimental/document_agent/document_agent.py +643 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +376 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
- autogen/agents/experimental/document_agent/parser_utils.py +134 -0
- autogen/agents/experimental/document_agent/url_utils.py +417 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +76 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +70 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +71 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +97 -0
- autogen/cache/in_memory_cache.py +54 -0
- autogen/cache/redis_cache.py +119 -0
- autogen/code_utils.py +598 -0
- autogen/coding/__init__.py +30 -0
- autogen/coding/base.py +120 -0
- autogen/coding/docker_commandline_code_executor.py +283 -0
- autogen/coding/factory.py +56 -0
- autogen/coding/func_with_reqs.py +203 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +160 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +224 -0
- autogen/coding/jupyter/jupyter_code_executor.py +154 -0
- autogen/coding/jupyter/local_jupyter_server.py +164 -0
- autogen/coding/local_commandline_code_executor.py +341 -0
- autogen/coding/markdown_code_extractor.py +44 -0
- autogen/coding/utils.py +55 -0
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/doc_utils.py +35 -0
- autogen/environments/__init__.py +10 -0
- autogen/environments/docker_python_environment.py +365 -0
- autogen/environments/python_environment.py +125 -0
- autogen/environments/system_python_environment.py +85 -0
- autogen/environments/venv_python_environment.py +220 -0
- autogen/environments/working_directory.py +74 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1016 -0
- autogen/events/base_event.py +100 -0
- autogen/events/client_events.py +168 -0
- autogen/events/helpers.py +44 -0
- autogen/events/print_event.py +45 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +75 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +206 -0
- autogen/fast_depends/core/model.py +527 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +30 -0
- autogen/fast_depends/dependencies/provider.py +40 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +272 -0
- autogen/fast_depends/utils.py +177 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +173 -0
- autogen/import_utils.py +539 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
- autogen/interop/langchain/langchain_tool.py +78 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +178 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
- autogen/interop/registry.py +70 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +61 -0
- autogen/io/run_response.py +294 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +214 -0
- autogen/json_utils.py +42 -0
- autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
- autogen/llm_clients/__init__.py +77 -0
- autogen/llm_clients/client_v2.py +122 -0
- autogen/llm_clients/models/__init__.py +55 -0
- autogen/llm_clients/models/content_blocks.py +389 -0
- autogen/llm_clients/models/unified_message.py +145 -0
- autogen/llm_clients/models/unified_response.py +83 -0
- autogen/llm_clients/openai_completions_client.py +444 -0
- autogen/llm_config/__init__.py +11 -0
- autogen/llm_config/client.py +59 -0
- autogen/llm_config/config.py +461 -0
- autogen/llm_config/entry.py +169 -0
- autogen/llm_config/types.py +37 -0
- autogen/llm_config/utils.py +223 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +129 -0
- autogen/logger/file_logger.py +262 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +524 -0
- autogen/math_utils.py +338 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/helpers.py +45 -0
- autogen/mcp/mcp_client.py +349 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +399 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +946 -0
- autogen/messages/base_message.py +108 -0
- autogen/messages/client_messages.py +172 -0
- autogen/messages/print_message.py +48 -0
- autogen/oai/__init__.py +61 -0
- autogen/oai/anthropic.py +1516 -0
- autogen/oai/bedrock.py +800 -0
- autogen/oai/cerebras.py +302 -0
- autogen/oai/client.py +1658 -0
- autogen/oai/client_utils.py +196 -0
- autogen/oai/cohere.py +494 -0
- autogen/oai/gemini.py +1045 -0
- autogen/oai/gemini_types.py +156 -0
- autogen/oai/groq.py +319 -0
- autogen/oai/mistral.py +311 -0
- autogen/oai/oai_models/__init__.py +23 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +86 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +97 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
- autogen/oai/oai_models/completion_usage.py +59 -0
- autogen/oai/ollama.py +657 -0
- autogen/oai/openai_responses.py +451 -0
- autogen/oai/openai_utils.py +897 -0
- autogen/oai/together.py +387 -0
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +197 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/retrieve_utils.py +490 -0
- autogen/runtime_logging.py +161 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/token_count_utils.py +280 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +40 -0
- autogen/tools/dependency_injection.py +249 -0
- autogen/tools/experimental/__init__.py +54 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +154 -0
- autogen/tools/experimental/code_execution/__init__.py +7 -0
- autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +329 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
- autogen/tools/experimental/firecrawl/__init__.py +7 -0
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1311 -0
- autogen/tools/experimental/searxng/__init__.py +7 -0
- autogen/tools/experimental/searxng/searxng_search.py +142 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +176 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
- autogen/tools/function_utils.py +412 -0
- autogen/tools/tool.py +188 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +72 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from typing import Any
|
|
6
|
+
from uuid import uuid4
|
|
7
|
+
|
|
8
|
+
from pydantic_core import to_json
|
|
9
|
+
|
|
10
|
+
from autogen.events.agent_events import FunctionCall
|
|
11
|
+
from autogen.events.agent_events import ToolCall as RawToolCall
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class ToolCall:
    """A single tool invocation, identified by a tool name and keyword arguments.

    Args:
        tool_name: Name of the tool to invoke. The tool must be registered on the
            agent that receives the message.
        arguments: Keyword arguments forwarded to the tool.
    """

    def __init__(self, tool_name: str, /, **arguments: Any) -> None:
        # Serialize the arguments to a JSON string (the wire format expected by
        # OpenAI-style tool-call payloads), then wrap everything in a RawToolCall
        # carrying a unique call id.
        serialized_args = to_json(arguments).decode()
        call = RawToolCall(
            id=f"call_{uuid4()}",
            type="function",
            function=FunctionCall(name=tool_name, arguments=serialized_args),
        )
        self.tool_message = call.model_dump()

    def to_message(self) -> dict[str, Any]:
        """Convert the tool call to a message format suitable for API calls.

        Returns:
            A dictionary containing the tool call in message format,
            ready to be used in API requests or message queues.
        """
        return tools_message(self)
def tools_message(*tool_calls: ToolCall) -> dict[str, Any]:
    """Bundle one or more tool calls into a single assistant-message payload.

    Args:
        *tool_calls: One or more ToolCall objects to convert.

    Returns:
        A message dict with no text content and a ``tool_calls`` list holding
        the raw payload of each call, in argument order.
    """
    payloads = [call.tool_message for call in tool_calls]
    return {"content": None, "tool_calls": payloads}
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from collections.abc import Iterable
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from types import TracebackType
|
|
8
|
+
from typing import Any, TypedDict
|
|
9
|
+
|
|
10
|
+
from autogen import ConversableAgent, ModelClient
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class TestAgent:
    """A context manager for testing ConversableAgent instances with predefined messages.

    While the context is active, the wrapped agent's LLM client is swapped for a
    fake client that replays the supplied messages in order, and the agent's
    human input mode is forced to "NEVER" so no interactive prompt can block a
    test. On exit, the original client and input mode are restored.

    Attributes:
        agent (ConversableAgent): The agent to be tested.
        messages (Iterable[str | dict[str, Any]]): An iterable of messages to be returned by the fake client.
        suppress_messages_end (bool): Whether to suppress StopIteration exceptions from the fake client.

    Example:
        >>> with TestAgent(agent, ["Hello", "How are you?"]) as test_agent:
        ...     # Agent will respond with "Hello" then "How are you?"
        ...     pass
    """

    def __init__(
        self,
        agent: ConversableAgent,
        messages: Iterable[str | dict[str, Any]] = (),
        *,
        suppress_messages_end: bool = False,
    ) -> None:
        self.agent = agent
        self.suppress_messages_end = suppress_messages_end

        # Capture the state that will be temporarily overridden in __enter__.
        self.__original_human_input = agent.human_input_mode
        self.__original_client = agent.client
        self.__fake_client = FakeClient(messages)

    def __enter__(self) -> None:
        self.agent.human_input_mode = "NEVER"

        # Re-capture the client here in case it changed since construction.
        self.__original_client = self.agent.client
        self.agent.client = self.__fake_client  # type: ignore[assignment]
        return None

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None | bool:
        # Restore the agent exactly as it was before entering the context.
        self.agent.human_input_mode = self.__original_human_input
        self.agent.client = self.__original_client

        # Optionally swallow the fake client's message iterator running dry.
        if isinstance(exc_value, StopIteration):
            return self.suppress_messages_end
        return None
+
class FakeClient:
    """Minimal stand-in for an LLM client that replays predefined messages."""

    def __init__(self, messages: Iterable[str | dict[str, Any]]) -> None:
        # Wrap lazily (no unpacking) so endless generators are also accepted.
        self.choice_iterator = iter(convert_fake_message(m) for m in messages)

        # Usage-accounting attributes expected by the real client interface.
        self.total_usage_summary = None
        self.actual_usage_summary = None

    def create(self, **params: Any) -> ModelClient.ModelClientResponseProtocol:
        """Return the next canned choice wrapped in a fake response.

        Raises:
            StopIteration: When the predefined messages are exhausted.
        """
        return FakeClientResponse(choices=[next(self.choice_iterator)])

    def extract_text_or_completion_object(
        self,
        response: "FakeClientResponse",
    ) -> list[str] | list["FakeMessage"]:
        """Delegate message extraction to the fake response itself."""
        return response.message_retrieval_function()
def convert_fake_message(message: str | dict[str, Any]) -> "FakeChoice":
    """Normalize a test message into a FakeChoice.

    A bare string becomes a content-only message; a dict is merged over a
    default assistant role (so the dict may still override ``role`` explicitly).
    """
    if not isinstance(message, str):
        return FakeChoice({"role": "assistant", **message})  # type: ignore[typeddict-item]
    return FakeChoice({"content": message})
class FakeMessage(TypedDict):
|
|
97
|
+
content: str | dict[str, Any]
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
@dataclass
class FakeChoice(ModelClient.ModelClientResponseProtocol.Choice):
    """Choice variant whose message is a plain FakeMessage dict."""

    message: FakeMessage  # type: ignore[assignment]
@dataclass
class FakeClientResponse(ModelClient.ModelClientResponseProtocol):
    """Fake completion response holding pre-built choices."""

    choices: list[FakeChoice]
    model: str = "fake-model"

    def message_retrieval_function(self) -> list[str] | list[FakeMessage]:
        """Return the message payload of every choice, in order."""
        return [choice.message for choice in self.choices]
@@ -0,0 +1,280 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import logging
|
|
10
|
+
import re
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
import tiktoken
|
|
14
|
+
|
|
15
|
+
from .agentchat.contrib.img_utils import num_tokens_from_gpt_image
|
|
16
|
+
from .import_utils import optional_import_block
|
|
17
|
+
|
|
18
|
+
# If PIL is not importable, redefine num_tokens_from_gpt_image to return 0 tokens
# for images; otherwise, calling the real implementation would raise an ImportError.
with optional_import_block() as result:
    import PIL  # noqa: F401

# True when the optional PIL import above succeeded.
pil_imported = result.is_successful
if not pil_imported:

    def num_tokens_from_gpt_image(*args: Any, **kwargs: Any) -> int:
        # Fallback: without PIL we cannot measure images, so count them as 0 tokens.
        return 0


logger = logging.getLogger(__name__)
logger.img_dependency_warned = False  # tracks whether the image-dependency warning has already been logged
32
|
+
|
|
33
|
+
|
|
34
|
+
def get_max_token_limit(model: str = "gpt-3.5-turbo-0613") -> int:
    """Return the maximum context-window size, in tokens, for a known OpenAI model.

    Args:
        model: Model name. Common Azure-style aliases (e.g. "gpt35...", "gpt4...")
            are normalized to the canonical OpenAI names before lookup.

    Returns:
        int: The maximum number of tokens the model's context window holds.

    Raises:
        KeyError: If the (normalized) model name has no recorded limit.
    """
    # Handle common azure model names/aliases
    model = re.sub(r"^gpt\-?35", "gpt-3.5", model)
    model = re.sub(r"^gpt4", "gpt-4", model)

    max_token_limit = {
        "gpt-3.5-turbo": 16385,
        "gpt-3.5-turbo-0125": 16385,
        "gpt-3.5-turbo-0301": 4096,
        "gpt-3.5-turbo-0613": 4096,
        "gpt-3.5-turbo-instruct": 4096,
        "gpt-3.5-turbo-16k": 16385,
        "gpt-3.5-turbo-16k-0613": 16385,
        "gpt-3.5-turbo-1106": 16385,
        "gpt-4": 8192,
        "gpt-4-turbo": 128000,
        "gpt-4-turbo-2024-04-09": 128000,
        "gpt-4-32k": 32768,
        "gpt-4-32k-0314": 32768,  # deprecate in Sep
        "gpt-4-0314": 8192,  # deprecate in Sep
        "gpt-4-0613": 8192,
        "gpt-4-32k-0613": 32768,
        "gpt-4-1106-preview": 128000,
        "gpt-4-0125-preview": 128000,
        "gpt-4-turbo-preview": 128000,
        "gpt-4-vision-preview": 128000,
        "gpt-4o": 128000,
        "gpt-4o-2024-05-13": 128000,
        "gpt-4o-2024-08-06": 128000,
        "gpt-4o-2024-11-20": 128000,
        "gpt-4o-mini": 128000,
        "gpt-4o-mini-2024-07-18": 128000,
        "gpt-5": 128000,
        "gpt-5-mini": 128000,
        "gpt-5-nano": 128000,
        "gpt-5-pro": 128000,
        "gpt-5-search-api": 128000,
        "gpt-5.1": 128000,
        "gpt-5.1-chat-latest": 128000,
        "gpt-5.1-codex": 128000,
        "gpt-5.1-codex-mini": 128000,
        "codex-mini-latest": 128000,
    }
    try:
        return max_token_limit[model]
    except KeyError:
        # Re-raise with an actionable message; still a KeyError, so existing
        # callers' exception handling keeps working.
        raise KeyError(f"Unknown model {model!r}: no max token limit on record.") from None
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def percentile_used(input: str | list[str] | dict[str, Any], model: str = "gpt-3.5-turbo-0613") -> float:
    """Return the fraction of *model*'s context window consumed by *input*.

    Args:
        input: (str, list, dict): Input to the model.
        model: (str): Model name.

    Returns:
        float: Tokens used by the input divided by the model's max token limit.
    """
    # Bug fix: forward `model` to count_token. Previously the default model's
    # tokenizer was always used for the numerator, while the denominator used
    # the requested model's limit — an inconsistent ratio.
    return count_token(input, model=model) / get_max_token_limit(model)
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def token_left(input: str | list[str] | dict[str, Any], model="gpt-3.5-turbo-0613") -> int:
    """Count number of tokens left for an OpenAI model.

    Args:
        input: (str, list, dict): Input to the model.
        model: (str): Model name.

    Returns:
        int: Number of tokens left that the model can use for completion.
    """
    limit = get_max_token_limit(model)
    used = count_token(input, model=model)
    return limit - used
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def count_token(input: str | list[str] | dict[str, Any], model: str = "gpt-3.5-turbo-0613") -> int:
    """Count number of tokens used by an OpenAI model.

    Args:
        input: (str, list, dict): Input to the model.
        model: (str): Model name.

    Returns:
        int: Number of tokens from the input.
    """
    # Dispatch on the input shape: plain strings are tokenized directly,
    # message lists/dicts go through the chat-message accounting.
    if isinstance(input, str):
        return _num_token_from_text(input, model=model)
    if isinstance(input, (list, dict)):
        return _num_token_from_messages(input, model=model)
    raise ValueError(f"input must be str, list or dict, but we got {type(input)}")
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def _num_token_from_text(text: str, model: str = "gpt-3.5-turbo-0613"):
    """Return the number of tokens used by a string."""
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model: fall back to the generic cl100k_base tokenizer.
        logger.warning(f"Model {model} not found. Using cl100k_base encoding.")
        enc = tiktoken.get_encoding("cl100k_base")
    tokens = enc.encode(text)
    return len(tokens)
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def _num_token_from_messages(messages: list[str] | dict[str, Any], model="gpt-3.5-turbo-0613"):
    """Return the number of tokens used by a list of messages.

    retrieved from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb/

    Args:
        messages: A single message dict or a list of message dicts.
        model: Model name used to pick the tokenizer and per-message overheads.

    Returns:
        int: Estimated number of prompt tokens, including per-message framing
        overhead and the assistant-reply priming tokens.
    """
    # Accept a single message dict by wrapping it in a list.
    if isinstance(messages, dict):
        messages = [messages]

    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model: fall back to the generic cl100k_base tokenizer.
        logger.warning(f"Model {model} not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    # Per-message framing overheads differ by model family (values from the
    # OpenAI cookbook referenced above).
    if model in {
        "gpt-3.5-turbo-0613",
        "gpt-3.5-turbo-16k-0613",
        "gpt-4-0314",
        "gpt-4-32k-0314",
        "gpt-4-0613",
        "gpt-4-32k-0613",
        "gpt-4-turbo-preview",
        "gpt-4-vision-preview",
        "gpt-4o",
        "gpt-4o-2024-05-13",
        "gpt-4o-2024-08-06",
        "gpt-4o-2024-11-20",
        "gpt-4o-mini",
        "gpt-4o-mini-2024-07-18",
    }:
        tokens_per_message = 3
        tokens_per_name = 1
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif "gpt-3.5-turbo" in model:
        logger.info("gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
        return _num_token_from_messages(messages, model="gpt-3.5-turbo-0613")
    elif "gpt-4" in model:
        logger.info("gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
        return _num_token_from_messages(messages, model="gpt-4-0613")
    elif "gpt-5" in model:
        logger.info("gpt-5 may update over time. Returning num tokens assuming gpt-4-0613.")
        return _num_token_from_messages(messages, model="gpt-4-0613")
    elif "gemini" in model:
        # Non-OpenAI families are approximated with the gpt-4-0613 accounting.
        logger.info("Gemini is not supported in tiktoken. Returning num tokens assuming gpt-4-0613.")
        return _num_token_from_messages(messages, model="gpt-4-0613")
    elif "claude" in model:
        logger.info("Claude is not supported in tiktoken. Returning num tokens assuming gpt-4-0613.")
        return _num_token_from_messages(messages, model="gpt-4-0613")
    elif "mistral-" in model or "mixtral-" in model:
        logger.info("Mistral.AI models are not supported in tiktoken. Returning num tokens assuming gpt-4-0613.")
        return _num_token_from_messages(messages, model="gpt-4-0613")
    elif "deepseek" in model:
        logger.info("Deepseek models are not supported in tiktoken. Returning num tokens assuming gpt-4-0613.")
        return _num_token_from_messages(messages, model="gpt-4-0613")
    else:
        raise NotImplementedError(
            f"""_num_token_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
        )
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            # Fields explicitly set to None contribute nothing.
            if value is None:
                continue

            # handle content if images are in GPT-4-vision
            if key == "content" and isinstance(value, list):
                # Multimodal content: a list of {"type": ...} parts mixing text and images.
                for part in value:
                    if not isinstance(part, dict) or "type" not in part:
                        continue
                    if part["type"] == "text":
                        num_tokens += len(encoding.encode(part["text"]))
                    if "image_url" in part:
                        assert "url" in part["image_url"]
                        # Warn (once per process) if image token counting is unavailable.
                        if not pil_imported and not logger.img_dependency_warned:
                            logger.warning(
                                "img_utils or PIL not imported. Skipping image token count."
                                "Please install autogen with [lmm] option.",
                            )
                            logger.img_dependency_warned = True
                        is_low_quality = "detail" in part["image_url"] and part["image_url"]["detail"] == "low"
                        try:
                            num_tokens += num_tokens_from_gpt_image(
                                image_data=part["image_url"]["url"], model=model, low_quality=is_low_quality
                            )
                        except ValueError as e:
                            # Unparsable image data: skip its contribution rather than fail.
                            logger.warning(f"Error in num_tokens_from_gpt_image: {e}")
                            continue
                # Structured content fully handled; skip the plain-string path below.
                continue

            # function calls
            if not isinstance(value, str):
                # Non-string values (e.g. tool-call dicts) are serialized to JSON
                # before tokenizing; unserializable values are skipped with a warning.
                try:
                    value = json.dumps(value)
                except TypeError:
                    logger.warning(
                        f"Value {value} is not a string and cannot be converted to json. It is a type: {type(value)} Skipping."
                    )
                    continue

            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def num_tokens_from_functions(functions, model="gpt-3.5-turbo-0613") -> int:
    """Return the number of tokens used by a list of functions.

    Args:
        functions: (list): List of function descriptions that will be passed in model.
        model: (str): Model name.

    Returns:
        int: Number of tokens from the function descriptions.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model: fall back to the generic cl100k_base tokenizer.
        logger.warning(f"Model {model} not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")

    # NOTE(review): the constant offsets below (+2/-2/+3/-3/+11/+12) approximate
    # the overhead OpenAI adds when serializing function schemas into the prompt;
    # they appear to be empirically derived rather than documented — confirm
    # against current OpenAI behavior before adjusting.
    num_tokens = 0
    for function in functions:
        function_tokens = len(encoding.encode(function["name"]))
        function_tokens += len(encoding.encode(function["description"]))
        function_tokens -= 2
        if "parameters" in function:
            parameters = function["parameters"]
            if "properties" in parameters:
                for properties_key in parameters["properties"]:
                    function_tokens += len(encoding.encode(properties_key))
                    v = parameters["properties"][properties_key]
                    # Only type/description/enum fields of a property are counted;
                    # anything else is ignored with a warning.
                    for field in v:
                        if field == "type":
                            function_tokens += 2
                            function_tokens += len(encoding.encode(v["type"]))
                        elif field == "description":
                            function_tokens += 2
                            function_tokens += len(encoding.encode(v["description"]))
                        elif field == "enum":
                            function_tokens -= 3
                            for o in v["enum"]:
                                function_tokens += 3
                                function_tokens += len(encoding.encode(o))
                        else:
                            logger.warning(f"Not supported field {field}")
                function_tokens += 11
                if len(parameters["properties"]) == 0:
                    function_tokens -= 2

        num_tokens += function_tokens

    num_tokens += 12
    return num_tokens
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from .dependency_injection import BaseContext, ChatContext, Depends
|
|
6
|
+
from .function_utils import get_function_schema, load_basemodels_if_needed, serialize_to_str
|
|
7
|
+
from .tool import Tool, tool
|
|
8
|
+
from .toolkit import Toolkit
|
|
9
|
+
|
|
10
|
+
# Explicit public API of the tools package (ASCII-sorted: classes first, then
# lowercase function names).
__all__ = [
    "BaseContext",
    "ChatContext",
    "Depends",
    "Tool",
    "Toolkit",
    "get_function_schema",
    "load_basemodels_if_needed",
    "serialize_to_str",
    "tool",
]
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from typing import Annotated
|
|
7
|
+
|
|
8
|
+
from autogen.tools import Tool
|
|
9
|
+
|
|
10
|
+
from ....doc_utils import export_module
|
|
11
|
+
|
|
12
|
+
__all__ = ["TimeTool"]
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@export_module("autogen.tools.contrib")  # API Reference: autogen > tools > contrib > TimeAgent
class TimeTool(Tool):
    """Outputs the current date and time of the computer."""

    def __init__(
        self,
        *,
        date_time_format: str = "%Y-%m-%d %H:%M:%S",  # This is a parameter that is unique to this tool
    ):
        """Get the date and time of the computer.

        Args:
            date_time_format (str, optional): The format of the date and time. Defaults to "%Y-%m-%d %H:%M:%S".
        """
        self._date_time_format = date_time_format

        # Defined as a closure so the default argument captures this instance's
        # format string at construction time; the signature (name, parameter,
        # Annotated hint) is what Tool exposes to the LLM as the tool schema.
        async def get_date_and_time(
            date_time_format: Annotated[str, "date/time Python format"] = self._date_time_format,
        ) -> str:
            # NOTE(review): datetime.now() is naive local time — confirm whether
            # callers expect local time or UTC.
            return datetime.now().strftime(date_time_format)

        # Register the closure as the tool implementation with a fixed name/description.
        super().__init__(
            name="date_time",
            description="Get the current computer's date and time.",
            func_or_tool=get_date_and_time,
        )
|