ag2 0.9.1a1__py3-none-any.whl → 0.9.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ag2 might be problematic.
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/METADATA +272 -75
- ag2-0.9.2.dist-info/RECORD +406 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/WHEEL +1 -2
- autogen/__init__.py +89 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +427 -0
- autogen/_website/generate_mkdocs.py +1174 -0
- autogen/_website/notebook_processor.py +476 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +412 -0
- autogen/agentchat/__init__.py +44 -0
- autogen/agentchat/agent.py +182 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +309 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +429 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +566 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +187 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
- autogen/agentchat/contrib/rag/query_engine.py +74 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
- autogen/agentchat/contrib/swarm_agent.py +1425 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +232 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +303 -0
- autogen/agentchat/conversable_agent.py +4023 -0
- autogen/agentchat/group/__init__.py +64 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +41 -0
- autogen/agentchat/group/context_variables.py +192 -0
- autogen/agentchat/group/group_tool_executor.py +202 -0
- autogen/agentchat/group/group_utils.py +591 -0
- autogen/agentchat/group/handoffs.py +244 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +237 -0
- autogen/agentchat/group/on_condition.py +58 -0
- autogen/agentchat/group/on_context_condition.py +54 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +159 -0
- autogen/agentchat/group/patterns/manual.py +176 -0
- autogen/agentchat/group/patterns/pattern.py +288 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +26 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/group_chat_target.py +132 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +413 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1694 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
- autogen/agentchat/realtime/experimental/function_observer.py +85 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +111 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +73 -0
- autogen/agents/contrib/time/time_tool_agent.py +51 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
- autogen/agents/experimental/document_agent/document_agent.py +461 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +380 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
- autogen/agents/experimental/document_agent/parser_utils.py +130 -0
- autogen/agents/experimental/document_agent/url_utils.py +426 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +77 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +62 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +75 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +102 -0
- autogen/cache/in_memory_cache.py +58 -0
- autogen/cache/redis_cache.py +123 -0
- autogen/code_utils.py +596 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +119 -0
- autogen/coding/docker_commandline_code_executor.py +268 -0
- autogen/coding/factory.py +47 -0
- autogen/coding/func_with_reqs.py +202 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +167 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +231 -0
- autogen/coding/jupyter/jupyter_code_executor.py +160 -0
- autogen/coding/jupyter/local_jupyter_server.py +172 -0
- autogen/coding/local_commandline_code_executor.py +405 -0
- autogen/coding/markdown_code_extractor.py +45 -0
- autogen/coding/utils.py +56 -0
- autogen/doc_utils.py +34 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1013 -0
- autogen/events/base_event.py +99 -0
- autogen/events/client_events.py +167 -0
- autogen/events/helpers.py +36 -0
- autogen/events/print_event.py +46 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +80 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +225 -0
- autogen/fast_depends/core/model.py +576 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +29 -0
- autogen/fast_depends/dependencies/provider.py +39 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +280 -0
- autogen/fast_depends/utils.py +187 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +178 -0
- autogen/import_utils.py +526 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
- autogen/interop/langchain/langchain_tool.py +82 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +179 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
- autogen/interop/registry.py +69 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +56 -0
- autogen/io/run_response.py +293 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +213 -0
- autogen/json_utils.py +43 -0
- autogen/llm_config.py +382 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +128 -0
- autogen/logger/file_logger.py +261 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +523 -0
- autogen/math_utils.py +339 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/mcp_client.py +208 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +63 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +581 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +158 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +114 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +400 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +948 -0
- autogen/messages/base_message.py +107 -0
- autogen/messages/client_messages.py +171 -0
- autogen/messages/print_message.py +49 -0
- autogen/oai/__init__.py +53 -0
- autogen/oai/anthropic.py +714 -0
- autogen/oai/bedrock.py +628 -0
- autogen/oai/cerebras.py +299 -0
- autogen/oai/client.py +1444 -0
- autogen/oai/client_utils.py +169 -0
- autogen/oai/cohere.py +479 -0
- autogen/oai/gemini.py +998 -0
- autogen/oai/gemini_types.py +155 -0
- autogen/oai/groq.py +305 -0
- autogen/oai/mistral.py +303 -0
- autogen/oai/oai_models/__init__.py +11 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +87 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +86 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
- autogen/oai/oai_models/completion_usage.py +60 -0
- autogen/oai/ollama.py +643 -0
- autogen/oai/openai_utils.py +881 -0
- autogen/oai/together.py +370 -0
- autogen/retrieve_utils.py +491 -0
- autogen/runtime_logging.py +160 -0
- autogen/token_count_utils.py +267 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +41 -0
- autogen/tools/dependency_injection.py +254 -0
- autogen/tools/experimental/__init__.py +48 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +161 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +328 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1316 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +183 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
- autogen/tools/function_utils.py +411 -0
- autogen/tools/tool.py +187 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +69 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
- ag2-0.9.1a1.dist-info/RECORD +0 -6
- ag2-0.9.1a1.dist-info/top_level.txt +0 -1
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/LICENSE +0 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/NOTICE.md +0 -0
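Of the files added above, the diff viewer rendered the contents of two below: autogen/agentchat/contrib/capabilities/transforms.py (the +566 hunk) and autogen/agentchat/contrib/capabilities/transforms_util.py (the +122 hunk).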
autogen/agentchat/contrib/capabilities/transforms.py
@@ -0,0 +1,566 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0
#
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
# SPDX-License-Identifier: MIT
import copy
import sys
from typing import Any, Optional, Protocol, Union

import tiktoken
from termcolor import colored

from .... import token_count_utils
from ....cache import AbstractCache, Cache
from ....types import MessageContentType
from . import transforms_util
from .text_compressors import LLMLingua, TextCompressor


class MessageTransform(Protocol):
    """Defines a contract for message transformation.

    Classes implementing this protocol should provide an `apply_transform` method
    that takes a list of messages and returns the transformed list.
    """

    def apply_transform(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
        """Applies a transformation to a list of messages.

        Args:
            messages: A list of dictionaries representing messages.

        Returns:
            A new list of dictionaries containing the transformed messages.
        """
        ...

    def get_logs(
        self, pre_transform_messages: list[dict[str, Any]], post_transform_messages: list[dict[str, Any]]
    ) -> tuple[str, bool]:
        """Creates the string including the logs of the transformation.

        Alongside the string, it returns a boolean indicating whether the transformation had an effect or not.

        Args:
            pre_transform_messages: A list of dictionaries representing messages before the transformation.
            post_transform_messages: A list of dictionaries representing messages after the transformation.

        Returns:
            A tuple with a string with the logs and a flag indicating whether the transformation had an effect or not.
        """
        ...


class MessageHistoryLimiter:
    """Limits the number of messages considered by an agent for response generation.

    This transform keeps only the most recent messages up to the specified maximum number of messages (max_messages).
    It trims the conversation history by removing older messages, retaining only the most recent messages.
    """

    def __init__(self, max_messages: Optional[int] = None, keep_first_message: bool = False):
        """Args:
        max_messages Optional[int]: Maximum number of messages to keep in the context. Must be greater than 0 if not None.
        keep_first_message bool: Whether to keep the original first message in the conversation history.
            Defaults to False.
        """
        self._validate_max_messages(max_messages)
        self._max_messages = max_messages
        self._keep_first_message = keep_first_message

    def apply_transform(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
        """Truncates the conversation history to the specified maximum number of messages.

        This method returns a new list containing the most recent messages up to the specified
        maximum number of messages (max_messages). If max_messages is None, it returns the
        original list of messages unmodified.

        Args:
            messages (List[Dict]): The list of messages representing the conversation history.

        Returns:
            List[Dict]: A new list containing the most recent messages up to the specified maximum.
        """
        if self._max_messages is None or len(messages) <= self._max_messages:
            return messages

        truncated_messages = []
        remaining_count = self._max_messages

        # Start with the first message if we need to keep it
        if self._keep_first_message:
            truncated_messages = [messages[0]]
            remaining_count -= 1

        # Loop through messages in reverse
        for i in range(len(messages) - 1, 0, -1):
            if remaining_count > 1:
                truncated_messages.insert(1 if self._keep_first_message else 0, messages[i])
            if remaining_count == 1:  # noqa: SIM102
                # If there's only 1 slot left and it's a 'tools' message, ignore it.
                if messages[i].get("role") != "tool":
                    truncated_messages.insert(1 if self._keep_first_message else 0, messages[i])

            remaining_count -= 1
            if remaining_count == 0:
                break

        return truncated_messages

    def get_logs(
        self, pre_transform_messages: list[dict[str, Any]], post_transform_messages: list[dict[str, Any]]
    ) -> tuple[str, bool]:
        pre_transform_messages_len = len(pre_transform_messages)
        post_transform_messages_len = len(post_transform_messages)

        if post_transform_messages_len < pre_transform_messages_len:
            logs_str = (
                f"Removed {pre_transform_messages_len - post_transform_messages_len} messages. "
                f"Number of messages reduced from {pre_transform_messages_len} to {post_transform_messages_len}."
            )
            return logs_str, True
        return "No messages were removed.", False

    def _validate_max_messages(self, max_messages: Optional[int]):
        if max_messages is not None and max_messages < 1:
            raise ValueError("max_messages must be None or greater than 0")


class MessageTokenLimiter:
    """Truncates messages to meet token limits for efficient processing and response generation.

    This transformation applies two levels of truncation to the conversation history:

    1. Truncates each individual message to the maximum number of tokens specified by max_tokens_per_message.
    2. Truncates the overall conversation history to the maximum number of tokens specified by max_tokens.

    NOTE: Tokens are counted using the encoder for the specified model. Different models may yield different token
    counts for the same text.

    NOTE: For multimodal LLMs, the token count may be inaccurate as it does not account for the non-text input
    (e.g. images).

    The truncation process follows these steps in order:

    1. The minimum tokens threshold (`min_tokens`) is checked (0 by default). If the total number of tokens in messages
    is less than this threshold, the messages are returned as is. Otherwise, the following process is applied.
    2. Messages are processed in reverse order (newest to oldest).
    3. Individual messages are truncated based on max_tokens_per_message. For multimodal messages containing both text
    and other types of content, only the text content is truncated.
    4. The overall conversation history is truncated based on the max_tokens limit. Once the accumulated token count
    exceeds this limit, the current message being processed gets truncated to meet the total token count and any
    remaining messages get discarded.
    5. The truncated conversation history is reconstructed by prepending the messages to a new list to preserve the
    original message order.
    """

    def __init__(
        self,
        max_tokens_per_message: Optional[int] = None,
        max_tokens: Optional[int] = None,
        min_tokens: Optional[int] = None,
        model: str = "gpt-3.5-turbo-0613",
        filter_dict: Optional[dict[str, Any]] = None,
        exclude_filter: bool = True,
    ):
        """Args:
        max_tokens_per_message (None or int): Maximum number of tokens to keep in each message.
            Must be greater than or equal to 0 if not None.
        max_tokens (Optional[int]): Maximum number of tokens to keep in the chat history.
            Must be greater than or equal to 0 if not None.
        min_tokens (Optional[int]): Minimum number of tokens in messages to apply the transformation.
            Must be greater than or equal to 0 if not None.
        model (str): The target OpenAI model for tokenization alignment.
        filter_dict (None or dict): A dictionary to filter out messages that you want/don't want to truncate.
            If None, no filters will be applied.
        exclude_filter (bool): If exclude filter is True (the default value), messages that match the filter will be
            excluded from token truncation. If False, messages that match the filter will be truncated.
        """
        self._model = model
        self._max_tokens_per_message = self._validate_max_tokens(max_tokens_per_message)
        self._max_tokens = self._validate_max_tokens(max_tokens)
        self._min_tokens = self._validate_min_tokens(min_tokens, max_tokens)
        self._filter_dict = filter_dict
        self._exclude_filter = exclude_filter

    def apply_transform(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
        """Applies token truncation to the conversation history.

        Args:
            messages (List[Dict]): The list of messages representing the conversation history.

        Returns:
            List[Dict]: A new list containing the truncated messages up to the specified token limits.
        """
        assert self._max_tokens_per_message is not None
        assert self._max_tokens is not None
        assert self._min_tokens is not None

        # if the total number of tokens in the messages is less than the min_tokens, return the messages as is
        if not transforms_util.min_tokens_reached(messages, self._min_tokens):
            return messages

        temp_messages = copy.deepcopy(messages)
        processed_messages = []
        processed_messages_tokens = 0

        for msg in reversed(temp_messages):
            # Some messages may not have content.
            if not transforms_util.is_content_right_type(msg.get("content")):
                processed_messages.insert(0, msg)
                continue

            if not transforms_util.should_transform_message(msg, self._filter_dict, self._exclude_filter):
                processed_messages.insert(0, msg)
                processed_messages_tokens += transforms_util.count_text_tokens(msg["content"])
                continue

            expected_tokens_remained = self._max_tokens - processed_messages_tokens - self._max_tokens_per_message

            # If adding this message would exceed the token limit, truncate the last message to meet the total token
            # limit and discard all remaining messages
            if expected_tokens_remained < 0:
                msg["content"] = self._truncate_str_to_tokens(
                    msg["content"], self._max_tokens - processed_messages_tokens
                )
                processed_messages.insert(0, msg)
                break

            msg["content"] = self._truncate_str_to_tokens(msg["content"], self._max_tokens_per_message)
            msg_tokens = transforms_util.count_text_tokens(msg["content"])

            # prepend the message to the list to preserve order
            processed_messages_tokens += msg_tokens
            processed_messages.insert(0, msg)

        return processed_messages

    def get_logs(
        self, pre_transform_messages: list[dict[str, Any]], post_transform_messages: list[dict[str, Any]]
    ) -> tuple[str, bool]:
        pre_transform_messages_tokens = sum(
            transforms_util.count_text_tokens(msg["content"]) for msg in pre_transform_messages if "content" in msg
        )
        post_transform_messages_tokens = sum(
            transforms_util.count_text_tokens(msg["content"]) for msg in post_transform_messages if "content" in msg
        )

        if post_transform_messages_tokens < pre_transform_messages_tokens:
            logs_str = (
                f"Truncated {pre_transform_messages_tokens - post_transform_messages_tokens} tokens. "
                f"Number of tokens reduced from {pre_transform_messages_tokens} to {post_transform_messages_tokens}"
            )
            return logs_str, True
        return "No tokens were truncated.", False

    def _truncate_str_to_tokens(self, contents: Union[str, list], n_tokens: int) -> Union[str, list]:
        if isinstance(contents, str):
            return self._truncate_tokens(contents, n_tokens)
        elif isinstance(contents, list):
            return self._truncate_multimodal_text(contents, n_tokens)
        else:
            raise ValueError(f"Contents must be a string or a list of dictionaries. Received type: {type(contents)}")

    def _truncate_multimodal_text(self, contents: list[dict[str, Any]], n_tokens: int) -> list[dict[str, Any]]:
        """Truncates text content within a list of multimodal elements, preserving the overall structure."""
        tmp_contents = []
        for content in contents:
            if content["type"] == "text":
                truncated_text = self._truncate_tokens(content["text"], n_tokens)
                tmp_contents.append({"type": "text", "text": truncated_text})
            else:
                tmp_contents.append(content)
        return tmp_contents

    def _truncate_tokens(self, text: str, n_tokens: int) -> str:
        encoding = tiktoken.encoding_for_model(self._model)  # Get the appropriate tokenizer

        encoded_tokens = encoding.encode(text)
        truncated_tokens = encoded_tokens[:n_tokens]
        truncated_text = encoding.decode(truncated_tokens)  # Decode back to text

        return truncated_text

    def _validate_max_tokens(self, max_tokens: Optional[int] = None) -> Optional[int]:
        if max_tokens is not None and max_tokens < 0:
            raise ValueError("max_tokens and max_tokens_per_message must be None or greater than or equal to 0")

        try:
            allowed_tokens = token_count_utils.get_max_token_limit(self._model)
        except Exception:
            print(colored(f"Model {self._model} not found in token_count_utils.", "yellow"))
            allowed_tokens = None

        if max_tokens is not None and allowed_tokens is not None and max_tokens > allowed_tokens:
            print(
                colored(
                    f"Max token was set to {max_tokens}, but {self._model} can only accept {allowed_tokens} tokens. Capping it to {allowed_tokens}.",
                    "yellow",
                )
            )
            return allowed_tokens

        return max_tokens if max_tokens is not None else sys.maxsize

    def _validate_min_tokens(self, min_tokens: Optional[int], max_tokens: Optional[int]) -> int:
        if min_tokens is None:
            return 0
        if min_tokens < 0:
            raise ValueError("min_tokens must be None or greater than or equal to 0.")
        if max_tokens is not None and min_tokens > max_tokens:
            raise ValueError("min_tokens must not be more than max_tokens.")
        return min_tokens


class TextMessageCompressor:
    """A transform for compressing text messages in a conversation history.

    It uses a specified text compression method to reduce the token count of messages, which can lead to more efficient
    processing and response generation by downstream models.
    """

    def __init__(
        self,
        text_compressor: Optional[TextCompressor] = None,
        min_tokens: Optional[int] = None,
        compression_params: dict = dict(),
        cache: Optional[AbstractCache] = None,
        filter_dict: Optional[dict[str, Any]] = None,
        exclude_filter: bool = True,
    ):
        """Args:
        text_compressor (TextCompressor or None): An instance of a class that implements the TextCompressor
            protocol. If None, it defaults to LLMLingua.
        min_tokens (int or None): Minimum number of tokens in messages to apply the transformation. Must be greater
            than 0 if not None. If None, no threshold-based compression is applied.
        compression_params (dict): A dictionary of arguments for the compression method. Defaults to an empty
            dictionary.
        cache (None or AbstractCache): The cache client to use to store and retrieve previously compressed messages.
            If None, no caching will be used.
        filter_dict (None or dict): A dictionary to filter out messages that you want/don't want to compress.
            If None, no filters will be applied.
        exclude_filter (bool): If exclude filter is True (the default value), messages that match the filter will be
            excluded from compression. If False, messages that match the filter will be compressed.
        """
        if text_compressor is None:
            text_compressor = LLMLingua()

        self._validate_min_tokens(min_tokens)

        self._text_compressor = text_compressor
        self._min_tokens = min_tokens
        self._compression_args = compression_params
        self._filter_dict = filter_dict
        self._exclude_filter = exclude_filter

        if cache is None:
            self._cache = Cache.disk()
        else:
            self._cache = cache

        # Track token savings from the most recent transform to simplify log generation
        self._recent_tokens_savings = 0

    def apply_transform(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
        """Applies compression to messages in a conversation history based on the specified configuration.

        The function processes each message according to the `compression_args` and `min_tokens` settings, applying
        the specified compression configuration and returning a new list of messages with reduced token counts
        where possible.

        Args:
            messages (List[Dict]): A list of message dictionaries to be compressed.

        Returns:
            List[Dict]: A list of dictionaries with the message content compressed according to the configured
                method and scope.
        """
        # Make sure there is at least one message
        if not messages:
            return messages

        # if the total number of tokens in the messages is less than the min_tokens, return the messages as is
        if not transforms_util.min_tokens_reached(messages, self._min_tokens):
            return messages

        total_savings = 0
        processed_messages = messages.copy()
        for message in processed_messages:
            # Some messages may not have content.
            if not transforms_util.is_content_right_type(message.get("content")):
                continue

            if not transforms_util.should_transform_message(message, self._filter_dict, self._exclude_filter):
                continue

            if transforms_util.is_content_text_empty(message["content"]):
                continue

            cache_key = transforms_util.cache_key(message["content"], self._min_tokens)
            cached_content = transforms_util.cache_content_get(self._cache, cache_key)
            if cached_content is not None:
                message["content"], savings = cached_content
            else:
                message["content"], savings = self._compress(message["content"])

                transforms_util.cache_content_set(self._cache, cache_key, message["content"], savings)

            assert isinstance(savings, int)
            total_savings += savings

        self._recent_tokens_savings = total_savings
        return processed_messages

    def get_logs(
        self, pre_transform_messages: list[dict[str, Any]], post_transform_messages: list[dict[str, Any]]
    ) -> tuple[str, bool]:
        if self._recent_tokens_savings > 0:
            return f"{self._recent_tokens_savings} tokens saved with text compression.", True
        else:
            return "No tokens saved with text compression.", False

    def _compress(self, content: MessageContentType) -> tuple[MessageContentType, int]:
        """Compresses the given text or multimodal content using the specified compression method."""
        if isinstance(content, str):
            return self._compress_text(content)
        elif isinstance(content, list):
            return self._compress_multimodal(content)
        else:
            return content, 0

    def _compress_multimodal(self, content: MessageContentType) -> tuple[MessageContentType, int]:
        tokens_saved = 0
        for idx, item in enumerate(content):
            if isinstance(item, dict) and "text" in item:
                item["text"], savings = self._compress_text(item["text"])
                tokens_saved += savings

            elif isinstance(item, str):
                content[idx], savings = self._compress_text(item)  # write back; rebinding the loop variable alone would drop the result
                tokens_saved += savings

        return content, tokens_saved

    def _compress_text(self, text: str) -> tuple[str, int]:
        """Compresses the given text using the specified compression method."""
        compressed_text = self._text_compressor.compress_text(text, **self._compression_args)

        savings = 0
        if "origin_tokens" in compressed_text and "compressed_tokens" in compressed_text:
            savings = compressed_text["origin_tokens"] - compressed_text["compressed_tokens"]

        return compressed_text["compressed_prompt"], savings

    def _validate_min_tokens(self, min_tokens: Optional[int]):
        if min_tokens is not None and min_tokens <= 0:
            raise ValueError("min_tokens must be greater than 0 or None")


class TextMessageContentName:
    """A transform for including the agent's name in the content of a message.

    How to create and apply the transform:
    # Imports
    from autogen.agentchat.contrib.capabilities import transform_messages, transforms

    # Create Transform
    name_transform = transforms.TextMessageContentName(position="start", format_string="'{name}' said:\n")

    # Create the TransformMessages
    context_handling = transform_messages.TransformMessages(
        transforms=[
            name_transform
        ]
    )

    # Add it to an agent so when they run inference it will apply to the messages
    context_handling.add_to_agent(my_agent)
    """

    def __init__(
        self,
        position: str = "start",
        format_string: str = "{name}:\n",
        deduplicate: bool = True,
        filter_dict: Optional[dict[str, Any]] = None,
        exclude_filter: bool = True,
    ):
        """Args:
        position (str): The position to add the name to the content. The possible options are 'start' or 'end'. Defaults to 'start'.
        format_string (str): The f-string to format the message name with. Use '{name}' as a placeholder for the agent's name. Defaults to '{name}:\n' and must contain '{name}'.
        deduplicate (bool): Whether to deduplicate the formatted string so it doesn't appear twice (sometimes the LLM will add it to new messages itself). Defaults to True.
        filter_dict (None or dict): A dictionary to filter out messages that you want/don't want to transform.
            If None, no filters will be applied.
        exclude_filter (bool): If exclude filter is True (the default value), messages that match the filter will be
            excluded from this transform. If False, messages that match the filter will be transformed.
        """
        assert isinstance(position, str) and position in ["start", "end"]
        assert isinstance(format_string, str) and "{name}" in format_string
        assert isinstance(deduplicate, bool) and deduplicate is not None

        self._position = position
        self._format_string = format_string
        self._deduplicate = deduplicate
        self._filter_dict = filter_dict
        self._exclude_filter = exclude_filter

        # Track the number of messages changed for logging
        self._messages_changed = 0

    def apply_transform(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
        """Applies the name change to the message based on the position and format string.

        Args:
            messages (List[Dict]): A list of message dictionaries.

        Returns:
            List[Dict]: A list of dictionaries with the message content updated with names.
        """
        # Make sure there is at least one message
        if not messages:
            return messages

        messages_changed = 0
        processed_messages = copy.deepcopy(messages)
        for message in processed_messages:
            # Some messages may not have content.
            if not transforms_util.is_content_right_type(
                message.get("content")
            ) or not transforms_util.is_content_right_type(message.get("name")):
                continue

            if not transforms_util.should_transform_message(message, self._filter_dict, self._exclude_filter):
                continue

            if transforms_util.is_content_text_empty(message["content"]) or transforms_util.is_content_text_empty(
                message["name"]
            ):
                continue

            # Get and format the name in the content
            content = message["content"]
            formatted_name = self._format_string.format(name=message["name"])

            if self._position == "start":
                if not self._deduplicate or not content.startswith(formatted_name):
                    message["content"] = f"{formatted_name}{content}"

                    messages_changed += 1
            else:
                if not self._deduplicate or not content.endswith(formatted_name):
                    message["content"] = f"{content}{formatted_name}"

                    messages_changed += 1

        self._messages_changed = messages_changed
        return processed_messages

    def get_logs(
        self, pre_transform_messages: list[dict[str, Any]], post_transform_messages: list[dict[str, Any]]
    ) -> tuple[str, bool]:
        if self._messages_changed > 0:
            return f"{self._messages_changed} message(s) changed to incorporate name.", True
        else:
            return "No messages changed to incorporate name.", False
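For orientation, here is a minimal usage sketch of the transforms defined above, following the add_to_agent pattern shown in the TextMessageContentName docstring. The agent, model name, and the specific limits are illustrative placeholders, not part of the diff:

from autogen import ConversableAgent
from autogen.agentchat.contrib.capabilities import transform_messages, transforms

# Keep at most 10 messages (always retaining the first) and cap the history
# at 1,000 tokens, truncating each individual message to 500 tokens.
context_handling = transform_messages.TransformMessages(
    transforms=[
        transforms.MessageHistoryLimiter(max_messages=10, keep_first_message=True),
        transforms.MessageTokenLimiter(max_tokens=1000, max_tokens_per_message=500),
    ]
)

# Hypothetical agent; any ConversableAgent works the same way.
my_agent = ConversableAgent(name="assistant", llm_config={"model": "gpt-4o"})
context_handling.add_to_agent(my_agent)  # transforms run on the history before each inference

Transforms are applied in list order, so the history is first capped by message count and then by token count.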
autogen/agentchat/contrib/capabilities/transforms_util.py
@@ -0,0 +1,122 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0
#
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
# SPDX-License-Identifier: MIT
from collections.abc import Hashable
from typing import Any, Optional

from .... import token_count_utils
from ....cache.abstract_cache_base import AbstractCache
from ....oai.openai_utils import filter_config
from ....types import MessageContentType


def cache_key(content: MessageContentType, *args: Hashable) -> str:
    """Calculates the cache key for the given message content and any other hashable args.

    Args:
        content (MessageContentType): The message content to calculate the cache key for.
        *args: Any additional hashable args to include in the cache key.
    """
    str_keys = [str(key) for key in (content, *args)]
    return "".join(str_keys)


def cache_content_get(cache: Optional[AbstractCache], key: str) -> Optional[tuple[MessageContentType, ...]]:
    """Retrieves cached content from the cache.

    Args:
        cache (None or AbstractCache): The cache to retrieve the content from. If None, the cache is ignored.
        key (str): The key to retrieve the content from.
    """
    if cache:
        cached_value = cache.get(key)
        if cached_value:
            return cached_value


def cache_content_set(cache: Optional[AbstractCache], key: str, content: MessageContentType, *extra_values):
    """Sets content into the cache.

    Args:
        cache (None or AbstractCache): The cache to set the content into. If None, the cache is ignored.
        key (str): The key to set the content into.
        content (MessageContentType): The message content to set into the cache.
        *extra_values: Additional values to be passed to the cache.
    """
    if cache:
        cache_value = (content, *extra_values)
        cache.set(key, cache_value)


def min_tokens_reached(messages: list[dict[str, Any]], min_tokens: Optional[int]) -> bool:
    """Returns True if the total number of tokens in the messages is greater than or equal to the specified value.

    Args:
        messages (List[Dict]): A list of messages to check.
        min_tokens (None or int): The minimum number of tokens to check for.
    """
    if not min_tokens:
        return True

    messages_tokens = sum(count_text_tokens(msg["content"]) for msg in messages if "content" in msg)
    return messages_tokens >= min_tokens


def count_text_tokens(content: MessageContentType) -> int:
    """Calculates the number of text tokens in the given message content.

    Args:
        content (MessageContentType): The message content to calculate the number of text tokens for.
    """
    token_count = 0
    if isinstance(content, str):
        token_count = token_count_utils.count_token(content)
    elif isinstance(content, list):
        for item in content:
            if isinstance(item, str):
                token_count += token_count_utils.count_token(item)
            else:
                token_count += count_text_tokens(item.get("text", ""))
    return token_count


def is_content_right_type(content: Any) -> bool:
    """A helper function to check if the passed in content is of the right type."""
    return isinstance(content, (str, list))


def is_content_text_empty(content: MessageContentType) -> bool:
    """Checks if the content of the message does not contain any text.

    Args:
        content (MessageContentType): The message content to check.
    """
    if isinstance(content, str):
        return content == ""
    elif isinstance(content, list):
        texts = []
        for item in content:
            if isinstance(item, str):
                texts.append(item)
            elif isinstance(item, dict):
                texts.append(item.get("text", ""))
        return not any(texts)
    else:
        return True


def should_transform_message(message: dict[str, Any], filter_dict: Optional[dict[str, Any]], exclude: bool) -> bool:
    """Validates whether the transform should be applied according to the filter dictionary.

    Args:
        message (Dict[str, Any]): The message to validate.
        filter_dict (None or Dict[str, Any]): The filter dictionary to validate against. If None, the transform is always applied.
        exclude (bool): Whether to exclude messages that match the filter dictionary.
    """
    if not filter_dict:
        return True

    return len(filter_config([message], filter_dict, exclude)) > 0
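A short sketch of how these helpers compose, as they are used by the transforms above; the message contents and the 512-token threshold are made-up values for illustration:

from autogen.agentchat.contrib.capabilities import transforms_util

messages = [
    {"role": "user", "content": "Summarize the design."},
    {"role": "assistant", "content": [{"type": "text", "text": "It has three parts."}]},
]

# count_text_tokens handles both plain-string and multimodal (list) contents.
total = sum(transforms_util.count_text_tokens(m["content"]) for m in messages)

# Transforms call min_tokens_reached first and return the messages unchanged
# when the history is below the threshold.
if not transforms_util.min_tokens_reached(messages, 512):
    print(f"Only {total} tokens; the transform would be skipped.")

# Cache keys are the stringified content concatenated with any extra hashable args.
key = transforms_util.cache_key(messages[0]["content"], 512)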