ag2 0.9.1a1__py3-none-any.whl → 0.9.1.post0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ag2 might be problematic.
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info}/METADATA +264 -73
- ag2-0.9.1.post0.dist-info/RECORD +392 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info}/WHEEL +1 -2
- autogen/__init__.py +89 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +427 -0
- autogen/_website/generate_mkdocs.py +1174 -0
- autogen/_website/notebook_processor.py +476 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +412 -0
- autogen/agentchat/__init__.py +44 -0
- autogen/agentchat/agent.py +182 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +309 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +429 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +566 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +187 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
- autogen/agentchat/contrib/rag/query_engine.py +74 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
- autogen/agentchat/contrib/swarm_agent.py +1425 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +232 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +303 -0
- autogen/agentchat/conversable_agent.py +4020 -0
- autogen/agentchat/group/__init__.py +64 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +41 -0
- autogen/agentchat/group/context_variables.py +192 -0
- autogen/agentchat/group/group_tool_executor.py +202 -0
- autogen/agentchat/group/group_utils.py +591 -0
- autogen/agentchat/group/handoffs.py +244 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +237 -0
- autogen/agentchat/group/on_condition.py +58 -0
- autogen/agentchat/group/on_context_condition.py +54 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +159 -0
- autogen/agentchat/group/patterns/manual.py +176 -0
- autogen/agentchat/group/patterns/pattern.py +288 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +26 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/group_chat_target.py +132 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +413 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1694 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
- autogen/agentchat/realtime/experimental/function_observer.py +85 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +111 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +73 -0
- autogen/agents/contrib/time/time_tool_agent.py +51 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
- autogen/agents/experimental/document_agent/document_agent.py +461 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +380 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
- autogen/agents/experimental/document_agent/parser_utils.py +130 -0
- autogen/agents/experimental/document_agent/url_utils.py +426 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +77 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +62 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +75 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +102 -0
- autogen/cache/in_memory_cache.py +58 -0
- autogen/cache/redis_cache.py +123 -0
- autogen/code_utils.py +596 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +119 -0
- autogen/coding/docker_commandline_code_executor.py +268 -0
- autogen/coding/factory.py +47 -0
- autogen/coding/func_with_reqs.py +202 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +167 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +231 -0
- autogen/coding/jupyter/jupyter_code_executor.py +160 -0
- autogen/coding/jupyter/local_jupyter_server.py +172 -0
- autogen/coding/local_commandline_code_executor.py +405 -0
- autogen/coding/markdown_code_extractor.py +45 -0
- autogen/coding/utils.py +56 -0
- autogen/doc_utils.py +34 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1010 -0
- autogen/events/base_event.py +99 -0
- autogen/events/client_events.py +167 -0
- autogen/events/helpers.py +36 -0
- autogen/events/print_event.py +46 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +80 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +225 -0
- autogen/fast_depends/core/model.py +576 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +29 -0
- autogen/fast_depends/dependencies/provider.py +39 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +280 -0
- autogen/fast_depends/utils.py +187 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +178 -0
- autogen/import_utils.py +526 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
- autogen/interop/langchain/langchain_tool.py +82 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +113 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
- autogen/interop/registry.py +69 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +56 -0
- autogen/io/run_response.py +293 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +213 -0
- autogen/json_utils.py +43 -0
- autogen/llm_config.py +379 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +128 -0
- autogen/logger/file_logger.py +261 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +523 -0
- autogen/math_utils.py +339 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/mcp_client.py +208 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +948 -0
- autogen/messages/base_message.py +107 -0
- autogen/messages/client_messages.py +171 -0
- autogen/messages/print_message.py +49 -0
- autogen/oai/__init__.py +53 -0
- autogen/oai/anthropic.py +714 -0
- autogen/oai/bedrock.py +628 -0
- autogen/oai/cerebras.py +299 -0
- autogen/oai/client.py +1435 -0
- autogen/oai/client_utils.py +169 -0
- autogen/oai/cohere.py +479 -0
- autogen/oai/gemini.py +990 -0
- autogen/oai/gemini_types.py +129 -0
- autogen/oai/groq.py +305 -0
- autogen/oai/mistral.py +303 -0
- autogen/oai/oai_models/__init__.py +11 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +87 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +86 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
- autogen/oai/oai_models/completion_usage.py +60 -0
- autogen/oai/ollama.py +643 -0
- autogen/oai/openai_utils.py +881 -0
- autogen/oai/together.py +370 -0
- autogen/retrieve_utils.py +491 -0
- autogen/runtime_logging.py +160 -0
- autogen/token_count_utils.py +267 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +41 -0
- autogen/tools/dependency_injection.py +254 -0
- autogen/tools/experimental/__init__.py +43 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +161 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +328 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +183 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
- autogen/tools/function_utils.py +411 -0
- autogen/tools/tool.py +187 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- ag2-0.9.1a1.dist-info/RECORD +0 -6
- ag2-0.9.1a1.dist-info/top_level.txt +0 -1
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/LICENSE +0 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/NOTICE.md +0 -0
autogen/agentchat/contrib/retrieve_user_proxy_agent.py
@@ -0,0 +1,703 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# SPDX-License-Identifier: MIT
+import hashlib
+import os
+import re
+import uuid
+from typing import Any, Callable, Literal, Optional, Union
+
+from ...code_utils import extract_code
+from ...formatting_utils import colored
+from ...import_utils import optional_import_block, require_optional_import
+from ...retrieve_utils import (
+    TEXT_FORMATS,
+    create_vector_db_from_dir,
+    get_files_from_dir,
+    query_vector_db,
+    split_files_to_chunks,
+)
+from ...token_count_utils import count_token
+from .. import UserProxyAgent
+from ..agent import Agent
+from ..contrib.vectordb.base import Document, QueryResults, VectorDB, VectorDBFactory
+from ..contrib.vectordb.utils import (
+    chroma_results_to_query_results,
+    filter_results_by_distance,
+    get_logger,
+)
+
+__all__ = ["RetrieveUserProxyAgent"]
+
+with optional_import_block():
+    import chromadb
+    from IPython import get_ipython
+
+logger = get_logger(__name__)
+
+PROMPT_DEFAULT = """You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the
+context provided by the user. You should follow the following steps to answer a question:
+Step 1, you estimate the user's intent based on the question and context. The intent can be a code generation task or
+a question answering task.
+Step 2, you reply based on the intent.
+If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.
+If user's intent is code generation, you must obey the following rules:
+Rule 1. You MUST NOT install any packages because all the packages needed are already installed.
+Rule 2. You must follow the formats below to write your code:
+```language
+# your code
+```
+
+If user's intent is question answering, you must give as short an answer as possible.
+
+User's question is: {input_question}
+
+Context is: {input_context}
+
+The source of the context is: {input_sources}
+
+If you can answer the question, in the end of your answer, add the source of the context in the format of `Sources: source1, source2, ...`.
+"""
+
+PROMPT_CODE = """You're a retrieve augmented coding assistant. You answer user's questions based on your own knowledge and the
+context provided by the user.
+If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.
+For code generation, you must obey the following rules:
+Rule 1. You MUST NOT install any packages because all the packages needed are already installed.
+Rule 2. You must follow the formats below to write your code:
+```language
+# your code
+```
+
+User's question is: {input_question}
+
+Context is: {input_context}
+"""
+
+PROMPT_QA = """You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the
+context provided by the user.
+If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.
+You must give as short an answer as possible.
+
+User's question is: {input_question}
+
+Context is: {input_context}
+"""
+
+HASH_LENGTH = int(os.environ.get("HASH_LENGTH", 8))
+UPDATE_CONTEXT_IN_PROMPT = "you should reply exactly `UPDATE CONTEXT`"
+
+
+@require_optional_import(["chromadb", "IPython"], "retrievechat")
+class RetrieveUserProxyAgent(UserProxyAgent):
+    """(In preview) The Retrieval-Augmented User Proxy retrieves document chunks based on the embedding
+    similarity, and sends them along with the question to the Retrieval-Augmented Assistant.
+    """
+
+    def __init__(
+        self,
+        name="RetrieveChatAgent",  # default set to RetrieveChatAgent
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "ALWAYS",
+        is_termination_msg: Optional[Callable[[dict[str, Any]], bool]] = None,
+        retrieve_config: Optional[dict[str, Any]] = None,  # config for the retrieve agent
+        **kwargs: Any,
+    ):
+        r"""Args:
+        name (str): name of the agent.
+
+        human_input_mode (str): whether to ask for human inputs every time a message is received.
+            Possible values are "ALWAYS", "TERMINATE", "NEVER".
+            1. When "ALWAYS", the agent prompts for human input every time a message is received.
+                Under this mode, the conversation stops when the human input is "exit",
+                or when is_termination_msg is True and there is no human input.
+            2. When "TERMINATE", the agent prompts for human input only when a termination
+                message is received or the number of auto replies reaches
+                the max_consecutive_auto_reply.
+            3. When "NEVER", the agent will never prompt for human input. Under this mode, the
+                conversation stops when the number of auto replies reaches the
+                max_consecutive_auto_reply or when is_termination_msg is True.
+
+        is_termination_msg (function): a function that takes a message in the form of a dictionary
+            and returns a boolean value indicating if this received message is a termination message.
+            The dict can contain the following keys: "content", "role", "name", "function_call".
+
+        retrieve_config (dict or None): config for the retrieve agent.
+
+            To use default config, set to None. Otherwise, set to a dictionary with the
+            following keys:
+            - `task` (Optional, str) - the task of the retrieve chat. Possible values are
+                "code", "qa" and "default". System prompt will be different for different tasks.
+                The default value is `default`, which supports both code and qa, and provides
+                source information in the end of the response.
+            - `vector_db` (Optional, Union[str, VectorDB]) - the vector db for the retrieve chat.
+                If it's a string, it should be the type of the vector db, such as "chroma"; otherwise,
+                it should be an instance of the VectorDB protocol. Default is "chroma".
+                Set `None` to use the deprecated `client`.
+            - `db_config` (Optional, Dict) - the config for the vector db. Default is `{}`. Please make
+                sure you understand the config for the vector db you are using, otherwise, leave it as `{}`.
+                Only valid when `vector_db` is a string.
+            - `client` (Optional, chromadb.Client) - the chromadb client. If key not provided, a
+                default client `chromadb.Client()` will be used. If you want to use other
+                vector db, extend this class and override the `retrieve_docs` function.
+                *[Deprecated]* use `vector_db` instead.
+            - `docs_path` (Optional, Union[str, List[str]]) - the path to the docs directory. It
+                can also be the path to a single file, the url to a single file or a list
+                of directories, files and urls. Default is None, which works only if the
+                collection is already created.
+            - `extra_docs` (Optional, bool) - when true, allows adding documents with unique IDs
+                without overwriting existing ones; when false, it replaces existing documents
+                using default IDs, risking collection overwrite. When set to true, it enables
+                the system to assign unique IDs starting from "length+i" for new document
+                chunks, preventing the replacement of existing documents and facilitating the
+                addition of more content to the collection.
+                By default, "extra_docs" is set to false, starting document IDs from zero.
+                This poses a risk as new documents might overwrite existing ones, potentially
+                causing unintended loss or alteration of data in the collection.
+                *[Deprecated]* use `new_docs` when using `vector_db` instead of `client`.
+            - `new_docs` (Optional, bool) - when True, only adds new documents to the collection;
+                when False, updates existing documents and adds new ones. Default is True.
+                Document id is used to determine if a document is new or existing. By default, the
+                id is the hash value of the content.
+            - `model` (Optional, str) - the model to use for the retrieve chat.
+                If key not provided, a default model `gpt-4` will be used.
+            - `chunk_token_size` (Optional, int) - the chunk token size for the retrieve chat.
+                If key not provided, a default size `max_tokens * 0.4` will be used.
+            - `context_max_tokens` (Optional, int) - the context max token size for the
+                retrieve chat.
+                If key not provided, a default size `max_tokens * 0.8` will be used.
+            - `chunk_mode` (Optional, str) - the chunk mode for the retrieve chat. Possible values
+                are "multi_lines" and "one_line". If key not provided, a default mode
+                `multi_lines` will be used.
+            - `must_break_at_empty_line` (Optional, bool) - chunk will only break at empty line
+                if True. Default is True.
+                If chunk_mode is "one_line", this parameter will be ignored.
+            - `embedding_model` (Optional, str) - the embedding model to use for the retrieve chat.
+                If key not provided, a default model `all-MiniLM-L6-v2` will be used. All available
+                models can be found at `https://www.sbert.net/docs/sentence_transformer/pretrained_models.html`.
+                The default model is a fast model. If you want to use a high performance model,
+                `all-mpnet-base-v2` is recommended.
+                *[Deprecated]* not needed when using `vector_db` instead of `client`.
+            - `embedding_function` (Optional, Callable) - the embedding function for creating the
+                vector db. Default is None, SentenceTransformer with the given `embedding_model`
+                will be used. If you want to use OpenAI, Cohere, HuggingFace or other embedding
+                functions, you can pass it here;
+                follow the examples in `https://docs.trychroma.com/embeddings`.
+            - `customized_prompt` (Optional, str) - the customized prompt for the retrieve chat.
+                Default is None.
+            - `customized_answer_prefix` (Optional, str) - the customized answer prefix for the
+                retrieve chat. Default is "".
+                If not "" and the customized_answer_prefix is not in the answer,
+                `Update Context` will be triggered.
+            - `update_context` (Optional, bool) - if False, will not apply `Update Context` for
+                interactive retrieval. Default is True.
+            - `collection_name` (Optional, str) - the name of the collection.
+                If key not provided, a default name `ag2-docs` will be used.
+            - `get_or_create` (Optional, bool) - Whether to get the collection if it exists. Default is False.
+            - `overwrite` (Optional, bool) - Whether to overwrite the collection if it exists. Default is False.
+                Case 1. if the collection does not exist, create the collection.
+                Case 2. the collection exists, if overwrite is True, it will overwrite the collection.
+                Case 3. the collection exists and overwrite is False, if get_or_create is True, it will get the collection,
+                    otherwise it raises a ValueError.
+            - `custom_token_count_function` (Optional, Callable) - a custom function to count the
+                number of tokens in a string.
+                The function should take (text:str, model:str) as input and return the
+                token_count(int). The retrieve_config["model"] will be passed in the function.
+                Default is autogen.token_count_utils.count_token that uses tiktoken, which may
+                not be accurate for non-OpenAI models.
+            - `custom_text_split_function` (Optional, Callable) - a custom function to split a
+                string into a list of strings.
+                Default is None, will use the default function in
+                `autogen.retrieve_utils.split_text_to_chunks`.
+            - `custom_text_types` (Optional, List[str]) - a list of file types to be processed.
+                Default is `autogen.retrieve_utils.TEXT_FORMATS`.
+                This only applies to files under the directories in `docs_path`. Explicitly
+                included files and urls will be chunked regardless of their types.
+            - `recursive` (Optional, bool) - whether to search documents recursively in the
+                docs_path. Default is True.
+            - `distance_threshold` (Optional, float) - the threshold for the distance score, only
+                distance smaller than it will be returned. Will be ignored if < 0. Default is -1.
+
+        `**kwargs` (dict): other kwargs in [UserProxyAgent](https://docs.ag2.ai/latest/docs/api-reference/autogen/UserProxyAgent).
+
+        Example:
+        Example of overriding retrieve_docs - If you have set up a customized vector db, and it's
+        not compatible with chromadb, you can easily plug it in with the code below.
+        *[Deprecated]* use `vector_db` instead. You can extend VectorDB and pass it to the agent.
+        ```python
+        class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent):
+            def query_vector_db(
+                self,
+                query_texts: List[str],
+                n_results: int = 10,
+                search_string: str = "",
+                **kwargs: Any,
+            ) -> Dict[str, Union[List[str], List[List[str]]]]:
+                # define your own query function here
+                pass
+
+            def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = "", **kwargs):
+                results = self.query_vector_db(
+                    query_texts=[problem],
+                    n_results=n_results,
+                    search_string=search_string,
+                    **kwargs,
+                )
+
+                self._results = results
+                print("doc_ids: ", results["ids"])
+        ```
+        """
+        super().__init__(
+            name=name,
+            human_input_mode=human_input_mode,
+            **kwargs,
+        )
+
+        self._retrieve_config = {} if retrieve_config is None else retrieve_config
+        self._task = self._retrieve_config.get("task", "default")
+        self._vector_db = self._retrieve_config.get("vector_db", "chroma")
+        self._db_config = self._retrieve_config.get("db_config", {})
+        self._docs_path = self._retrieve_config.get("docs_path", None)
+        self._extra_docs = self._retrieve_config.get("extra_docs", False)
+        self._new_docs = self._retrieve_config.get("new_docs", True)
+        self._collection_name = self._retrieve_config.get("collection_name", "ag2-docs")
+        if "docs_path" not in self._retrieve_config:
+            logger.warning(
+                "docs_path is not provided in retrieve_config. "
+                f"Will raise ValueError if the collection `{self._collection_name}` doesn't exist. "
+                "Set docs_path to None to suppress this warning."
+            )
+        self._model = self._retrieve_config.get("model", "gpt-4")
+        self._max_tokens = self.get_max_tokens(self._model)
+        self._chunk_token_size = int(self._retrieve_config.get("chunk_token_size", self._max_tokens * 0.4))
+        self._chunk_mode = self._retrieve_config.get("chunk_mode", "multi_lines")
+        self._must_break_at_empty_line = self._retrieve_config.get("must_break_at_empty_line", True)
+        self._embedding_model = self._retrieve_config.get("embedding_model", "all-MiniLM-L6-v2")
+        self._embedding_function = self._retrieve_config.get("embedding_function", None)
+        self.customized_prompt = self._retrieve_config.get("customized_prompt", None)
+        self.customized_answer_prefix = self._retrieve_config.get("customized_answer_prefix", "").upper()
+        self.update_context = self._retrieve_config.get("update_context", True)
+        self._get_or_create = self._retrieve_config.get("get_or_create", False) if self._docs_path is not None else True
+        self._overwrite = self._retrieve_config.get("overwrite", False)
+        self.custom_token_count_function = self._retrieve_config.get("custom_token_count_function", count_token)
+        self.custom_text_split_function = self._retrieve_config.get("custom_text_split_function", None)
+        self._custom_text_types = self._retrieve_config.get("custom_text_types", TEXT_FORMATS)
+        self._recursive = self._retrieve_config.get("recursive", True)
+        self._context_max_tokens = self._retrieve_config.get("context_max_tokens", self._max_tokens * 0.8)
+        self._collection = self._docs_path is None  # whether the collection is created
+        self._ipython = get_ipython()
+        self._doc_idx = -1  # the index of the current used doc
+        self._results = []  # the results of the current query
+        self._intermediate_answers = set()  # the intermediate answers
+        self._doc_contents = []  # the contents of the current used doc
+        self._doc_ids = []  # the ids of the current used doc
+        self._current_docs_in_context = []  # the ids of the current context sources
+        self._search_string = ""  # the search string used in the current query
+        self._distance_threshold = self._retrieve_config.get("distance_threshold", -1)
+        # update the termination message function
+        self._is_termination_msg = (
+            self._is_termination_msg_retrievechat if is_termination_msg is None else is_termination_msg
+        )
+        if isinstance(self._vector_db, str):
+            if not isinstance(self._db_config, dict):
+                raise ValueError("`db_config` should be a dictionary.")
+            if "embedding_function" in self._retrieve_config:
+                self._db_config["embedding_function"] = self._embedding_function
+            self._vector_db = VectorDBFactory.create_vector_db(db_type=self._vector_db, **self._db_config)
+        self._client = self._retrieve_config.get("client", None)
+        if self._client is None and hasattr(self._vector_db, "client"):
+            # Since the client arg is deprecated, let's check
+            # if the `vector_db` instance has a 'client' attribute.
+            self._client = getattr(self._vector_db, "client", None)
+        if self._client is None:
+            self._client = chromadb.Client()
+        self.register_reply(Agent, RetrieveUserProxyAgent._generate_retrieve_user_reply, position=2)
+        self.register_hook(
+            hookable_method="process_message_before_send",
+            hook=self._check_update_context_before_send,
+        )
+
+    def _init_db(self):
+        if not self._vector_db:
+            return
+
+        is_to_chunk = False  # whether to chunk the raw files
+        if self._new_docs:
+            is_to_chunk = True
+        if not self._docs_path:
+            try:
+                self._vector_db.get_collection(self._collection_name)
+                logger.warning(f"`docs_path` is not provided. Use the existing collection `{self._collection_name}`.")
+                self._overwrite = False
+                self._get_or_create = True
+                is_to_chunk = False
+            except ValueError:
+                raise ValueError(
+                    "`docs_path` is not provided. "
+                    f"The collection `{self._collection_name}` doesn't exist either. "
+                    "Please provide `docs_path` or create the collection first."
+                )
+        elif self._get_or_create and not self._overwrite:
+            try:
+                self._vector_db.get_collection(self._collection_name)
+                logger.info(f"Use the existing collection `{self._collection_name}`.", color="green")
+            except ValueError:
+                is_to_chunk = True
+        else:
+            is_to_chunk = True
+
+        self._vector_db.active_collection = self._vector_db.create_collection(
+            self._collection_name, overwrite=self._overwrite, get_or_create=self._get_or_create
+        )
+
+        docs = None
+        if is_to_chunk:
+            if self.custom_text_split_function is not None:
+                chunks, sources = split_files_to_chunks(
+                    get_files_from_dir(self._docs_path, self._custom_text_types, self._recursive),
+                    custom_text_split_function=self.custom_text_split_function,
+                )
+            else:
+                chunks, sources = split_files_to_chunks(
+                    get_files_from_dir(self._docs_path, self._custom_text_types, self._recursive),
+                    self._chunk_token_size,
+                    self._chunk_mode,
+                    self._must_break_at_empty_line,
+                )
+            logger.info(f"Found {len(chunks)} chunks.")
+
+            if self._new_docs:
+                all_docs_ids = {
+                    doc["id"]
+                    for doc in self._vector_db.get_docs_by_ids(ids=None, collection_name=self._collection_name)
+                }
+            else:
+                all_docs_ids = set()
+
+            chunk_ids = (
+                [hashlib.blake2b(chunk.encode("utf-8")).hexdigest()[:HASH_LENGTH] for chunk in chunks]
+                if self._vector_db.type != "qdrant"
+                else [str(uuid.UUID(hex=hashlib.md5(chunk.encode("utf-8")).hexdigest())) for chunk in chunks]
+            )
+            chunk_ids_set = set(chunk_ids)
+            chunk_ids_set_idx = [chunk_ids.index(hash_value) for hash_value in chunk_ids_set]
+            docs = [
+                Document(id=chunk_ids[idx], content=chunks[idx], metadata=sources[idx])
+                for idx in chunk_ids_set_idx
+                if chunk_ids[idx] not in all_docs_ids
+            ]
+
+        self._vector_db.insert_docs(docs=docs, collection_name=self._collection_name, upsert=True)
+
+    def _is_termination_msg_retrievechat(self, message):
+        """Check if a message is a termination message.
+        For code generation, terminate when no code block is detected. Currently only detects python code blocks.
+        For question answering, terminate when the context is not updated, i.e., an answer is given.
+        """
+        if isinstance(message, dict):
+            message = message.get("content")
+            if message is None:
+                return False
+        cb = extract_code(message)
+        contain_code = False
+        for c in cb:
+            # todo: support more languages
+            if c[0] == "python":
+                contain_code = True
+                break
+        update_context_case1, update_context_case2 = self._check_update_context(message)
+        return not (contain_code or update_context_case1 or update_context_case2)
+
+    def _check_update_context_before_send(self, sender, message, recipient, silent):
+        if not isinstance(message, (str, dict)):
+            return message
+        elif isinstance(message, dict):
+            msg_text = message.get("content", message)
+        else:
+            msg_text = message
+
+        if msg_text.strip().upper() == "UPDATE CONTEXT":
+            doc_contents = self._get_context(self._results)
+
+            # Always use self.problem as the query text to retrieve docs, but each time we replace the context with the
+            # next similar docs in the retrieved doc results.
+            if not doc_contents:
+                for _tmp_retrieve_count in range(1, 5):
+                    self._reset(intermediate=True)
+                    self.retrieve_docs(
+                        self.problem, self.n_results * (2 * _tmp_retrieve_count + 1), self._search_string
+                    )
+                    doc_contents = self._get_context(self._results)
+                    if doc_contents or self.n_results * (2 * _tmp_retrieve_count + 1) >= len(self._results[0]):
+                        break
+            msg_text = self._generate_message(doc_contents, task=self._task)
+
+        if isinstance(message, dict):
+            message["content"] = msg_text
+        return message
+
+    @staticmethod
+    def get_max_tokens(model="gpt-3.5-turbo"):
+        if "32k" in model:
+            return 32000
+        elif "16k" in model:
+            return 16000
+        elif "gpt-4" in model:
+            return 8000
+        else:
+            return 4000
+
+    def _reset(self, intermediate=False):
+        self._doc_idx = -1  # the index of the current used doc
+        self._results = []  # the results of the current query
+        if not intermediate:
+            self._intermediate_answers = set()  # the intermediate answers
+            self._doc_contents = []  # the contents of the current used doc
+            self._doc_ids = []  # the ids of the current used doc
+
+    def _get_context(self, results: QueryResults):
+        doc_contents = ""
+        self._current_docs_in_context = []
+        current_tokens = 0
+        _doc_idx = self._doc_idx
+        _tmp_retrieve_count = 0
+        for idx, doc in enumerate(results[0]):
+            doc = doc[0]
+            if idx <= _doc_idx:
+                continue
+            if doc["id"] in self._doc_ids:
+                continue
+            _doc_tokens = self.custom_token_count_function(doc["content"], self._model)
+            if _doc_tokens > self._context_max_tokens:
+                func_print = f"Skip doc_id {doc['id']} as it is too long to fit in the context."
+                print(colored(func_print, "green"), flush=True)
+                self._doc_idx = idx
+                continue
+            if current_tokens + _doc_tokens > self._context_max_tokens:
+                break
+            func_print = f"Adding content of doc {doc['id']} to context."
+            print(colored(func_print, "green"), flush=True)
+            current_tokens += _doc_tokens
+            doc_contents += doc["content"] + "\n"
+            _metadata = doc.get("metadata")
+            if isinstance(_metadata, dict):
+                self._current_docs_in_context.append(_metadata.get("source", ""))
+            self._doc_idx = idx
+            self._doc_ids.append(doc["id"])
+            self._doc_contents.append(doc["content"])
+            _tmp_retrieve_count += 1
+            if _tmp_retrieve_count >= self.n_results:
+                break
+        return doc_contents
+
+    def _generate_message(self, doc_contents, task="default"):
+        if not doc_contents:
+            print(colored("No more context, will terminate.", "green"), flush=True)
+            return "TERMINATE"
+        if self.customized_prompt:
+            message = self.customized_prompt.format(input_question=self.problem, input_context=doc_contents)
+        elif task.upper() == "CODE":
+            message = PROMPT_CODE.format(input_question=self.problem, input_context=doc_contents)
+        elif task.upper() == "QA":
+            message = PROMPT_QA.format(input_question=self.problem, input_context=doc_contents)
+        elif task.upper() == "DEFAULT":
+            message = PROMPT_DEFAULT.format(
+                input_question=self.problem, input_context=doc_contents, input_sources=self._current_docs_in_context
+            )
+        else:
+            raise NotImplementedError(f"task {task} is not implemented.")
+        return message
+
+    def _check_update_context(self, message):
+        if isinstance(message, dict):
+            message = message.get("content", "")
+        elif not isinstance(message, str):
+            message = ""
+        update_context_case1 = "UPDATE CONTEXT" in message.upper() and UPDATE_CONTEXT_IN_PROMPT not in message
+        update_context_case2 = self.customized_answer_prefix and self.customized_answer_prefix not in message.upper()
+        return update_context_case1, update_context_case2
+
+    def _generate_retrieve_user_reply(
+        self,
+        messages: Optional[list[dict[str, Any]]] = None,
+        sender: Optional[Agent] = None,
+        config: Optional[Any] = None,
+    ) -> tuple[bool, Optional[Union[str, dict[str, Any]]]]:
+        """In this function, we will update the context and reset the conversation based on different conditions.
+        We'll update the context and reset the conversation if update_context is True and either of the following:
+        (1) the last message contains "UPDATE CONTEXT",
+        (2) the last message doesn't contain "UPDATE CONTEXT" and the customized_answer_prefix is not in the message.
+        """
+        if config is None:
+            config = self
+        if messages is None:
+            messages = self._oai_messages[sender]
+        message = messages[-1]
+        update_context_case1, update_context_case2 = self._check_update_context(message)
+        if (update_context_case1 or update_context_case2) and self.update_context:
+            print(colored("Updating context and resetting conversation.", "green"), flush=True)
+            # extract the first sentence in the response as the intermediate answer
+            _message = message.get("content", "").split("\n")[0].strip()
+            _intermediate_info = re.split(r"(?<=[.!?])\s+", _message)
+            self._intermediate_answers.add(_intermediate_info[0])
+
+            if update_context_case1:
+                # try to get more context from the current retrieved doc results because the results may be too long to fit
+                # in the LLM context.
+                doc_contents = self._get_context(self._results)
+
+                # Always use self.problem as the query text to retrieve docs, but each time we replace the context with the
+                # next similar docs in the retrieved doc results.
+                if not doc_contents:
+                    for _tmp_retrieve_count in range(1, 5):
+                        self._reset(intermediate=True)
+                        self.retrieve_docs(
+                            self.problem, self.n_results * (2 * _tmp_retrieve_count + 1), self._search_string
+                        )
+                        doc_contents = self._get_context(self._results)
+                        if doc_contents or self.n_results * (2 * _tmp_retrieve_count + 1) >= len(self._results[0]):
+                            break
+            elif update_context_case2:
+                # Use the current intermediate info as the query text to retrieve docs, and each time we append the top similar
+                # docs in the retrieved doc results to the context.
+                for _tmp_retrieve_count in range(5):
+                    self._reset(intermediate=True)
+                    self.retrieve_docs(
+                        _intermediate_info[0], self.n_results * (2 * _tmp_retrieve_count + 1), self._search_string
+                    )
+                    self._get_context(self._results)
+                    doc_contents = "\n".join(self._doc_contents)  # + "\n" + "\n".join(self._intermediate_answers)
+                    if doc_contents or self.n_results * (2 * _tmp_retrieve_count + 1) >= len(self._results[0]):
+                        break
+
+            self.clear_history()
+            sender.clear_history()
+            return True, self._generate_message(doc_contents, task=self._task)
+        else:
+            return False, None
+
+    def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""):
+        """Retrieve docs based on the given problem and assign the results to the class property `_results`.
+        The retrieved docs should be of type `QueryResults`, which is a list of tuples containing the document and
+        the distance.
+
+        Args:
+            problem (str): the problem to be solved.
+            n_results (int): the number of results to be retrieved. Default is 20.
+            search_string (str): only docs that contain an exact match of this string will be retrieved. Default is "".
+                Not used if the vector_db doesn't support it.
+
+        Returns:
+            None.
+        """
+        if isinstance(self._vector_db, VectorDB):
+            if not self._collection or not self._get_or_create:
+                print("Trying to create collection.")
+                self._init_db()
+                self._collection = True
+                self._get_or_create = True
+
+            kwargs = {}
+            if hasattr(self._vector_db, "type") and self._vector_db.type == "chroma":
+                kwargs["where_document"] = {"$contains": search_string} if search_string else None
+            results = self._vector_db.retrieve_docs(
+                queries=[problem],
+                n_results=n_results,
+                collection_name=self._collection_name,
+                distance_threshold=self._distance_threshold,
+                **kwargs,
+            )
+            self._search_string = search_string
+            self._results = results
+            print("VectorDB returns doc_ids: ", [[r[0]["id"] for r in rr] for rr in results])
+            return
+
+        if not self._collection or not self._get_or_create:
+            print("Trying to create collection.")
+            self._client = create_vector_db_from_dir(
+                dir_path=self._docs_path,
+                max_tokens=self._chunk_token_size,
+                client=self._client,
+                collection_name=self._collection_name,
+                chunk_mode=self._chunk_mode,
+                must_break_at_empty_line=self._must_break_at_empty_line,
+                embedding_model=self._embedding_model,
+                get_or_create=self._get_or_create,
+                embedding_function=self._embedding_function,
+                custom_text_split_function=self.custom_text_split_function,
+                custom_text_types=self._custom_text_types,
+                recursive=self._recursive,
+                extra_docs=self._extra_docs,
+            )
+            self._collection = True
+            self._get_or_create = True
+
+        results = query_vector_db(
+            query_texts=[problem],
+            n_results=n_results,
+            search_string=search_string,
+            client=self._client,
+            collection_name=self._collection_name,
+            embedding_model=self._embedding_model,
+            embedding_function=self._embedding_function,
+        )
+        results["contents"] = results.pop("documents")
+        results = chroma_results_to_query_results(results, "distances")
+        results = filter_results_by_distance(results, self._distance_threshold)
+
+        self._search_string = search_string
+        self._results = results
+        print("doc_ids: ", [[r[0]["id"] for r in rr] for rr in results])
+
+    @staticmethod
+    def message_generator(sender, recipient, context):
+        """Generate an initial message with the given context for the RetrieveUserProxyAgent.
+
+        Args:
+            sender (Agent): the sender agent. It should be the instance of RetrieveUserProxyAgent.
+            recipient (Agent): the recipient agent. Usually it's the assistant agent.
+            context (dict): the context for the message generation. It should contain the following keys:
+                - `problem` (str) - the problem to be solved.
+                - `n_results` (int) - the number of results to be retrieved. Default is 20.
+                - `search_string` (str) - only docs that contain an exact match of this string will be retrieved. Default is "".
+
+        Returns:
+            str: the generated message ready to be sent to the recipient agent.
+        """
+        sender._reset()
+
+        problem = context.get("problem", "")
+        n_results = context.get("n_results", 20)
+        search_string = context.get("search_string", "")
+
+        sender.retrieve_docs(problem, n_results, search_string)
+        sender.problem = problem
+        sender.n_results = n_results
+        doc_contents = sender._get_context(sender._results)
+        message = sender._generate_message(doc_contents, sender._task)
+        return message
+
+    def run_code(self, code, **kwargs):
+        lang = kwargs.get("lang")
+        if code.startswith("!") or code.startswith("pip") or lang in ["bash", "shell", "sh"]:
+            return (
+                0,
+                "You MUST NOT install any packages because all the packages needed are already installed.",
+                None,
+            )
+        if self._ipython is None or lang != "python":
+            return super().run_code(code, **kwargs)
+        else:
+            result = self._ipython.run_cell(code)
+            log = str(result.result)
+            exitcode = 0 if result.success else 1
+            if result.error_before_exec is not None:
+                log += f"\n{result.error_before_exec}"
+                exitcode = 1
+            if result.error_in_exec is not None:
+                log += f"\n{result.error_in_exec}"
+                exitcode = 1
+            return exitcode, log, None