ag2 0.9.1a1__py3-none-any.whl → 0.9.1.post0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ag2 might be problematic.
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info}/METADATA +264 -73
- ag2-0.9.1.post0.dist-info/RECORD +392 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info}/WHEEL +1 -2
- autogen/__init__.py +89 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +427 -0
- autogen/_website/generate_mkdocs.py +1174 -0
- autogen/_website/notebook_processor.py +476 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +412 -0
- autogen/agentchat/__init__.py +44 -0
- autogen/agentchat/agent.py +182 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +309 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +429 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +566 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +187 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
- autogen/agentchat/contrib/rag/query_engine.py +74 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
- autogen/agentchat/contrib/swarm_agent.py +1425 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +232 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +303 -0
- autogen/agentchat/conversable_agent.py +4020 -0
- autogen/agentchat/group/__init__.py +64 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +41 -0
- autogen/agentchat/group/context_variables.py +192 -0
- autogen/agentchat/group/group_tool_executor.py +202 -0
- autogen/agentchat/group/group_utils.py +591 -0
- autogen/agentchat/group/handoffs.py +244 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +237 -0
- autogen/agentchat/group/on_condition.py +58 -0
- autogen/agentchat/group/on_context_condition.py +54 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +159 -0
- autogen/agentchat/group/patterns/manual.py +176 -0
- autogen/agentchat/group/patterns/pattern.py +288 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +26 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/group_chat_target.py +132 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +413 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1694 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
- autogen/agentchat/realtime/experimental/function_observer.py +85 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +111 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +73 -0
- autogen/agents/contrib/time/time_tool_agent.py +51 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
- autogen/agents/experimental/document_agent/document_agent.py +461 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +380 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
- autogen/agents/experimental/document_agent/parser_utils.py +130 -0
- autogen/agents/experimental/document_agent/url_utils.py +426 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +77 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +62 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +75 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +102 -0
- autogen/cache/in_memory_cache.py +58 -0
- autogen/cache/redis_cache.py +123 -0
- autogen/code_utils.py +596 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +119 -0
- autogen/coding/docker_commandline_code_executor.py +268 -0
- autogen/coding/factory.py +47 -0
- autogen/coding/func_with_reqs.py +202 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +167 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +231 -0
- autogen/coding/jupyter/jupyter_code_executor.py +160 -0
- autogen/coding/jupyter/local_jupyter_server.py +172 -0
- autogen/coding/local_commandline_code_executor.py +405 -0
- autogen/coding/markdown_code_extractor.py +45 -0
- autogen/coding/utils.py +56 -0
- autogen/doc_utils.py +34 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1010 -0
- autogen/events/base_event.py +99 -0
- autogen/events/client_events.py +167 -0
- autogen/events/helpers.py +36 -0
- autogen/events/print_event.py +46 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +80 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +225 -0
- autogen/fast_depends/core/model.py +576 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +29 -0
- autogen/fast_depends/dependencies/provider.py +39 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +280 -0
- autogen/fast_depends/utils.py +187 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +178 -0
- autogen/import_utils.py +526 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
- autogen/interop/langchain/langchain_tool.py +82 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +113 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
- autogen/interop/registry.py +69 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +56 -0
- autogen/io/run_response.py +293 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +213 -0
- autogen/json_utils.py +43 -0
- autogen/llm_config.py +379 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +128 -0
- autogen/logger/file_logger.py +261 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +523 -0
- autogen/math_utils.py +339 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/mcp_client.py +208 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +948 -0
- autogen/messages/base_message.py +107 -0
- autogen/messages/client_messages.py +171 -0
- autogen/messages/print_message.py +49 -0
- autogen/oai/__init__.py +53 -0
- autogen/oai/anthropic.py +714 -0
- autogen/oai/bedrock.py +628 -0
- autogen/oai/cerebras.py +299 -0
- autogen/oai/client.py +1435 -0
- autogen/oai/client_utils.py +169 -0
- autogen/oai/cohere.py +479 -0
- autogen/oai/gemini.py +990 -0
- autogen/oai/gemini_types.py +129 -0
- autogen/oai/groq.py +305 -0
- autogen/oai/mistral.py +303 -0
- autogen/oai/oai_models/__init__.py +11 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +87 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +86 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
- autogen/oai/oai_models/completion_usage.py +60 -0
- autogen/oai/ollama.py +643 -0
- autogen/oai/openai_utils.py +881 -0
- autogen/oai/together.py +370 -0
- autogen/retrieve_utils.py +491 -0
- autogen/runtime_logging.py +160 -0
- autogen/token_count_utils.py +267 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +41 -0
- autogen/tools/dependency_injection.py +254 -0
- autogen/tools/experimental/__init__.py +43 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +161 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +328 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +183 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
- autogen/tools/function_utils.py +411 -0
- autogen/tools/tool.py +187 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- ag2-0.9.1a1.dist-info/RECORD +0 -6
- ag2-0.9.1a1.dist-info/top_level.txt +0 -1
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/LICENSE +0 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.1.post0.dist-info/licenses}/NOTICE.md +0 -0
autogen/retrieve_utils.py (new file)
@@ -0,0 +1,491 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0
#
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
# SPDX-License-Identifier: MIT
import glob
import hashlib
import logging
import os
import re
from typing import Any, Callable, Optional, Union
from urllib.parse import urlparse

import requests

from .import_utils import optional_import_block, require_optional_import
from .token_count_utils import count_token

with optional_import_block():
    import chromadb
    import markdownify
    from bs4 import BeautifulSoup

    if chromadb.__version__ < "0.4.15":
        from chromadb.api import API
    else:
        from chromadb.api import ClientAPI as API  # noqa: N814
    import chromadb.utils.embedding_functions as ef
    import pypdf
    from chromadb.api.types import QueryResult


with optional_import_block() as result:
    from unstructured.partition.auto import partition

HAS_UNSTRUCTURED = result.is_successful

logger = logging.getLogger(__name__)
TEXT_FORMATS = [
    "txt",
    "json",
    "csv",
    "tsv",
    "md",
    "html",
    "htm",
    "rtf",
    "rst",
    "jsonl",
    "log",
    "xml",
    "yaml",
    "yml",
    "pdf",
    "mdx",
]
UNSTRUCTURED_FORMATS = [
    "doc",
    "docx",
    "epub",
    "msg",
    "odt",
    "org",
    "pdf",
    "ppt",
    "pptx",
    "rtf",
    "rst",
    "xlsx",
]  # These formats will be parsed by the 'unstructured' library, if installed.
if HAS_UNSTRUCTURED:
    TEXT_FORMATS += UNSTRUCTURED_FORMATS
    TEXT_FORMATS = list(set(TEXT_FORMATS))
VALID_CHUNK_MODES = frozenset({"one_line", "multi_lines"})
RAG_MINIMUM_MESSAGE_LENGTH = int(os.environ.get("RAG_MINIMUM_MESSAGE_LENGTH", 5))


def split_text_to_chunks(
    text: str,
    max_tokens: int = 4000,
    chunk_mode: str = "multi_lines",
    must_break_at_empty_line: bool = True,
    overlap: int = 0,  # number of overlapping lines
):
    """Split a long text into chunks of max_tokens."""
    if chunk_mode not in VALID_CHUNK_MODES:
        raise AssertionError
    if chunk_mode == "one_line":
        must_break_at_empty_line = False
        overlap = 0
    chunks = []
    lines = text.split("\n")
    num_lines = len(lines)
    if num_lines < 3 and must_break_at_empty_line:
        logger.warning("The input text has less than 3 lines. Set `must_break_at_empty_line` to `False`")
        must_break_at_empty_line = False
    lines_tokens = [count_token(line) for line in lines]
    sum_tokens = sum(lines_tokens)
    while sum_tokens > max_tokens:
        estimated_line_cut = 2 if chunk_mode == "one_line" else max(int(max_tokens / sum_tokens * len(lines)), 2)
        cnt = 0
        prev = ""
        for cnt in reversed(range(estimated_line_cut)):
            if must_break_at_empty_line and lines[cnt].strip() != "":
                continue
            if sum(lines_tokens[:cnt]) <= max_tokens:
                prev = "\n".join(lines[:cnt])
                break
        if cnt == 0:
            logger.warning(
                f"max_tokens is too small to fit a single line of text. Breaking this line:\n\t{lines[0][:100]} ..."
            )
            if not must_break_at_empty_line:
                split_len = max(
                    int(max_tokens / (lines_tokens[0] * 0.9 * len(lines[0]) + 0.1)), RAG_MINIMUM_MESSAGE_LENGTH
                )
                prev = lines[0][:split_len]
                lines[0] = lines[0][split_len:]
                lines_tokens[0] = count_token(lines[0])
            else:
                logger.warning("Failed to split docs with must_break_at_empty_line being True, set to False.")
                must_break_at_empty_line = False
        (
            chunks.append(prev) if len(prev) >= RAG_MINIMUM_MESSAGE_LENGTH else None
        )  # don't add chunks less than RAG_MINIMUM_MESSAGE_LENGTH characters
        lines = lines[cnt - overlap if cnt > overlap else cnt :]
        lines_tokens = lines_tokens[cnt - overlap if cnt > overlap else cnt :]
        sum_tokens = sum(lines_tokens)
    text_to_chunk = "\n".join(lines).strip()
    (
        chunks.append(text_to_chunk) if len(text_to_chunk) >= RAG_MINIMUM_MESSAGE_LENGTH else None
    )  # don't add chunks less than RAG_MINIMUM_MESSAGE_LENGTH characters
    return chunks


@require_optional_import("pypdf", "retrievechat")
def extract_text_from_pdf(file: str) -> str:
    """Extract text from PDF files"""
    text = ""
    with open(file, "rb") as f:
        reader = pypdf.PdfReader(f)
        if reader.is_encrypted:  # Check if the PDF is encrypted
            try:
                reader.decrypt("")
            except pypdf.errors.FileNotDecryptedError as e:
                logger.warning(f"Could not decrypt PDF {file}, {e}")
                return text  # Return empty text if PDF could not be decrypted

        for page_num in range(len(reader.pages)):
            page = reader.pages[page_num]
            text += page.extract_text()

    if not text.strip():  # Debugging line to check if text is empty
        logger.warning(f"Could not decrypt PDF {file}")

    return text


def split_files_to_chunks(
    files: list[Union[tuple[str, str], str]],
    max_tokens: int = 4000,
    chunk_mode: str = "multi_lines",
    must_break_at_empty_line: bool = True,
    custom_text_split_function: Optional[Callable[[str], list[str]]] = None,
) -> tuple[list[str], list[dict[str, Any]]]:
    """Split a list of files into chunks of max_tokens."""
    chunks = []
    sources = []

    for file in files:
        if isinstance(file, tuple):
            url = file[1]
            file = file[0]
        else:
            url = None
        _, file_extension = os.path.splitext(file)
        file_extension = file_extension.lower()

        if HAS_UNSTRUCTURED and file_extension[1:] in UNSTRUCTURED_FORMATS:
            text = partition(file)
            text = "\n".join([t.text for t in text]) if len(text) > 0 else ""
        elif file_extension == ".pdf":
            text = extract_text_from_pdf(file)
        else:  # For non-PDF text-based files
            with open(file, encoding="utf-8", errors="ignore") as f:
                text = f.read()

        if not text.strip():  # Debugging line to check if text is empty after reading
            logger.warning(f"No text available in file: {file}")
            continue  # Skip to the next file if no text is available

        if custom_text_split_function is not None:
            tmp_chunks = custom_text_split_function(text)
        else:
            tmp_chunks = split_text_to_chunks(text, max_tokens, chunk_mode, must_break_at_empty_line)
        chunks += tmp_chunks
        sources += [{"source": url if url else file}] * len(tmp_chunks)

    return chunks, sources


def get_files_from_dir(
    dir_path: Union[str, list[str]], types: list[str] = TEXT_FORMATS, recursive: bool = True
) -> list[Any]:
    """Return a list of all the files in a given directory, a url, a file path or a list of them."""
    if len(types) == 0:
        raise ValueError("types cannot be empty.")
    types = [t[1:].lower() if t.startswith(".") else t.lower() for t in set(types)]
    types += [t.upper() for t in types]

    files = []
    # If the path is a list of files or urls, process and return them
    if isinstance(dir_path, list):
        for item in dir_path:
            if os.path.isfile(item):
                files.append(item)
            elif is_url(item):
                filepath = get_file_from_url(item)
                if filepath:
                    files.append(filepath)
            elif os.path.exists(item):
                try:
                    files.extend(get_files_from_dir(item, types, recursive))
                except ValueError:
                    logger.warning(f"Directory {item} does not exist. Skipping.")
            else:
                logger.warning(f"File {item} does not exist. Skipping.")
        return files

    # If the path is a file, return it
    if os.path.isfile(dir_path):
        return [dir_path]

    # If the path is a url, download it and return the downloaded file
    if is_url(dir_path):
        filepath = get_file_from_url(dir_path)
        if filepath:
            return [filepath]
        else:
            return []

    if os.path.exists(dir_path):
        for type in types:
            if recursive:
                files += glob.glob(os.path.join(dir_path, f"**/*.{type}"), recursive=True)
            else:
                files += glob.glob(os.path.join(dir_path, f"*.{type}"), recursive=False)
    else:
        logger.error(f"Directory {dir_path} does not exist.")
        raise ValueError(f"Directory {dir_path} does not exist.")
    return files


@require_optional_import(["markdownify", "bs4"], "retrievechat")
def parse_html_to_markdown(html: str, url: str = None) -> str:
    """Parse HTML to markdown."""
    soup = BeautifulSoup(html, "html.parser")
    title = soup.title.string
    # Remove javascript and style blocks
    for script in soup(["script", "style"]):
        script.extract()

    # Convert to markdown -- Wikipedia gets special attention to get a clean version of the page
    if isinstance(url, str) and url.startswith("https://en.wikipedia.org/"):
        body_elm = soup.find("div", {"id": "mw-content-text"})
        title_elm = soup.find("span", {"class": "mw-page-title-main"})

        if body_elm:
            # What's the title
            main_title = soup.title.string
            if title_elm and len(title_elm) > 0:
                main_title = title_elm.string
            webpage_text = "# " + main_title + "\n\n" + markdownify.MarkdownConverter().convert_soup(body_elm)
        else:
            webpage_text = markdownify.MarkdownConverter().convert_soup(soup)
    else:
        webpage_text = markdownify.MarkdownConverter().convert_soup(soup)

    # Convert newlines
    webpage_text = re.sub(r"\r\n", "\n", webpage_text)
    webpage_text = re.sub(r"\n{2,}", "\n\n", webpage_text).strip()
    webpage_text = "# " + title + "\n\n" + webpage_text
    return webpage_text


def _generate_file_name_from_url(url: str, max_length=255) -> str:
    url_bytes = url.encode("utf-8")
    hash = hashlib.blake2b(url_bytes).hexdigest()
    parsed_url = urlparse(url)
    file_name = os.path.basename(url)
    file_name = (
        f"{parsed_url.netloc}_{file_name}_{hash[: min(8, max_length - len(parsed_url.netloc) - len(file_name) - 1)]}"
    )
    return file_name


def get_file_from_url(url: str, save_path: str = None) -> tuple[str, str]:
    """Download a file from a URL."""
    if save_path is None:
        save_path = "tmp/chromadb"
        os.makedirs(save_path, exist_ok=True)
    if os.path.isdir(save_path):
        filename = _generate_file_name_from_url(url)
        save_path = os.path.join(save_path, filename)
    else:
        os.makedirs(os.path.dirname(save_path), exist_ok=True)

    custom_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36",
    }
    try:
        response = requests.get(url, stream=True, headers=custom_headers, timeout=30)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        logger.warning(f"Failed to download {url}, {e}")
        return None

    content_type = response.headers.get("content-type", "")
    if "text/html" in content_type:
        # Get the content of the response
        html = ""
        for chunk in response.iter_content(chunk_size=8192, decode_unicode=True):
            html += chunk
        text = parse_html_to_markdown(html, url)
        with open(save_path, "w", encoding="utf-8") as f:
            f.write(text)
    else:
        with open(save_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
    return save_path, url


def is_url(string: str):
    """Return True if the string is a valid URL."""
    try:
        result = urlparse(string)
        return all([result.scheme, result.netloc])
    except ValueError:
        return False


@require_optional_import("chromadb", "retrievechat")
def create_vector_db_from_dir(
    dir_path: Union[str, list[str]],
    max_tokens: int = 4000,
    client: "API" = None,
    db_path: str = "tmp/chromadb.db",
    collection_name: str = "all-my-documents",
    get_or_create: bool = False,
    chunk_mode: str = "multi_lines",
    must_break_at_empty_line: bool = True,
    embedding_model: str = "all-MiniLM-L6-v2",
    embedding_function: Callable = None,
    custom_text_split_function: Callable = None,
    custom_text_types: list[str] = TEXT_FORMATS,
    recursive: bool = True,
    extra_docs: bool = False,
) -> "API":
    """Create a vector db from all the files in a given directory, the directory can also be a single file or a url to
    a single file. We support chromadb compatible APIs to create the vector db, this function is not required if
    you prepared your own vector db.

    Args:
        dir_path (Union[str, List[str]]): the path to the directory, file, url or a list of them.
        max_tokens (Optional, int): the maximum number of tokens per chunk. Default is 4000.
        client (Optional, API): the chromadb client. Default is None.
        db_path (Optional, str): the path to the chromadb. Default is "tmp/chromadb.db". The default was `/tmp/chromadb.db` for version `<=0.2.24`.
        collection_name (Optional, str): the name of the collection. Default is "all-my-documents".
        get_or_create (Optional, bool): Whether to get or create the collection. Default is False. If True, the collection
            will be returned if it already exists. Will raise ValueError if the collection already exists and get_or_create is False.
        chunk_mode (Optional, str): the chunk mode. Default is "multi_lines".
        must_break_at_empty_line (Optional, bool): Whether to break at empty line. Default is True.
        embedding_model (Optional, str): the embedding model to use. Default is "all-MiniLM-L6-v2". Will be ignored if
            embedding_function is not None.
        embedding_function (Optional, Callable): the embedding function to use. Default is None, SentenceTransformer with
            the given `embedding_model` will be used. If you want to use OpenAI, Cohere, HuggingFace or other embedding
            functions, you can pass it here, follow the examples in `https://docs.trychroma.com/embeddings`.
        custom_text_split_function (Optional, Callable): a custom function to split a string into a list of strings.
            Default is None, will use the default function in `autogen.retrieve_utils.split_text_to_chunks`.
        custom_text_types (Optional, List[str]): a list of file types to be processed. Default is TEXT_FORMATS.
        recursive (Optional, bool): whether to search documents recursively in the dir_path. Default is True.
        extra_docs (Optional, bool): whether to add more documents in the collection. Default is False

    Returns:
        The chromadb client.
    """
    if client is None:
        client = chromadb.PersistentClient(path=db_path)
    try:
        embedding_function = (
            ef.SentenceTransformerEmbeddingFunction(embedding_model)
            if embedding_function is None
            else embedding_function
        )
        collection = client.create_collection(
            collection_name,
            get_or_create=get_or_create,
            embedding_function=embedding_function,
            # https://github.com/nmslib/hnswlib#supported-distances
            # https://github.com/chroma-core/chroma/blob/566bc80f6c8ee29f7d99b6322654f32183c368c4/chromadb/segment/impl/vector/local_hnsw.py#L184
            # https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
            metadata={"hnsw:space": "ip", "hnsw:construction_ef": 30, "hnsw:M": 32},  # ip, l2, cosine
        )

        length = 0
        if extra_docs:
            length = len(collection.get()["ids"])

        if custom_text_split_function is not None:
            chunks, sources = split_files_to_chunks(
                get_files_from_dir(dir_path, custom_text_types, recursive),
                custom_text_split_function=custom_text_split_function,
            )
        else:
            chunks, sources = split_files_to_chunks(
                get_files_from_dir(dir_path, custom_text_types, recursive),
                max_tokens,
                chunk_mode,
                must_break_at_empty_line,
            )
        logger.info(f"Found {len(chunks)} chunks.")
        # Upsert in batch of 40000 or less if the total number of chunks is less than 40000
        for i in range(0, len(chunks), min(40000, len(chunks))):
            end_idx = i + min(40000, len(chunks) - i)
            collection.upsert(
                documents=chunks[i:end_idx],
                ids=[f"doc_{j + length}" for j in range(i, end_idx)],  # unique for each doc
                metadatas=sources[i:end_idx],
            )
    except ValueError as e:
        logger.warning(f"{e}")
    return client


@require_optional_import("chromadb", "retrievechat")
def query_vector_db(
    query_texts: list[str],
    n_results: int = 10,
    client: "API" = None,
    db_path: str = "tmp/chromadb.db",
    collection_name: str = "all-my-documents",
    search_string: str = "",
    embedding_model: str = "all-MiniLM-L6-v2",
    embedding_function: Callable = None,
) -> "QueryResult":
    """Query a vector db. We support chromadb compatible APIs, it's not required if you prepared your own vector db
    and query function.

    Args:
        query_texts (List[str]): the list of strings which will be used to query the vector db.
        n_results (Optional, int): the number of results to return. Default is 10.
        client (Optional, API): the chromadb compatible client. Default is None, a chromadb client will be used.
        db_path (Optional, str): the path to the vector db. Default is "tmp/chromadb.db". The default was `/tmp/chromadb.db` for version `<=0.2.24`.
        collection_name (Optional, str): the name of the collection. Default is "all-my-documents".
        search_string (Optional, str): the search string. Only docs that contain an exact match of this string will be retrieved. Default is "".
        embedding_model (Optional, str): the embedding model to use. Default is "all-MiniLM-L6-v2". Will be ignored if
            embedding_function is not None.
        embedding_function (Optional, Callable): the embedding function to use. Default is None, SentenceTransformer with
            the given `embedding_model` will be used. If you want to use OpenAI, Cohere, HuggingFace or other embedding
            functions, you can pass it here, follow the examples in `https://docs.trychroma.com/embeddings`.

    Returns:
        The query result. The format is:

        ```python
        class QueryResult(TypedDict):
            ids: List[IDs]
            embeddings: Optional[List[List[Embedding]]]
            documents: Optional[List[List[Document]]]
            metadatas: Optional[List[List[Metadata]]]
            distances: Optional[List[List[float]]]
        ```
    """
    if client is None:
        client = chromadb.PersistentClient(path=db_path)
    # the collection's embedding function is always the default one, but we want to use the one we used to create the
    # collection. So we compute the embeddings ourselves and pass it to the query function.
    collection = client.get_collection(collection_name)
    embedding_function = (
        ef.SentenceTransformerEmbeddingFunction(embedding_model) if embedding_function is None else embedding_function
    )
    query_embeddings = embedding_function(query_texts)
    # Query/search n most similar results. You can also .get by id
    results = collection.query(
        query_embeddings=query_embeddings,
        n_results=n_results,
        where_document={"$contains": search_string} if search_string else None,  # optional filter
    )
    return results
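
For orientation, a minimal sketch of how the retrieve_utils helpers above fit together. The `./docs` path and the query text are hypothetical, and the `retrievechat` optional dependencies (`chromadb`, `sentence-transformers`) are assumed to be installed:

```python
from autogen.retrieve_utils import create_vector_db_from_dir, query_vector_db

# Index every supported text file under ./docs (hypothetical path) into a
# persistent chromadb collection; chunks are capped at 2000 tokens each.
client = create_vector_db_from_dir(
    dir_path="./docs",
    max_tokens=2000,
    db_path="tmp/chromadb.db",
    collection_name="all-my-documents",
    get_or_create=True,
)

# Query the same collection; the result follows chromadb's QueryResult layout
# (ids, documents, metadatas, distances).
results = query_vector_db(
    query_texts=["How do I enable runtime logging?"],
    n_results=3,
    client=client,
    collection_name="all-my-documents",
)
print(results["documents"][0])
```

Since `create_vector_db_from_dir` returns the chromadb client, the same client can be passed straight to `query_vector_db` without reopening the on-disk database.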
autogen/runtime_logging.py (new file)
@@ -0,0 +1,160 @@
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
#
# SPDX-License-Identifier: Apache-2.0
#
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
# SPDX-License-Identifier: MIT
from __future__ import annotations

import logging
import sqlite3
import uuid
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, TypeVar

from .logger.base_logger import BaseLogger, LLMConfig
from .logger.logger_factory import LoggerFactory

if TYPE_CHECKING:
    from openai import AzureOpenAI, OpenAI
    from openai.types.chat import ChatCompletion

    from . import Agent, ConversableAgent, OpenAIWrapper
    from .oai.anthropic import AnthropicClient
    from .oai.bedrock import BedrockClient
    from .oai.cerebras import CerebrasClient
    from .oai.cohere import CohereClient
    from .oai.gemini import GeminiClient
    from .oai.groq import GroqClient
    from .oai.mistral import MistralAIClient
    from .oai.ollama import OllamaClient
    from .oai.together import TogetherClient

logger = logging.getLogger(__name__)

autogen_logger = None
is_logging = False

F = TypeVar("F", bound=Callable[..., Any])


def start(
    logger: Optional[BaseLogger] = None,
    logger_type: Literal["sqlite", "file"] = "sqlite",
    config: Optional[dict[str, Any]] = None,
) -> str:
    """Start logging for the runtime.

    Args:
        logger (BaseLogger): A logger instance
        logger_type (str): The type of logger to use (default: sqlite)
        config (dict): Configuration for the logger
    Returns:
        session_id (str(uuid.uuid4)): a unique id for the logging session
    """
    global autogen_logger
    global is_logging

    autogen_logger = logger or LoggerFactory.get_logger(logger_type=logger_type, config=config)

    try:
        session_id = autogen_logger.start()
        is_logging = True
    except Exception as e:
        logger.error(f"[runtime logging] Failed to start logging: {e}")
    finally:
        return session_id


def log_chat_completion(
    invocation_id: uuid.UUID,
    client_id: int,
    wrapper_id: int,
    agent: str | Agent,
    request: dict[str, float | str | list[dict[str, str]]],
    response: str | "ChatCompletion",
    is_cached: int,
    cost: float,
    start_time: str,
) -> None:
    if autogen_logger is None:
        logger.error("[runtime logging] log_chat_completion: autogen logger is None")
        return

    autogen_logger.log_chat_completion(
        invocation_id, client_id, wrapper_id, agent, request, response, is_cached, cost, start_time
    )


def log_new_agent(agent: ConversableAgent, init_args: dict[str, Any]) -> None:
    if autogen_logger is None:
        logger.error("[runtime logging] log_new_agent: autogen logger is None")
        return

    autogen_logger.log_new_agent(agent, init_args)


def log_event(source: str | Agent, name: str, **kwargs: dict[str, Any]) -> None:
    if autogen_logger is None:
        logger.error("[runtime logging] log_event: autogen logger is None")
        return

    autogen_logger.log_event(source, name, **kwargs)


def log_function_use(agent: str | Agent, function: F, args: dict[str, Any], returns: any):
    if autogen_logger is None:
        logger.error("[runtime logging] log_function_use: autogen logger is None")
        return

    autogen_logger.log_function_use(agent, function, args, returns)


def log_new_wrapper(wrapper: OpenAIWrapper, init_args: dict[str, LLMConfig | list[LLMConfig]]) -> None:
    if autogen_logger is None:
        logger.error("[runtime logging] log_new_wrapper: autogen logger is None")
        return

    autogen_logger.log_new_wrapper(wrapper, init_args)


def log_new_client(
    client: (
        AzureOpenAI
        | OpenAI
        | CerebrasClient
        | GeminiClient
        | AnthropicClient
        | MistralAIClient
        | TogetherClient
        | GroqClient
        | CohereClient
        | OllamaClient
        | BedrockClient
    ),
    wrapper: OpenAIWrapper,
    init_args: dict[str, Any],
) -> None:
    if autogen_logger is None:
        logger.error("[runtime logging] log_new_client: autogen logger is None")
        return

    autogen_logger.log_new_client(client, wrapper, init_args)


def stop() -> None:
    global is_logging
    if autogen_logger:
        autogen_logger.stop()
    is_logging = False


def get_connection() -> None | sqlite3.Connection:
    if autogen_logger is None:
        logger.error("[runtime logging] get_connection: autogen logger is None")
        return None

    return autogen_logger.get_connection()


def logging_enabled() -> bool:
    return is_logging
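
A rough sketch of the intended call pattern for this module, used as `autogen.runtime_logging` per the file listing above. The `dbname` config key and the agent workload are assumptions, not shown in this diff:

```python
import autogen.runtime_logging as runtime_logging

# Start a SQLite-backed logging session; start() returns a uuid4 string session id.
# The "dbname" key is an assumed option for the sqlite logger's config.
session_id = runtime_logging.start(logger_type="sqlite", config={"dbname": "logs.db"})
print(f"Logging session: {session_id}")

# ... run agents here; chat completions, events, wrappers and clients are
# recorded through the module-level autogen_logger while logging is enabled ...

if runtime_logging.logging_enabled():
    conn = runtime_logging.get_connection()  # sqlite3.Connection for ad-hoc queries
    runtime_logging.stop()
```

The module keeps a single module-level `autogen_logger`, so calls such as `log_chat_completion` or `log_event` become no-ops (with an error log) until `start()` has been called.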