ag2 0.9.1a1__py3-none-any.whl → 0.9.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ag2 may be problematic — see the registry's advisory page for details.
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/METADATA +272 -75
- ag2-0.9.2.dist-info/RECORD +406 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info}/WHEEL +1 -2
- autogen/__init__.py +89 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +427 -0
- autogen/_website/generate_mkdocs.py +1174 -0
- autogen/_website/notebook_processor.py +476 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +412 -0
- autogen/agentchat/__init__.py +44 -0
- autogen/agentchat/agent.py +182 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +309 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +429 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +566 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +214 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +512 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +335 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +170 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +268 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +187 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +324 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +272 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +198 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +329 -0
- autogen/agentchat/contrib/rag/query_engine.py +74 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +703 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +199 -0
- autogen/agentchat/contrib/swarm_agent.py +1425 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +232 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +315 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +407 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +550 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +928 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +303 -0
- autogen/agentchat/conversable_agent.py +4023 -0
- autogen/agentchat/group/__init__.py +64 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +41 -0
- autogen/agentchat/group/context_variables.py +192 -0
- autogen/agentchat/group/group_tool_executor.py +202 -0
- autogen/agentchat/group/group_utils.py +591 -0
- autogen/agentchat/group/handoffs.py +244 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +237 -0
- autogen/agentchat/group/on_condition.py +58 -0
- autogen/agentchat/group/on_context_condition.py +54 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +159 -0
- autogen/agentchat/group/patterns/manual.py +176 -0
- autogen/agentchat/group/patterns/pattern.py +288 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +26 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/group_chat_target.py +132 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +413 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1694 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +190 -0
- autogen/agentchat/realtime/experimental/function_observer.py +85 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +475 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +111 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +73 -0
- autogen/agents/contrib/time/time_tool_agent.py +51 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +316 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +118 -0
- autogen/agents/experimental/document_agent/document_agent.py +461 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +380 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +220 -0
- autogen/agents/experimental/document_agent/parser_utils.py +130 -0
- autogen/agents/experimental/document_agent/url_utils.py +426 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +77 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +62 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +90 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +75 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +102 -0
- autogen/cache/in_memory_cache.py +58 -0
- autogen/cache/redis_cache.py +123 -0
- autogen/code_utils.py +596 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +119 -0
- autogen/coding/docker_commandline_code_executor.py +268 -0
- autogen/coding/factory.py +47 -0
- autogen/coding/func_with_reqs.py +202 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +167 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +231 -0
- autogen/coding/jupyter/jupyter_code_executor.py +160 -0
- autogen/coding/jupyter/local_jupyter_server.py +172 -0
- autogen/coding/local_commandline_code_executor.py +405 -0
- autogen/coding/markdown_code_extractor.py +45 -0
- autogen/coding/utils.py +56 -0
- autogen/doc_utils.py +34 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1013 -0
- autogen/events/base_event.py +99 -0
- autogen/events/client_events.py +167 -0
- autogen/events/helpers.py +36 -0
- autogen/events/print_event.py +46 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +80 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +225 -0
- autogen/fast_depends/core/model.py +576 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +29 -0
- autogen/fast_depends/dependencies/provider.py +39 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +280 -0
- autogen/fast_depends/utils.py +187 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +178 -0
- autogen/import_utils.py +526 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +155 -0
- autogen/interop/langchain/langchain_tool.py +82 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +179 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +168 -0
- autogen/interop/registry.py +69 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +56 -0
- autogen/io/run_response.py +293 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +213 -0
- autogen/json_utils.py +43 -0
- autogen/llm_config.py +382 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +128 -0
- autogen/logger/file_logger.py +261 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +523 -0
- autogen/math_utils.py +339 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/mcp_client.py +208 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +63 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +581 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +158 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +114 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +400 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +948 -0
- autogen/messages/base_message.py +107 -0
- autogen/messages/client_messages.py +171 -0
- autogen/messages/print_message.py +49 -0
- autogen/oai/__init__.py +53 -0
- autogen/oai/anthropic.py +714 -0
- autogen/oai/bedrock.py +628 -0
- autogen/oai/cerebras.py +299 -0
- autogen/oai/client.py +1444 -0
- autogen/oai/client_utils.py +169 -0
- autogen/oai/cohere.py +479 -0
- autogen/oai/gemini.py +998 -0
- autogen/oai/gemini_types.py +155 -0
- autogen/oai/groq.py +305 -0
- autogen/oai/mistral.py +303 -0
- autogen/oai/oai_models/__init__.py +11 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +87 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +86 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +37 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +63 -0
- autogen/oai/oai_models/completion_usage.py +60 -0
- autogen/oai/ollama.py +643 -0
- autogen/oai/openai_utils.py +881 -0
- autogen/oai/together.py +370 -0
- autogen/retrieve_utils.py +491 -0
- autogen/runtime_logging.py +160 -0
- autogen/token_count_utils.py +267 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +41 -0
- autogen/tools/dependency_injection.py +254 -0
- autogen/tools/experimental/__init__.py +48 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +161 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +153 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +328 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +109 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +288 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +391 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +275 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +260 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1316 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +183 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +114 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +287 -0
- autogen/tools/function_utils.py +411 -0
- autogen/tools/tool.py +187 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +69 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
- ag2-0.9.1a1.dist-info/RECORD +0 -6
- ag2-0.9.1a1.dist-info/top_level.txt +0 -1
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/LICENSE +0 -0
- {ag2-0.9.1a1.dist-info → ag2-0.9.2.dist-info/licenses}/NOTICE.md +0 -0
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import warnings
|
|
7
|
+
from typing import Any, Optional
|
|
8
|
+
|
|
9
|
+
from ....import_utils import optional_import_block, require_optional_import
|
|
10
|
+
from .document import Document
|
|
11
|
+
from .graph_query_engine import GraphStoreQueryResult
|
|
12
|
+
|
|
13
|
+
with optional_import_block():
|
|
14
|
+
from falkordb import FalkorDB, Graph
|
|
15
|
+
from graphrag_sdk import KnowledgeGraph, Source
|
|
16
|
+
from graphrag_sdk.model_config import KnowledgeGraphModelConfig
|
|
17
|
+
from graphrag_sdk.models import GenerativeModel
|
|
18
|
+
from graphrag_sdk.models.openai import OpenAiGenerativeModel
|
|
19
|
+
from graphrag_sdk.ontology import Ontology
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@require_optional_import(["falkordb", "graphrag_sdk"], "graph-rag-falkor-db")
class FalkorGraphQueryEngine:
    """This is a wrapper for FalkorDB KnowledgeGraph."""

    def __init__(  # type: ignore[no-any-unimported]
        self,
        name: str,
        host: str = "127.0.0.1",
        port: int = 6379,
        username: Optional[str] = None,
        password: Optional[str] = None,
        model: Optional["GenerativeModel"] = None,
        ontology: Optional["Ontology"] = None,
    ):
        """Initialize a FalkorDB knowledge graph.

        Please also refer to https://github.com/FalkorDB/GraphRAG-SDK/blob/main/graphrag_sdk/kg.py

        TODO: Fix LLM API cost calculation for FalkorDB usages.

        Args:
            name (str): Knowledge graph name.
            host (str): FalkorDB hostname.
            port (int): FalkorDB port number.
            username (str|None): FalkorDB username.
            password (str|None): FalkorDB password.
            model (GenerativeModel): LLM model to use for FalkorDB to build and retrieve from the graph, default to use OAI gpt-4o.
            ontology: FalkorDB knowledge graph schema/ontology, https://github.com/FalkorDB/GraphRAG-SDK/blob/main/graphrag_sdk/ontology.py
                If None, FalkorDB will auto generate an ontology from the input docs.
        """
        self.name = name
        # Ontology is persisted in a sibling graph named "<name>_ontology".
        self.ontology_table_name = name + "_ontology"
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.model = model or OpenAiGenerativeModel("gpt-4o")
        # BUGFIX: build the model config from the resolved model (self.model), not the raw
        # `model` argument — otherwise a caller relying on the gpt-4o default got a config
        # wrapping None instead of the default model.
        self.model_config = KnowledgeGraphModelConfig.with_model(self.model)
        self.ontology = ontology
        # Populated by connect_db()/init_db(); query() requires it to be set.
        self.knowledge_graph: Optional["KnowledgeGraph"] = None  # type: ignore[no-any-unimported]
        self.falkordb = FalkorDB(host=self.host, port=self.port, username=self.username, password=self.password)

    def connect_db(self) -> None:
        """Connect to an existing knowledge graph.

        Raises:
            ValueError: If the graph does not exist, or its ontology cannot be resolved.
        """
        if self.name in self.falkordb.list_graphs():
            try:
                self.ontology = self._load_ontology_from_db()
            except Exception:
                # Best-effort: fall back to any ontology supplied at construction time.
                warnings.warn("Graph Ontology is not loaded.")

            if self.ontology is None:
                raise ValueError(f"Ontology of the knowledge graph '{self.name}' can't be None.")

            self.knowledge_graph = KnowledgeGraph(
                name=self.name,
                host=self.host,
                port=self.port,
                username=self.username,
                password=self.password,
                model_config=self.model_config,
                ontology=self.ontology,
            )

            # Establishing a chat session will maintain the history
            self._chat_session = self.knowledge_graph.chat_session()
        else:
            raise ValueError(f"Knowledge graph '{self.name}' does not exist")

    def init_db(self, input_doc: list[Document]) -> None:
        """Build the knowledge graph with input documents.

        Args:
            input_doc: Documents whose local paths are ingested as graph sources.

        Raises:
            ValueError: If none of the documents resolve to an existing local path.
        """
        sources = []
        for doc in input_doc:
            # Only local, existing files are ingestible; URLs/missing paths are skipped.
            if doc.path_or_url and os.path.exists(doc.path_or_url):
                sources.append(Source(doc.path_or_url))

        if sources:
            # Auto generate graph ontology if not created by user.
            if self.ontology is None:
                self.ontology = Ontology.from_sources(
                    sources=sources,
                    model=self.model,
                )
            # Save Ontology to graph for future access.
            self._save_ontology_to_db(self.ontology)

            self.knowledge_graph = KnowledgeGraph(
                name=self.name,
                host=self.host,
                port=self.port,
                username=self.username,
                password=self.password,
                # Reuse the config built in __init__ for consistency with connect_db().
                model_config=self.model_config,
                ontology=self.ontology,
            )

            self.knowledge_graph.process_sources(sources)

            # Establishing a chat session will maintain the history
            self._chat_session = self.knowledge_graph.chat_session()
        else:
            raise ValueError("No input documents could be loaded.")

    def add_records(self, new_records: list[Document]) -> bool:
        raise NotImplementedError("This method is not supported by FalkorDB SDK yet.")

    def query(self, question: str, n_results: int = 1, **kwargs: Any) -> GraphStoreQueryResult:
        """Query the knowledge graph with a question and optional message history.

        Args:
            question: a human input question.
            n_results: number of returned results.
            kwargs:
                messages: a list of message history.

        Returns: FalkorGraphQueryResult
        """
        if self.knowledge_graph is None:
            raise ValueError("Knowledge graph has not been selected or created.")

        response = self._chat_session.send_message(question)

        # History will be considered when querying by setting the last_answer
        self._chat_session.last_answer = response["response"]

        return GraphStoreQueryResult(answer=response["response"], results=[])

    def delete(self) -> bool:
        """Delete graph and its data from database."""
        all_graphs = self.falkordb.list_graphs()
        if self.name in all_graphs:
            self.falkordb.select_graph(self.name).delete()
        if self.ontology_table_name in all_graphs:
            self.falkordb.select_graph(self.ontology_table_name).delete()
        return True

    def __get_ontology_storage_graph(self) -> "Graph":  # type: ignore[no-any-unimported]
        return self.falkordb.select_graph(self.ontology_table_name)

    def _save_ontology_to_db(self, ontology: "Ontology") -> None:  # type: ignore[no-any-unimported]
        """Save graph ontology to a separate table with {graph_name}_ontology"""
        if self.ontology_table_name in self.falkordb.list_graphs():
            raise ValueError(f"Knowledge graph {self.name} is already created.")
        graph = self.__get_ontology_storage_graph()
        ontology.save_to_graph(graph)

    def _load_ontology_from_db(self) -> "Ontology":  # type: ignore[no-any-unimported]
        if self.ontology_table_name not in self.falkordb.list_graphs():
            raise ValueError(f"Knowledge graph {self.name} has not been created.")
        graph = self.__get_ontology_storage_graph()
        return Ontology.from_graph(graph)
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from typing import Any, Optional, Union
|
|
6
|
+
|
|
7
|
+
from .... import Agent, ConversableAgent
|
|
8
|
+
from .falkor_graph_query_engine import FalkorGraphQueryEngine
|
|
9
|
+
from .graph_query_engine import GraphStoreQueryResult
|
|
10
|
+
from .graph_rag_capability import GraphRagCapability
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class FalkorGraphRagCapability(GraphRagCapability):
|
|
14
|
+
"""The FalkorDB GraphRAG capability integrate FalkorDB with graphrag_sdk version: 0.1.3b0.
|
|
15
|
+
Ref: https://github.com/FalkorDB/GraphRAG-SDK/tree/2-move-away-from-sql-to-json-ontology-detection
|
|
16
|
+
|
|
17
|
+
For usage, please refer to example notebook/agentchat_graph_rag_falkordb.ipynb
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
def __init__(self, query_engine: FalkorGraphQueryEngine):
    """Create the capability around the FalkorDB query engine it will delegate queries to."""
    self.query_engine = query_engine
|
|
23
|
+
|
|
24
|
+
def add_to_agent(self, agent: ConversableAgent) -> None:
    """Attach the FalkorDB GraphRAG capability to *agent*.

    Args:
        agent: The ConversableAgent instance to add the capability to.

    Only a ConversableAgent is accepted, so that the reply delivered to the caller is
    produced from the graph DB rather than from any LLM. The agent must therefore have
    no llm_config, and its reply function is replaced with a FalkorDB-backed one.
    """
    if not isinstance(agent, ConversableAgent):
        raise Exception("FalkorDB GraphRAG capability can only be added to a ConversableAgent.")

    self.graph_rag_agent = agent

    # Reject agents that carry an LLM configuration — GraphRAG replies come from the DB.
    if agent.llm_config not in (None, False):
        message = (
            "Agents with GraphRAG capabilities do not use an LLM configuration. Please set your llm_config to None or False."
        )
        raise Exception(message)

    # Install the FalkorDB-backed reply at position 0 and drop every other reply
    # function, so the graph query is the agent's sole way of answering.
    agent.register_reply(
        [ConversableAgent, None], self._reply_using_falkordb_query, position=0, remove_other_reply_funcs=True
    )
|
|
49
|
+
|
|
50
|
+
def _reply_using_falkordb_query(
|
|
51
|
+
self,
|
|
52
|
+
recipient: ConversableAgent,
|
|
53
|
+
messages: Optional[list[dict[str, Any]]] = None,
|
|
54
|
+
sender: Optional[Agent] = None,
|
|
55
|
+
config: Optional[Any] = None,
|
|
56
|
+
) -> tuple[bool, Optional[Union[str, dict[str, Any]]]]:
|
|
57
|
+
"""Query FalkorDB and return the message. Internally, it utilises OpenAI to generate a reply based on the given messages.
|
|
58
|
+
The history with FalkorDB is also logged and updated.
|
|
59
|
+
|
|
60
|
+
The agent's system message will be incorporated into the query, if it's not blank.
|
|
61
|
+
|
|
62
|
+
If no results are found, a default message is returned: "I'm sorry, I don't have an answer for that."
|
|
63
|
+
|
|
64
|
+
Args:
|
|
65
|
+
recipient: The agent instance that will receive the message.
|
|
66
|
+
messages: A list of messages in the conversation history with the sender.
|
|
67
|
+
sender: The agent instance that sent the message.
|
|
68
|
+
config: Optional configuration for message processing.
|
|
69
|
+
|
|
70
|
+
Returns:
|
|
71
|
+
A tuple containing a boolean indicating success and the assistant's reply.
|
|
72
|
+
"""
|
|
73
|
+
# todo: fix typing, this is not correct
|
|
74
|
+
question = self._messages_summary(messages, recipient.system_message) # type: ignore[arg-type]
|
|
75
|
+
result: GraphStoreQueryResult = self.query_engine.query(question)
|
|
76
|
+
|
|
77
|
+
return True, result.answer if result.answer else "I'm sorry, I don't have an answer for that."
|
|
78
|
+
|
|
79
|
+
def _messages_summary(self, messages: Union[dict[str, Any], str], system_message: str) -> str:
|
|
80
|
+
"""Summarize the messages in the conversation history. Excluding any message with 'tool_calls' and 'tool_responses'
|
|
81
|
+
Includes the 'name' (if it exists) and the 'content', with a new line between each one, like:
|
|
82
|
+
customer:
|
|
83
|
+
<content>
|
|
84
|
+
|
|
85
|
+
agent:
|
|
86
|
+
<content>
|
|
87
|
+
"""
|
|
88
|
+
if isinstance(messages, str):
|
|
89
|
+
return (f"IMPORTANT: {system_message}\n" if system_message else "") + f"Context:\n\n{messages}"
|
|
90
|
+
|
|
91
|
+
elif isinstance(messages, list):
|
|
92
|
+
summary = ""
|
|
93
|
+
for message in messages:
|
|
94
|
+
if "content" in message and "tool_calls" not in message and "tool_responses" not in message:
|
|
95
|
+
summary += f"{message.get('name', '')}: {message.get('content', '')}\n\n"
|
|
96
|
+
|
|
97
|
+
if system_message:
|
|
98
|
+
summary = f"IMPORTANT: {system_message}\nContext:\n\n{summary}"
|
|
99
|
+
|
|
100
|
+
return summary
|
|
101
|
+
|
|
102
|
+
else:
|
|
103
|
+
raise ValueError("Invalid messages format. Must be a list of messages or a string.")
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from typing import Any, Optional, Protocol, runtime_checkable
|
|
9
|
+
|
|
10
|
+
from .document import Document
|
|
11
|
+
|
|
12
|
+
__all__ = ["GraphQueryEngine", "GraphStoreQueryResult"]
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class GraphStoreQueryResult:
    """Container for the outcome of a graph store query.

    Attributes:
        answer: human readable answer to question/query.
        results: intermediate results to question/query, e.g. node entities.
    """

    answer: Optional[str] = None
    results: list[Any] = field(default_factory=list)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@runtime_checkable
class GraphQueryEngine(Protocol):
    """Protocol for a graph query engine layered on top of an underlying graph database.

    Any object providing these three methods satisfies the basic graph-based RAG contract.
    """

    def init_db(self, input_doc: Optional[list[Document]] = None) -> None:
        """Initialize the graph database from the given documents or records.

        Typical steps are:
        1. connecting to a graph database.
        2. extracting graph nodes and edges from the input data, graph schema, etc.
        3. building indexes, etc.

        Args:
            input_doc: a list of input documents that are used to build the graph in database.

        """
        ...

    def add_records(self, new_records: list[Any]) -> bool:
        """Insert new records into the underlying database, updating the graph when required."""
        ...

    def query(self, question: str, n_results: int = 1, **kwarg: Any) -> GraphStoreQueryResult:
        """Translate a string question into a database query and return the result."""
        ...
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
from ...conversable_agent import ConversableAgent
|
|
8
|
+
from ..capabilities.agent_capability import AgentCapability
|
|
9
|
+
from .graph_query_engine import GraphQueryEngine
|
|
10
|
+
|
|
11
|
+
__all__ = ["GraphRagCapability"]
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class GraphRagCapability(AgentCapability):
    """A graph-based RAG capability uses a graph query engine to give a conversable agent the graph-based RAG ability.

    An agent class with graph-based RAG capability could
    1. create a graph in the underlying database with input documents.
    2. retrieve relevant information based on messages received by the agent.
    3. generate answers from retrieved information and send messages back.

    For example,
    ```python
    graph_query_engine = GraphQueryEngine(...)
    graph_query_engine.init_db([Document(doc1), Document(doc2), ...])

    graph_rag_agent = ConversableAgent(
        name="graph_rag_agent",
        max_consecutive_auto_reply=3,
        ...
    )
    graph_rag_capability = GraphRagCapability(graph_query_engine)
    graph_rag_capability.add_to_agent(graph_rag_agent)

    user_proxy = UserProxyAgent(
        name="user_proxy",
        code_execution_config=False,
        is_termination_msg=lambda msg: "TERMINATE" in msg["content"],
        human_input_mode="ALWAYS",
    )
    user_proxy.initiate_chat(graph_rag_agent, message="Name a few actors who've played in 'The Matrix'")

    # ChatResult(
    #     chat_id=None,
    #     chat_history=[
    #         {'content': 'Name a few actors who've played in \'The Matrix\'', 'role': 'graph_rag_agent'},
    #         {'content': 'A few actors who have played in The Matrix are:
    #         - Keanu Reeves
    #         - Laurence Fishburne
    #         - Carrie-Anne Moss
    #         - Hugo Weaving',
    #         'role': 'user_proxy'},
    #     ...)
    ```
    """

    def __init__(self, query_engine: GraphQueryEngine) -> None:
        """Initialize graph-based RAG capability with a graph query engine"""
        ...

    def add_to_agent(self, agent: ConversableAgent) -> None:
        """Add the capability to an agent"""
        ...
|
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
import os
|
|
5
|
+
import sys
|
|
6
|
+
from typing import Any, Optional, Union
|
|
7
|
+
|
|
8
|
+
if sys.version_info >= (3, 10):
|
|
9
|
+
from typing import TypeAlias
|
|
10
|
+
else:
|
|
11
|
+
from typing_extensions import TypeAlias
|
|
12
|
+
|
|
13
|
+
from ....import_utils import optional_import_block, require_optional_import
|
|
14
|
+
from .document import Document, DocumentType
|
|
15
|
+
from .graph_query_engine import GraphStoreQueryResult
|
|
16
|
+
|
|
17
|
+
with optional_import_block():
|
|
18
|
+
from llama_index.core import PropertyGraphIndex, SimpleDirectoryReader
|
|
19
|
+
from llama_index.core.base.embeddings.base import BaseEmbedding
|
|
20
|
+
from llama_index.core.chat_engine.types import ChatMode
|
|
21
|
+
from llama_index.core.indices.property_graph import (
|
|
22
|
+
DynamicLLMPathExtractor,
|
|
23
|
+
SchemaLLMPathExtractor,
|
|
24
|
+
)
|
|
25
|
+
from llama_index.core.indices.property_graph.transformations.schema_llm import Triple
|
|
26
|
+
from llama_index.core.llms import LLM
|
|
27
|
+
from llama_index.core.readers.json import JSONReader
|
|
28
|
+
from llama_index.core.schema import Document as LlamaDocument
|
|
29
|
+
from llama_index.core.schema import TransformComponent
|
|
30
|
+
from llama_index.embeddings.openai import OpenAIEmbedding
|
|
31
|
+
from llama_index.graph_stores.neo4j import Neo4jPropertyGraphStore
|
|
32
|
+
from llama_index.llms.openai import OpenAI
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@require_optional_import("llama_index", "neo4j")
class Neo4jGraphQueryEngine:
    """This class serves as a wrapper for a property graph query engine backed by LlamaIndex and Neo4j,
    facilitating the creation, connection, updating, and querying of LlamaIndex property graphs.

    It builds a property graph index from input documents,
    storing and retrieving data from the property graph in the Neo4j database.

    It extracts triplets, i.e., [entity] -> [relationship] -> [entity] sets,
    from the input documents using LlamaIndex extractors.

    Users can provide custom entities, relationships, and schema to guide the extraction process.

    If strict is True, the engine will extract triplets following the schema
    of allowed relationships for each entity specified in the schema.

    It also leverages LlamaIndex's chat engine which has a conversation history internally to provide context-aware responses.

    For usage, please refer to example notebook/agentchat_graph_rag_neo4j.ipynb
    """

    def __init__(  # type: ignore[no-any-unimported]
        self,
        host: str = "bolt://localhost",
        port: int = 7687,
        database: str = "neo4j",
        username: str = "neo4j",
        password: str = "neo4j",
        llm: Optional["LLM"] = None,
        embedding: Optional["BaseEmbedding"] = None,
        entities: Optional["TypeAlias"] = None,
        relations: Optional["TypeAlias"] = None,
        schema: Optional[Union[dict[str, str], list["Triple"]]] = None,
        strict: Optional[bool] = False,
    ):
        """Initialize a Neo4j Property graph.
        Please also refer to https://docs.llamaindex.ai/en/stable/examples/property_graph/graph_store/

        Args:
            host (str): Neo4j hostname.
            port (int): Neo4j port number.
            database (str): Neo4j database name.
            username (str): Neo4j username.
            password (str): Neo4j password.
            llm (LLM): Language model to use for extracting triplets. Defaults to OpenAI "gpt-4o".
            embedding (BaseEmbedding): Embedding model to use constructing index and query.
                Defaults to OpenAI "text-embedding-3-small".
            entities (Optional[TypeAlias]): Custom suggested entities to include in the graph.
            relations (Optional[TypeAlias]): Custom suggested relations to include in the graph.
            schema (Optional[Union[dict[str, str], list[Triple]]]): Custom schema to specify allowed relationships for each entity.
            strict (Optional[bool]): If false, allows for values outside of the input schema.
        """
        self.host = host
        self.port = port
        self.database = database
        self.username = username
        self.password = password
        self.llm = llm or OpenAI(model="gpt-4o", temperature=0.0)
        self.embedding = embedding or OpenAIEmbedding(model_name="text-embedding-3-small")
        self.entities = entities
        self.relations = relations
        self.schema = schema
        self.strict = strict

    def init_db(self, input_doc: Optional[list[Document]] = None) -> None:
        """Build the knowledge graph with input documents."""
        self.documents = self._load_doc(input_doc if input_doc is not None else [])

        self.graph_store = self._create_graph_store()

        # delete all entities and relationships in case a graph pre-exists
        self._clear()

        # Create knowledge graph extractors.
        self.kg_extractors = self._create_kg_extractors()

        self.index = PropertyGraphIndex.from_documents(
            self.documents,
            llm=self.llm,
            embed_model=self.embedding,
            kg_extractors=self.kg_extractors,
            property_graph_store=self.graph_store,
            show_progress=True,
        )

    def connect_db(self) -> None:
        """Connect to an existing knowledge graph database."""
        self.graph_store = self._create_graph_store()

        self.kg_extractors = self._create_kg_extractors()

        self.index = PropertyGraphIndex.from_existing(
            property_graph_store=self.graph_store,
            kg_extractors=self.kg_extractors,
            llm=self.llm,
            embed_model=self.embedding,
            show_progress=True,
        )

    def add_records(self, new_records: list[Document]) -> bool:
        """Add new records to the knowledge graph. Must be local files.

        Args:
            new_records (list[Document]): List of new documents to add.

        Returns:
            bool: True if successful, False otherwise.
        """
        # getattr guards against calls made before init_db/connect_db created the attribute,
        # which would otherwise raise AttributeError instead of this clearer ValueError.
        if getattr(self, "graph_store", None) is None:
            raise ValueError("Knowledge graph is not initialized. Please call init_db or connect_db first.")

        try:
            # SimpleDirectoryReader will select the best file reader based on the file extensions,
            # see _load_doc for supported file types.
            new_documents = SimpleDirectoryReader(input_files=[doc.path_or_url for doc in new_records]).load_data()

            for doc in new_documents:
                self.index.insert(doc)

            return True
        except Exception as e:
            print(f"Error adding records: {e}")
            return False

    def query(self, question: str, n_results: int = 1, **kwargs: Any) -> GraphStoreQueryResult:
        """Query the property graph with a question using LlamaIndex chat engine.
        We use the condense_plus_context chat mode
        which condenses the conversation history and the user query into a standalone question,
        and then build a context for the standalone question
        from the property graph to generate a response.

        Args:
            question: a human input question.
            n_results: number of results to return.
            **kwargs: additional keyword arguments.

        Returns:
            A GraphStoreQueryResult object containing the answer and related triplets.
        """
        if not hasattr(self, "index"):
            raise ValueError("Property graph index is not created.")

        # Initialize chat engine if not already initialized
        if not hasattr(self, "chat_engine"):
            self.chat_engine = self.index.as_chat_engine(chat_mode=ChatMode.CONDENSE_PLUS_CONTEXT, llm=self.llm)

        response = self.chat_engine.chat(question)
        return GraphStoreQueryResult(answer=str(response))

    def _create_graph_store(self) -> "Neo4jPropertyGraphStore":  # type: ignore[no-any-unimported]
        """Build a Neo4jPropertyGraphStore from the stored connection settings (shared by init_db and connect_db)."""
        return Neo4jPropertyGraphStore(
            username=self.username,
            password=self.password,
            url=self.host + ":" + str(self.port),
            database=self.database,
        )

    def _clear(self) -> None:
        """Delete all entities and relationships in the graph.
        TODO: Delete all the data in the database including indexes and constraints.
        """
        # NOTE(review): relies on the store's private _driver; no public full-wipe API appears to exist.
        with self.graph_store._driver.session() as session:
            session.run("MATCH (n) DETACH DELETE n;")

    def _load_doc(self, input_doc: list[Document]) -> list["LlamaDocument"]:  # type: ignore[no-any-unimported]
        """Load documents from the input files. Currently support the following file types:
        .csv - comma-separated values
        .docx - Microsoft Word
        .epub - EPUB ebook format
        .hwp - Hangul Word Processor
        .ipynb - Jupyter Notebook
        .jpeg, .jpg - JPEG image
        .mbox - MBOX email archive
        .md - Markdown
        .mp3, .mp4 - audio and video
        .pdf - Portable Document Format
        .png - Portable Network Graphics
        .ppt, .pptm, .pptx - Microsoft PowerPoint
        .json - JSON files
        """
        for doc in input_doc:
            if not os.path.exists(doc.path_or_url):  # type: ignore[arg-type]
                raise ValueError(f"Document file not found: {doc.path_or_url}")

        # JSON files need a dedicated reader; everything else goes through SimpleDirectoryReader.
        common_type_input_files = []
        json_type_input_files = []
        for doc in input_doc:
            if doc.doctype is DocumentType.JSON:
                json_type_input_files.append(doc.path_or_url)
            else:
                common_type_input_files.append(doc.path_or_url)
        loaded_documents = []
        if common_type_input_files:
            loaded_documents.extend(SimpleDirectoryReader(input_files=common_type_input_files).load_data())
        for json_file in json_type_input_files:
            loaded_documents.extend(JSONReader().load_data(input_file=json_file))  # type: ignore[arg-type]

        return loaded_documents

    def _create_kg_extractors(self) -> list["TransformComponent"]:  # type: ignore[no-any-unimported]
        """If strict is True,
        extract paths following a strict schema of allowed relationships for each entity.

        If strict is False,
        auto-create relationships and schema that fit the graph

        # To add more extractors, please refer to https://docs.llamaindex.ai/en/latest/module_guides/indexing/lpg_index_guide/#construction
        """
        kg_extractors: list["TransformComponent"] = [  # type: ignore[no-any-unimported]
            SchemaLLMPathExtractor(
                llm=self.llm,
                possible_entities=self.entities,
                possible_relations=self.relations,
                kg_validation_schema=self.schema,
                # strict may be None; normalize to a plain bool for the extractor.
                strict=bool(self.strict),
            ),
        ]

        # DynamicLLMPathExtractor will auto-create relationships and schema that fit the graph
        if not self.strict:
            kg_extractors.append(
                DynamicLLMPathExtractor(
                    llm=self.llm,
                    allowed_entity_types=self.entities,
                    allowed_relation_types=self.relations,
                )
            )

        return kg_extractors
|