ag2-0.10.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ag2-0.10.2.dist-info/METADATA +819 -0
- ag2-0.10.2.dist-info/RECORD +423 -0
- ag2-0.10.2.dist-info/WHEEL +4 -0
- ag2-0.10.2.dist-info/licenses/LICENSE +201 -0
- ag2-0.10.2.dist-info/licenses/NOTICE.md +19 -0
- autogen/__init__.py +88 -0
- autogen/_website/__init__.py +3 -0
- autogen/_website/generate_api_references.py +426 -0
- autogen/_website/generate_mkdocs.py +1216 -0
- autogen/_website/notebook_processor.py +475 -0
- autogen/_website/process_notebooks.py +656 -0
- autogen/_website/utils.py +413 -0
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +86 -0
- autogen/a2a/client.py +357 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +207 -0
- autogen/agentchat/__init__.py +47 -0
- autogen/agentchat/agent.py +180 -0
- autogen/agentchat/assistant_agent.py +86 -0
- autogen/agentchat/chat.py +325 -0
- autogen/agentchat/contrib/__init__.py +5 -0
- autogen/agentchat/contrib/agent_eval/README.md +7 -0
- autogen/agentchat/contrib/agent_eval/agent_eval.py +108 -0
- autogen/agentchat/contrib/agent_eval/criterion.py +43 -0
- autogen/agentchat/contrib/agent_eval/critic_agent.py +44 -0
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +39 -0
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +45 -0
- autogen/agentchat/contrib/agent_eval/task.py +42 -0
- autogen/agentchat/contrib/agent_optimizer.py +432 -0
- autogen/agentchat/contrib/capabilities/__init__.py +5 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +20 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +301 -0
- autogen/agentchat/contrib/capabilities/teachability.py +393 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +66 -0
- autogen/agentchat/contrib/capabilities/tools_capability.py +22 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +93 -0
- autogen/agentchat/contrib/capabilities/transforms.py +578 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +122 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +215 -0
- autogen/agentchat/contrib/captainagent/__init__.py +9 -0
- autogen/agentchat/contrib/captainagent/agent_builder.py +790 -0
- autogen/agentchat/contrib/captainagent/captainagent.py +514 -0
- autogen/agentchat/contrib/captainagent/tool_retriever.py +334 -0
- autogen/agentchat/contrib/captainagent/tools/README.md +44 -0
- autogen/agentchat/contrib/captainagent/tools/__init__.py +5 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +40 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +30 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +53 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +38 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +60 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +61 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +47 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +33 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +35 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +21 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +18 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +31 -0
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +16 -0
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +25 -0
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +27 -0
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +28 -0
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +34 -0
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +39 -0
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +23 -0
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +36 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +15 -0
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +10 -0
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +34 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +526 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +9 -0
- autogen/agentchat/contrib/graph_rag/document.py +29 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +167 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +103 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +53 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +63 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +263 -0
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +83 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +210 -0
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +93 -0
- autogen/agentchat/contrib/img_utils.py +397 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +117 -0
- autogen/agentchat/contrib/llava_agent.py +189 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +464 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +125 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
- autogen/agentchat/contrib/rag/__init__.py +10 -0
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +268 -0
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +195 -0
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +319 -0
- autogen/agentchat/contrib/rag/query_engine.py +76 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +59 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +704 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +200 -0
- autogen/agentchat/contrib/swarm_agent.py +1404 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +79 -0
- autogen/agentchat/contrib/vectordb/__init__.py +5 -0
- autogen/agentchat/contrib/vectordb/base.py +224 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +316 -0
- autogen/agentchat/contrib/vectordb/couchbase.py +405 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +551 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +927 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +320 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +304 -0
- autogen/agentchat/conversable_agent.py +4307 -0
- autogen/agentchat/group/__init__.py +67 -0
- autogen/agentchat/group/available_condition.py +91 -0
- autogen/agentchat/group/context_condition.py +77 -0
- autogen/agentchat/group/context_expression.py +238 -0
- autogen/agentchat/group/context_str.py +39 -0
- autogen/agentchat/group/context_variables.py +182 -0
- autogen/agentchat/group/events/transition_events.py +111 -0
- autogen/agentchat/group/group_tool_executor.py +324 -0
- autogen/agentchat/group/group_utils.py +659 -0
- autogen/agentchat/group/guardrails.py +179 -0
- autogen/agentchat/group/handoffs.py +303 -0
- autogen/agentchat/group/llm_condition.py +93 -0
- autogen/agentchat/group/multi_agent_chat.py +291 -0
- autogen/agentchat/group/on_condition.py +55 -0
- autogen/agentchat/group/on_context_condition.py +51 -0
- autogen/agentchat/group/patterns/__init__.py +18 -0
- autogen/agentchat/group/patterns/auto.py +160 -0
- autogen/agentchat/group/patterns/manual.py +177 -0
- autogen/agentchat/group/patterns/pattern.py +295 -0
- autogen/agentchat/group/patterns/random.py +106 -0
- autogen/agentchat/group/patterns/round_robin.py +117 -0
- autogen/agentchat/group/reply_result.py +24 -0
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +140 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/group/speaker_selection_result.py +41 -0
- autogen/agentchat/group/targets/__init__.py +4 -0
- autogen/agentchat/group/targets/function_target.py +245 -0
- autogen/agentchat/group/targets/group_chat_target.py +133 -0
- autogen/agentchat/group/targets/group_manager_target.py +151 -0
- autogen/agentchat/group/targets/transition_target.py +424 -0
- autogen/agentchat/group/targets/transition_utils.py +6 -0
- autogen/agentchat/groupchat.py +1832 -0
- autogen/agentchat/realtime/__init__.py +3 -0
- autogen/agentchat/realtime/experimental/__init__.py +20 -0
- autogen/agentchat/realtime/experimental/audio_adapters/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +148 -0
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +139 -0
- autogen/agentchat/realtime/experimental/audio_observer.py +42 -0
- autogen/agentchat/realtime/experimental/clients/__init__.py +15 -0
- autogen/agentchat/realtime/experimental/clients/gemini/__init__.py +7 -0
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +274 -0
- autogen/agentchat/realtime/experimental/clients/oai/__init__.py +8 -0
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +220 -0
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +243 -0
- autogen/agentchat/realtime/experimental/clients/oai/utils.py +48 -0
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +191 -0
- autogen/agentchat/realtime/experimental/function_observer.py +84 -0
- autogen/agentchat/realtime/experimental/realtime_agent.py +158 -0
- autogen/agentchat/realtime/experimental/realtime_events.py +42 -0
- autogen/agentchat/realtime/experimental/realtime_observer.py +100 -0
- autogen/agentchat/realtime/experimental/realtime_swarm.py +533 -0
- autogen/agentchat/realtime/experimental/websockets.py +21 -0
- autogen/agentchat/realtime_agent/__init__.py +21 -0
- autogen/agentchat/user_proxy_agent.py +114 -0
- autogen/agentchat/utils.py +206 -0
- autogen/agents/__init__.py +3 -0
- autogen/agents/contrib/__init__.py +10 -0
- autogen/agents/contrib/time/__init__.py +8 -0
- autogen/agents/contrib/time/time_reply_agent.py +74 -0
- autogen/agents/contrib/time/time_tool_agent.py +52 -0
- autogen/agents/experimental/__init__.py +27 -0
- autogen/agents/experimental/deep_research/__init__.py +7 -0
- autogen/agents/experimental/deep_research/deep_research.py +52 -0
- autogen/agents/experimental/discord/__init__.py +7 -0
- autogen/agents/experimental/discord/discord.py +66 -0
- autogen/agents/experimental/document_agent/__init__.py +19 -0
- autogen/agents/experimental/document_agent/chroma_query_engine.py +301 -0
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +113 -0
- autogen/agents/experimental/document_agent/document_agent.py +643 -0
- autogen/agents/experimental/document_agent/document_conditions.py +50 -0
- autogen/agents/experimental/document_agent/document_utils.py +376 -0
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +214 -0
- autogen/agents/experimental/document_agent/parser_utils.py +134 -0
- autogen/agents/experimental/document_agent/url_utils.py +417 -0
- autogen/agents/experimental/reasoning/__init__.py +7 -0
- autogen/agents/experimental/reasoning/reasoning_agent.py +1178 -0
- autogen/agents/experimental/slack/__init__.py +7 -0
- autogen/agents/experimental/slack/slack.py +73 -0
- autogen/agents/experimental/telegram/__init__.py +7 -0
- autogen/agents/experimental/telegram/telegram.py +76 -0
- autogen/agents/experimental/websurfer/__init__.py +7 -0
- autogen/agents/experimental/websurfer/websurfer.py +70 -0
- autogen/agents/experimental/wikipedia/__init__.py +7 -0
- autogen/agents/experimental/wikipedia/wikipedia.py +88 -0
- autogen/browser_utils.py +309 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +71 -0
- autogen/cache/cache.py +203 -0
- autogen/cache/cache_factory.py +88 -0
- autogen/cache/cosmos_db_cache.py +144 -0
- autogen/cache/disk_cache.py +97 -0
- autogen/cache/in_memory_cache.py +54 -0
- autogen/cache/redis_cache.py +119 -0
- autogen/code_utils.py +598 -0
- autogen/coding/__init__.py +30 -0
- autogen/coding/base.py +120 -0
- autogen/coding/docker_commandline_code_executor.py +283 -0
- autogen/coding/factory.py +56 -0
- autogen/coding/func_with_reqs.py +203 -0
- autogen/coding/jupyter/__init__.py +23 -0
- autogen/coding/jupyter/base.py +36 -0
- autogen/coding/jupyter/docker_jupyter_server.py +160 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/import_utils.py +82 -0
- autogen/coding/jupyter/jupyter_client.py +224 -0
- autogen/coding/jupyter/jupyter_code_executor.py +154 -0
- autogen/coding/jupyter/local_jupyter_server.py +164 -0
- autogen/coding/local_commandline_code_executor.py +341 -0
- autogen/coding/markdown_code_extractor.py +44 -0
- autogen/coding/utils.py +55 -0
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/doc_utils.py +35 -0
- autogen/environments/__init__.py +10 -0
- autogen/environments/docker_python_environment.py +365 -0
- autogen/environments/python_environment.py +125 -0
- autogen/environments/system_python_environment.py +85 -0
- autogen/environments/venv_python_environment.py +220 -0
- autogen/environments/working_directory.py +74 -0
- autogen/events/__init__.py +7 -0
- autogen/events/agent_events.py +1016 -0
- autogen/events/base_event.py +100 -0
- autogen/events/client_events.py +168 -0
- autogen/events/helpers.py +44 -0
- autogen/events/print_event.py +45 -0
- autogen/exception_utils.py +73 -0
- autogen/extensions/__init__.py +5 -0
- autogen/fast_depends/__init__.py +16 -0
- autogen/fast_depends/_compat.py +75 -0
- autogen/fast_depends/core/__init__.py +14 -0
- autogen/fast_depends/core/build.py +206 -0
- autogen/fast_depends/core/model.py +527 -0
- autogen/fast_depends/dependencies/__init__.py +15 -0
- autogen/fast_depends/dependencies/model.py +30 -0
- autogen/fast_depends/dependencies/provider.py +40 -0
- autogen/fast_depends/library/__init__.py +10 -0
- autogen/fast_depends/library/model.py +46 -0
- autogen/fast_depends/py.typed +6 -0
- autogen/fast_depends/schema.py +66 -0
- autogen/fast_depends/use.py +272 -0
- autogen/fast_depends/utils.py +177 -0
- autogen/formatting_utils.py +83 -0
- autogen/function_utils.py +13 -0
- autogen/graph_utils.py +173 -0
- autogen/import_utils.py +539 -0
- autogen/interop/__init__.py +22 -0
- autogen/interop/crewai/__init__.py +7 -0
- autogen/interop/crewai/crewai.py +88 -0
- autogen/interop/interoperability.py +71 -0
- autogen/interop/interoperable.py +46 -0
- autogen/interop/langchain/__init__.py +8 -0
- autogen/interop/langchain/langchain_chat_model_factory.py +156 -0
- autogen/interop/langchain/langchain_tool.py +78 -0
- autogen/interop/litellm/__init__.py +7 -0
- autogen/interop/litellm/litellm_config_factory.py +178 -0
- autogen/interop/pydantic_ai/__init__.py +7 -0
- autogen/interop/pydantic_ai/pydantic_ai.py +172 -0
- autogen/interop/registry.py +70 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +151 -0
- autogen/io/console.py +56 -0
- autogen/io/processors/__init__.py +12 -0
- autogen/io/processors/base.py +21 -0
- autogen/io/processors/console_event_processor.py +61 -0
- autogen/io/run_response.py +294 -0
- autogen/io/thread_io_stream.py +63 -0
- autogen/io/websockets.py +214 -0
- autogen/json_utils.py +42 -0
- autogen/llm_clients/MIGRATION_TO_V2.md +782 -0
- autogen/llm_clients/__init__.py +77 -0
- autogen/llm_clients/client_v2.py +122 -0
- autogen/llm_clients/models/__init__.py +55 -0
- autogen/llm_clients/models/content_blocks.py +389 -0
- autogen/llm_clients/models/unified_message.py +145 -0
- autogen/llm_clients/models/unified_response.py +83 -0
- autogen/llm_clients/openai_completions_client.py +444 -0
- autogen/llm_config/__init__.py +11 -0
- autogen/llm_config/client.py +59 -0
- autogen/llm_config/config.py +461 -0
- autogen/llm_config/entry.py +169 -0
- autogen/llm_config/types.py +37 -0
- autogen/llm_config/utils.py +223 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +129 -0
- autogen/logger/file_logger.py +262 -0
- autogen/logger/logger_factory.py +42 -0
- autogen/logger/logger_utils.py +57 -0
- autogen/logger/sqlite_logger.py +524 -0
- autogen/math_utils.py +338 -0
- autogen/mcp/__init__.py +7 -0
- autogen/mcp/__main__.py +78 -0
- autogen/mcp/helpers.py +45 -0
- autogen/mcp/mcp_client.py +349 -0
- autogen/mcp/mcp_proxy/__init__.py +19 -0
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +62 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +577 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +166 -0
- autogen/mcp/mcp_proxy/operation_renaming.py +110 -0
- autogen/mcp/mcp_proxy/patch_fastapi_code_generator.py +98 -0
- autogen/mcp/mcp_proxy/security.py +399 -0
- autogen/mcp/mcp_proxy/security_schema_visitor.py +37 -0
- autogen/messages/__init__.py +7 -0
- autogen/messages/agent_messages.py +946 -0
- autogen/messages/base_message.py +108 -0
- autogen/messages/client_messages.py +172 -0
- autogen/messages/print_message.py +48 -0
- autogen/oai/__init__.py +61 -0
- autogen/oai/anthropic.py +1516 -0
- autogen/oai/bedrock.py +800 -0
- autogen/oai/cerebras.py +302 -0
- autogen/oai/client.py +1658 -0
- autogen/oai/client_utils.py +196 -0
- autogen/oai/cohere.py +494 -0
- autogen/oai/gemini.py +1045 -0
- autogen/oai/gemini_types.py +156 -0
- autogen/oai/groq.py +319 -0
- autogen/oai/mistral.py +311 -0
- autogen/oai/oai_models/__init__.py +23 -0
- autogen/oai/oai_models/_models.py +16 -0
- autogen/oai/oai_models/chat_completion.py +86 -0
- autogen/oai/oai_models/chat_completion_audio.py +32 -0
- autogen/oai/oai_models/chat_completion_message.py +97 -0
- autogen/oai/oai_models/chat_completion_message_tool_call.py +60 -0
- autogen/oai/oai_models/chat_completion_token_logprob.py +62 -0
- autogen/oai/oai_models/completion_usage.py +59 -0
- autogen/oai/ollama.py +657 -0
- autogen/oai/openai_responses.py +451 -0
- autogen/oai/openai_utils.py +897 -0
- autogen/oai/together.py +387 -0
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +197 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/retrieve_utils.py +490 -0
- autogen/runtime_logging.py +161 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/token_count_utils.py +280 -0
- autogen/tools/__init__.py +20 -0
- autogen/tools/contrib/__init__.py +9 -0
- autogen/tools/contrib/time/__init__.py +7 -0
- autogen/tools/contrib/time/time.py +40 -0
- autogen/tools/dependency_injection.py +249 -0
- autogen/tools/experimental/__init__.py +54 -0
- autogen/tools/experimental/browser_use/__init__.py +7 -0
- autogen/tools/experimental/browser_use/browser_use.py +154 -0
- autogen/tools/experimental/code_execution/__init__.py +7 -0
- autogen/tools/experimental/code_execution/python_code_execution.py +86 -0
- autogen/tools/experimental/crawl4ai/__init__.py +7 -0
- autogen/tools/experimental/crawl4ai/crawl4ai.py +150 -0
- autogen/tools/experimental/deep_research/__init__.py +7 -0
- autogen/tools/experimental/deep_research/deep_research.py +329 -0
- autogen/tools/experimental/duckduckgo/__init__.py +7 -0
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +103 -0
- autogen/tools/experimental/firecrawl/__init__.py +7 -0
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +836 -0
- autogen/tools/experimental/google/__init__.py +14 -0
- autogen/tools/experimental/google/authentication/__init__.py +11 -0
- autogen/tools/experimental/google/authentication/credentials_hosted_provider.py +43 -0
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +91 -0
- autogen/tools/experimental/google/authentication/credentials_provider.py +35 -0
- autogen/tools/experimental/google/drive/__init__.py +9 -0
- autogen/tools/experimental/google/drive/drive_functions.py +124 -0
- autogen/tools/experimental/google/drive/toolkit.py +88 -0
- autogen/tools/experimental/google/model.py +17 -0
- autogen/tools/experimental/google/toolkit_protocol.py +19 -0
- autogen/tools/experimental/google_search/__init__.py +8 -0
- autogen/tools/experimental/google_search/google_search.py +93 -0
- autogen/tools/experimental/google_search/youtube_search.py +181 -0
- autogen/tools/experimental/messageplatform/__init__.py +17 -0
- autogen/tools/experimental/messageplatform/discord/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/discord/discord.py +284 -0
- autogen/tools/experimental/messageplatform/slack/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/slack/slack.py +385 -0
- autogen/tools/experimental/messageplatform/telegram/__init__.py +7 -0
- autogen/tools/experimental/messageplatform/telegram/telegram.py +271 -0
- autogen/tools/experimental/perplexity/__init__.py +7 -0
- autogen/tools/experimental/perplexity/perplexity_search.py +249 -0
- autogen/tools/experimental/reliable/__init__.py +10 -0
- autogen/tools/experimental/reliable/reliable.py +1311 -0
- autogen/tools/experimental/searxng/__init__.py +7 -0
- autogen/tools/experimental/searxng/searxng_search.py +142 -0
- autogen/tools/experimental/tavily/__init__.py +7 -0
- autogen/tools/experimental/tavily/tavily_search.py +176 -0
- autogen/tools/experimental/web_search_preview/__init__.py +7 -0
- autogen/tools/experimental/web_search_preview/web_search_preview.py +120 -0
- autogen/tools/experimental/wikipedia/__init__.py +7 -0
- autogen/tools/experimental/wikipedia/wikipedia.py +284 -0
- autogen/tools/function_utils.py +412 -0
- autogen/tools/tool.py +188 -0
- autogen/tools/toolkit.py +86 -0
- autogen/types.py +29 -0
- autogen/version.py +7 -0
- templates/client_template/main.jinja2 +72 -0
- templates/config_template/config.jinja2 +7 -0
- templates/main.jinja2 +61 -0
|
@@ -0,0 +1,1216 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
import os
|
|
8
|
+
import re
|
|
9
|
+
import shutil
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
from ..import_utils import optional_import_block, require_optional_import
|
|
13
|
+
from .notebook_processor import (
|
|
14
|
+
create_base_argument_parser,
|
|
15
|
+
process_notebooks_core,
|
|
16
|
+
)
|
|
17
|
+
from .utils import (
|
|
18
|
+
NavigationGroup,
|
|
19
|
+
add_authors_and_social_preview,
|
|
20
|
+
copy_files,
|
|
21
|
+
get_authors_info,
|
|
22
|
+
get_git_tracked_and_untracked_files_in_directory,
|
|
23
|
+
remove_marker_blocks,
|
|
24
|
+
render_gallery_html,
|
|
25
|
+
separate_front_matter_and_content,
|
|
26
|
+
sort_files_by_date,
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
with optional_import_block():
|
|
30
|
+
import yaml
|
|
31
|
+
from jinja2 import Template
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
root_dir = Path(__file__).resolve().parents[2]
|
|
35
|
+
website_dir = root_dir / "website"
|
|
36
|
+
|
|
37
|
+
mint_docs_dir = website_dir / "docs"
|
|
38
|
+
|
|
39
|
+
mkdocs_root_dir = website_dir / "mkdocs"
|
|
40
|
+
|
|
41
|
+
mkdocs_docs_dir = mkdocs_root_dir / "docs"
|
|
42
|
+
mkdocs_output_dir = mkdocs_root_dir / "docs" / "docs"
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def filter_excluded_files(files: list[Path], exclusion_list: list[str], website_dir: Path) -> list[Path]:
|
|
46
|
+
return [
|
|
47
|
+
file
|
|
48
|
+
for file in files
|
|
49
|
+
if not any(Path(str(file.relative_to(website_dir))).as_posix().startswith(excl) for excl in exclusion_list)
|
|
50
|
+
]
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def copy_file(file: Path, mkdocs_output_dir: Path) -> None:
|
|
54
|
+
dest = mkdocs_output_dir / file.relative_to(file.parents[1])
|
|
55
|
+
dest.parent.mkdir(parents=True, exist_ok=True)
|
|
56
|
+
shutil.copy2(file, dest)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def transform_tab_component(content: str) -> str:
|
|
60
|
+
"""Transform React-style tab components to MkDocs tab components.
|
|
61
|
+
|
|
62
|
+
Args:
|
|
63
|
+
content: String containing React-style tab components.
|
|
64
|
+
Expected format is:
|
|
65
|
+
<Tabs>
|
|
66
|
+
<Tab title="Title 1">
|
|
67
|
+
content 1
|
|
68
|
+
</Tab>
|
|
69
|
+
<Tab title="Title 2">
|
|
70
|
+
content 2
|
|
71
|
+
</Tab>
|
|
72
|
+
</Tabs>
|
|
73
|
+
|
|
74
|
+
Returns:
|
|
75
|
+
String with MkDocs tab components:
|
|
76
|
+
=== "Title 1"
|
|
77
|
+
content 1
|
|
78
|
+
|
|
79
|
+
=== "Title 2"
|
|
80
|
+
content 2
|
|
81
|
+
"""
|
|
82
|
+
if "<Tabs>" not in content:
|
|
83
|
+
return content
|
|
84
|
+
|
|
85
|
+
# Find and replace each Tabs section
|
|
86
|
+
pattern = re.compile(r"<Tabs>(.*?)</Tabs>", re.DOTALL)
|
|
87
|
+
|
|
88
|
+
def replace_tabs(match: re.Match[str]) -> str:
|
|
89
|
+
tabs_content = match.group(1)
|
|
90
|
+
|
|
91
|
+
# Extract all Tab elements
|
|
92
|
+
tab_pattern = re.compile(r'<Tab title="([^"]+)">(.*?)</Tab>', re.DOTALL)
|
|
93
|
+
tabs = tab_pattern.findall(tabs_content)
|
|
94
|
+
|
|
95
|
+
if not tabs:
|
|
96
|
+
return ""
|
|
97
|
+
|
|
98
|
+
result = []
|
|
99
|
+
|
|
100
|
+
for i, (title, tab_content) in enumerate(tabs):
|
|
101
|
+
# Add tab header
|
|
102
|
+
result.append(f'=== "{title}"')
|
|
103
|
+
|
|
104
|
+
# Process content by maintaining indentation structure
|
|
105
|
+
lines = tab_content.strip().split("\n")
|
|
106
|
+
|
|
107
|
+
# Find minimum common indentation for non-empty lines
|
|
108
|
+
non_empty_lines = [line for line in lines if line.strip()]
|
|
109
|
+
min_indent = min(len(line) - len(line.lstrip()) for line in non_empty_lines) if non_empty_lines else 0
|
|
110
|
+
|
|
111
|
+
# Remove common indentation and add 4-space indent
|
|
112
|
+
processed_lines = []
|
|
113
|
+
for line in lines:
|
|
114
|
+
if line.strip():
|
|
115
|
+
# Remove the common indentation but preserve relative indentation
|
|
116
|
+
if len(line) >= min_indent:
|
|
117
|
+
processed_lines.append(" " + line[min_indent:])
|
|
118
|
+
else:
|
|
119
|
+
processed_lines.append(" " + line.lstrip())
|
|
120
|
+
else:
|
|
121
|
+
processed_lines.append("")
|
|
122
|
+
|
|
123
|
+
result.append("\n".join(processed_lines))
|
|
124
|
+
|
|
125
|
+
# Add a blank line between tabs (but not after the last one)
|
|
126
|
+
if i < len(tabs) - 1:
|
|
127
|
+
result.append("")
|
|
128
|
+
|
|
129
|
+
return "\n".join(result)
|
|
130
|
+
|
|
131
|
+
# Replace each Tabs section
|
|
132
|
+
result = pattern.sub(replace_tabs, content)
|
|
133
|
+
|
|
134
|
+
return result
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def transform_card_grp_component(content: str) -> str:
|
|
138
|
+
# Replace CardGroup tags
|
|
139
|
+
modified_content = re.sub(r"<CardGroup\s+cols=\{(\d+)\}>\s*", "", content)
|
|
140
|
+
modified_content = re.sub(r"\s*</CardGroup>", "", modified_content)
|
|
141
|
+
|
|
142
|
+
# Replace Card tags with title and href attributes
|
|
143
|
+
pattern = r'<Card\s+title="([^"]*)"\s+href="([^"]*)">(.*?)</Card>'
|
|
144
|
+
replacement = r'<a class="card" href="\2">\n<h2>\1</h2>\3</a>'
|
|
145
|
+
modified_content = re.sub(pattern, replacement, modified_content, flags=re.DOTALL)
|
|
146
|
+
|
|
147
|
+
# Replace simple Card tags
|
|
148
|
+
modified_content = re.sub(r"<Card>", '<div class="card">', modified_content)
|
|
149
|
+
modified_content = re.sub(r"</Card>", "</div>", modified_content)
|
|
150
|
+
|
|
151
|
+
return modified_content
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def fix_asset_path(content: str) -> str:
|
|
155
|
+
# Replace static/img paths with /assets/img
|
|
156
|
+
modified_content = re.sub(r'src="/static/img/([^"]+)"', r'src="/assets/img/\1"', content)
|
|
157
|
+
modified_content = re.sub(r"!\[([^\]]*)\]\(/static/img/([^)]+)\)", r"", modified_content)
|
|
158
|
+
|
|
159
|
+
# Replace docs paths with /docs
|
|
160
|
+
modified_content = re.sub(r'href="/docs/([^"]+)"', r'href="/docs/\1"', modified_content)
|
|
161
|
+
|
|
162
|
+
return modified_content
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def fix_internal_references(abs_file_url: str, mkdocs_docs_dir: Path = mkdocs_docs_dir) -> str:
|
|
166
|
+
# Special case for the API Reference
|
|
167
|
+
if abs_file_url in {"/docs/api-reference", "/docs/api-reference/autogen"}:
|
|
168
|
+
return (
|
|
169
|
+
f"{abs_file_url}/autogen/AfterWork"
|
|
170
|
+
if abs_file_url == "/docs/api-reference"
|
|
171
|
+
else f"{abs_file_url}/AfterWork"
|
|
172
|
+
)
|
|
173
|
+
|
|
174
|
+
# Handle API reference URLs with hash fragments
|
|
175
|
+
if abs_file_url.startswith("/docs/api-reference/") and "#" in abs_file_url:
|
|
176
|
+
base_url, fragment = abs_file_url.split("#")
|
|
177
|
+
module_prefix = base_url.replace("/docs/api-reference/", "").replace("/", ".")
|
|
178
|
+
return f"{base_url}#{module_prefix}.{fragment.replace('-', '_')}"
|
|
179
|
+
|
|
180
|
+
file_path = mkdocs_docs_dir / (abs_file_url.lstrip("/") + ".md")
|
|
181
|
+
if file_path.is_file():
|
|
182
|
+
return abs_file_url
|
|
183
|
+
|
|
184
|
+
full_path = mkdocs_docs_dir / abs_file_url.lstrip("/")
|
|
185
|
+
|
|
186
|
+
if not full_path.is_dir():
|
|
187
|
+
return abs_file_url
|
|
188
|
+
|
|
189
|
+
# Find the first .md file in the directory
|
|
190
|
+
md_files = sorted(full_path.glob("*.md"))
|
|
191
|
+
return f"{abs_file_url}/{md_files[0].stem}"
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def absolute_to_relative(source_path: str, dest_path: str) -> str:
|
|
195
|
+
"""Convert an absolute path to a relative path from the source directory.
|
|
196
|
+
|
|
197
|
+
Args:
|
|
198
|
+
source_path: The source file's absolute path (e.g., "/docs/home/quick-start.md")
|
|
199
|
+
dest_path: The destination file's absolute path (e.g., "/docs/user-guide/basic-concepts/installing-ag2")
|
|
200
|
+
|
|
201
|
+
Returns:
|
|
202
|
+
A relative path from source to destination (e.g., "../../user-guide/basic-concepts/installing-ag2")
|
|
203
|
+
"""
|
|
204
|
+
sep = os.sep
|
|
205
|
+
try:
|
|
206
|
+
# Primary approach: Use pathlib for clean path calculation
|
|
207
|
+
rel_path = str(Path(dest_path).relative_to(Path(source_path).parent))
|
|
208
|
+
return f".{sep}{rel_path}" if Path(source_path).stem == "index" else f"..{sep}{rel_path}"
|
|
209
|
+
except ValueError:
|
|
210
|
+
# Fallback approach: Use os.path.relpath when paths don't share a common parent
|
|
211
|
+
rel_path = os.path.relpath(dest_path, source_path)
|
|
212
|
+
|
|
213
|
+
# Special case for blog directories: add deeper path traversal
|
|
214
|
+
ret_val = os.path.join("..", "..", "..", rel_path) if "blog" in source_path else rel_path
|
|
215
|
+
|
|
216
|
+
# Special case for index files: strip leading "../"
|
|
217
|
+
if Path(source_path).stem == "index":
|
|
218
|
+
ret_val = ret_val[3:]
|
|
219
|
+
|
|
220
|
+
return ret_val
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def fix_internal_links(source_path: str, content: str) -> str:
|
|
224
|
+
"""Detect internal links in content that start with '/docs' and convert them to relative paths.
|
|
225
|
+
|
|
226
|
+
Args:
|
|
227
|
+
source_path: The source file's absolute path
|
|
228
|
+
content: The content with potential internal links
|
|
229
|
+
|
|
230
|
+
Returns:
|
|
231
|
+
Content with internal links converted to relative paths
|
|
232
|
+
"""
|
|
233
|
+
# Define regex patterns for HTML and Markdown links
|
|
234
|
+
html_link_pattern = r'href="(/docs/[^"]*)"'
|
|
235
|
+
html_img_src_pattern = r'src="(/snippets/[^"]+)"'
|
|
236
|
+
html_assets_src_pattern = r'src="(/assets/[^"]+)"'
|
|
237
|
+
|
|
238
|
+
markdown_link_pattern = r"\[([^\]]+)\]\((/docs/[^)]*)\)"
|
|
239
|
+
markdown_img_pattern = r"!\[([^\]]*)\]\((/snippets/[^)]+)\)"
|
|
240
|
+
markdown_assets_pattern = r"!\[([^\]]*)\]\((/assets/[^)]+)\)"
|
|
241
|
+
|
|
242
|
+
def handle_blog_url(url: str) -> str:
|
|
243
|
+
"""Special handling for blog URLs, converting date format from YYYY-MM-DD to YYYY/MM/DD.
|
|
244
|
+
|
|
245
|
+
Args:
|
|
246
|
+
url: The URL to process
|
|
247
|
+
|
|
248
|
+
Returns:
|
|
249
|
+
The URL with date format converted if it matches the blog URL pattern
|
|
250
|
+
"""
|
|
251
|
+
blog_date_pattern = r"/docs/blog/(\d{4})-(\d{2})-(\d{2})-([\w-]+)"
|
|
252
|
+
|
|
253
|
+
if re.match(blog_date_pattern, url):
|
|
254
|
+
return re.sub(blog_date_pattern, r"/docs/blog/\1/\2/\3/\4", url)
|
|
255
|
+
|
|
256
|
+
return url
|
|
257
|
+
|
|
258
|
+
# Convert HTML links
|
|
259
|
+
def replace_html(match: re.Match[str], attr_type: str) -> str:
|
|
260
|
+
# There's only one group in the pattern, which is the path
|
|
261
|
+
absolute_link = match.group(1)
|
|
262
|
+
|
|
263
|
+
absolute_link = handle_blog_url(absolute_link)
|
|
264
|
+
abs_file_path = fix_internal_references(absolute_link)
|
|
265
|
+
relative_link = absolute_to_relative(source_path, abs_file_path)
|
|
266
|
+
return f'{attr_type}="{relative_link}"'
|
|
267
|
+
|
|
268
|
+
# Convert Markdown links
|
|
269
|
+
def replace_markdown(match: re.Match[str], is_image: bool) -> str:
|
|
270
|
+
text = match.group(1)
|
|
271
|
+
absolute_link = match.group(2)
|
|
272
|
+
|
|
273
|
+
absolute_link = handle_blog_url(absolute_link)
|
|
274
|
+
abs_file_path = fix_internal_references(absolute_link)
|
|
275
|
+
relative_link = absolute_to_relative(source_path, abs_file_path)
|
|
276
|
+
prefix = "!" if is_image else ""
|
|
277
|
+
return f"{prefix}[{text}]({relative_link})"
|
|
278
|
+
|
|
279
|
+
# Apply replacements
|
|
280
|
+
content = re.sub(html_link_pattern, lambda match: replace_html(match, "href"), content)
|
|
281
|
+
content = re.sub(html_img_src_pattern, lambda match: replace_html(match, "src"), content)
|
|
282
|
+
content = re.sub(html_assets_src_pattern, lambda match: replace_html(match, "src"), content)
|
|
283
|
+
|
|
284
|
+
content = re.sub(markdown_link_pattern, lambda match: replace_markdown(match, False), content)
|
|
285
|
+
content = re.sub(markdown_img_pattern, lambda match: replace_markdown(match, True), content)
|
|
286
|
+
content = re.sub(markdown_assets_pattern, lambda match: replace_markdown(match, True), content)
|
|
287
|
+
|
|
288
|
+
return content
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def transform_content_for_mkdocs(content: str, rel_file_path: str) -> str:
|
|
292
|
+
# Transform admonitions (Tip, Warning, Note)
|
|
293
|
+
tag_mappings = {
|
|
294
|
+
"Tip": "tip",
|
|
295
|
+
"Warning": "warning",
|
|
296
|
+
"Note": "note",
|
|
297
|
+
"Danger": "danger",
|
|
298
|
+
}
|
|
299
|
+
for html_tag, mkdocs_type in tag_mappings.items():
|
|
300
|
+
pattern = f"<{html_tag}>(.*?)</{html_tag}>"
|
|
301
|
+
|
|
302
|
+
def replacement(match: re.Match[str]) -> str:
|
|
303
|
+
inner_content = match.group(1).strip()
|
|
304
|
+
|
|
305
|
+
lines = inner_content.split("\n")
|
|
306
|
+
|
|
307
|
+
non_empty_lines = [line for line in lines if line.strip()]
|
|
308
|
+
min_indent = min(len(line) - len(line.lstrip()) for line in non_empty_lines) if non_empty_lines else 0
|
|
309
|
+
|
|
310
|
+
# Process each line
|
|
311
|
+
processed_lines = []
|
|
312
|
+
for line in lines:
|
|
313
|
+
if line.strip():
|
|
314
|
+
# Remove common indentation and add 4-space indent
|
|
315
|
+
if len(line) >= min_indent:
|
|
316
|
+
processed_lines.append(" " + line[min_indent:])
|
|
317
|
+
else:
|
|
318
|
+
processed_lines.append(" " + line.lstrip())
|
|
319
|
+
else:
|
|
320
|
+
processed_lines.append("")
|
|
321
|
+
|
|
322
|
+
# Format the admonition with properly indented content
|
|
323
|
+
return f"!!! {mkdocs_type.lstrip()}\n" + "\n".join(processed_lines)
|
|
324
|
+
|
|
325
|
+
content = re.sub(pattern, replacement, content, flags=re.DOTALL)
|
|
326
|
+
|
|
327
|
+
# Clean up style tags with double curly braces
|
|
328
|
+
style_pattern = r"style\s*=\s*{{\s*([^}]+)\s*}}"
|
|
329
|
+
|
|
330
|
+
def style_replacement(match: re.Match[str]) -> str:
|
|
331
|
+
style_content = match.group(1).strip()
|
|
332
|
+
return f"style={{ {style_content} }}"
|
|
333
|
+
|
|
334
|
+
content = re.sub(style_pattern, style_replacement, content)
|
|
335
|
+
|
|
336
|
+
# Fix snippet imports
|
|
337
|
+
content = fix_snippet_imports(content)
|
|
338
|
+
|
|
339
|
+
# Transform tab components
|
|
340
|
+
content = transform_tab_component(content)
|
|
341
|
+
|
|
342
|
+
# Transform CardGroup components
|
|
343
|
+
content = transform_card_grp_component(content)
|
|
344
|
+
|
|
345
|
+
# Fix assets path
|
|
346
|
+
content = fix_asset_path(content)
|
|
347
|
+
|
|
348
|
+
# Remove the mintlify specific markers
|
|
349
|
+
content = remove_marker_blocks(content, "DELETE-ME-WHILE-BUILDING-MKDOCS")
|
|
350
|
+
|
|
351
|
+
# Fix Internal links
|
|
352
|
+
content = fix_internal_links(rel_file_path, content)
|
|
353
|
+
|
|
354
|
+
return content
|
|
355
|
+
|
|
356
|
+
|
|
357
|
+
def rename_user_story(p: Path) -> Path:
|
|
358
|
+
name = p.parent.name.split("-")[3:]
|
|
359
|
+
return p.parent / ("_".join(name).lower() + p.suffix)
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
def process_and_copy_files(input_dir: Path, output_dir: Path, files: list[Path]) -> None:
|
|
363
|
+
sep = os.sep
|
|
364
|
+
# Keep track of MD files we need to process
|
|
365
|
+
md_files_to_process = []
|
|
366
|
+
|
|
367
|
+
# Step 1: First copy mdx files to destination as md files
|
|
368
|
+
for file in files:
|
|
369
|
+
if file.suffix == ".mdx":
|
|
370
|
+
dest = output_dir / file.relative_to(input_dir).with_suffix(".md")
|
|
371
|
+
|
|
372
|
+
if file.name == "home.mdx":
|
|
373
|
+
dest = output_dir / "home.md"
|
|
374
|
+
|
|
375
|
+
if f"{sep}user-stories{sep}" in str(dest):
|
|
376
|
+
dest = rename_user_story(dest)
|
|
377
|
+
|
|
378
|
+
dest.parent.mkdir(parents=True, exist_ok=True)
|
|
379
|
+
dest.write_text(file.read_text())
|
|
380
|
+
md_files_to_process.append(dest)
|
|
381
|
+
else:
|
|
382
|
+
copy_files(input_dir, output_dir, [file])
|
|
383
|
+
|
|
384
|
+
# Step 2: Process the MD files we created
|
|
385
|
+
for md_file in md_files_to_process:
|
|
386
|
+
content = md_file.read_text()
|
|
387
|
+
|
|
388
|
+
rel_path = f"{sep}{md_file.relative_to(output_dir.parents[0])}"
|
|
389
|
+
processed_content = transform_content_for_mkdocs(content, rel_path)
|
|
390
|
+
|
|
391
|
+
md_file.write_text(processed_content)
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
def format_title(file_path_str: str, keywords: dict[str, str], mkdocs_docs_dir: Path) -> str:
|
|
395
|
+
"""Format a page title with proper capitalization for special keywords."""
|
|
396
|
+
file_path = mkdocs_docs_dir / Path(file_path_str)
|
|
397
|
+
|
|
398
|
+
# Default formatting function for filenames
|
|
399
|
+
def format_with_keywords(text: str) -> str:
|
|
400
|
+
words = text.replace("-", " ").title().split()
|
|
401
|
+
return " ".join(keywords.get(word, word) for word in words)
|
|
402
|
+
|
|
403
|
+
try:
|
|
404
|
+
front_matter_string, _ = separate_front_matter_and_content(file_path)
|
|
405
|
+
if front_matter_string:
|
|
406
|
+
front_matter = yaml.safe_load(front_matter_string[4:-3])
|
|
407
|
+
sidebar_title: str = front_matter.get("sidebarTitle")
|
|
408
|
+
if sidebar_title:
|
|
409
|
+
return sidebar_title
|
|
410
|
+
except (FileNotFoundError, yaml.YAMLError):
|
|
411
|
+
pass
|
|
412
|
+
|
|
413
|
+
# Fall back to filename if file not found or no sidebarTitle
|
|
414
|
+
return format_with_keywords(Path(file_path_str).stem)
|
|
415
|
+
|
|
416
|
+
|
|
417
|
+
def format_page_entry(page_loc: str, indent: str, keywords: dict[str, str], mkdocs_docs_dir: Path) -> str:
|
|
418
|
+
"""Format a single page entry as either a parenthesized path or a markdown link."""
|
|
419
|
+
file_path_str = f"{page_loc}.md"
|
|
420
|
+
title = format_title(file_path_str, keywords, mkdocs_docs_dir)
|
|
421
|
+
return f"{indent} - [{title}]({file_path_str})"
|
|
422
|
+
|
|
423
|
+
|
|
424
|
+
def format_navigation(
|
|
425
|
+
nav: list[NavigationGroup],
|
|
426
|
+
mkdocs_docs_dir: Path = mkdocs_docs_dir,
|
|
427
|
+
depth: int = 0,
|
|
428
|
+
keywords: dict[str, str] | None = None,
|
|
429
|
+
) -> str:
|
|
430
|
+
"""Recursively format navigation structure into markdown-style nested list.
|
|
431
|
+
|
|
432
|
+
Args:
|
|
433
|
+
nav: List of navigation items with groups and pages
|
|
434
|
+
mkdocs_docs_dir: Directory where the markdown files are located
|
|
435
|
+
depth: Current indentation depth
|
|
436
|
+
keywords: Dictionary of special case word capitalizations
|
|
437
|
+
|
|
438
|
+
Returns:
|
|
439
|
+
Formatted navigation as a string
|
|
440
|
+
"""
|
|
441
|
+
if keywords is None:
|
|
442
|
+
keywords = {
|
|
443
|
+
"Ag2": "AG2",
|
|
444
|
+
"Rag": "RAG",
|
|
445
|
+
"Llm": "LLM",
|
|
446
|
+
}
|
|
447
|
+
|
|
448
|
+
indent = " " * depth
|
|
449
|
+
result = []
|
|
450
|
+
|
|
451
|
+
for item in nav:
|
|
452
|
+
# Add group header
|
|
453
|
+
result.append(f"{indent}- {item['group']}")
|
|
454
|
+
|
|
455
|
+
# Process each page
|
|
456
|
+
for page in item["pages"]:
|
|
457
|
+
if isinstance(page, dict):
|
|
458
|
+
# Handle nested navigation groups
|
|
459
|
+
result.append(format_navigation([page], mkdocs_docs_dir, depth + 1, keywords))
|
|
460
|
+
else:
|
|
461
|
+
# Handle individual pages
|
|
462
|
+
result.append(format_page_entry(page, indent, keywords, mkdocs_docs_dir))
|
|
463
|
+
|
|
464
|
+
ret_val = "\n".join(result)
|
|
465
|
+
|
|
466
|
+
ret_val = ret_val.replace(
|
|
467
|
+
"- Quick Start\n - [Quick Start](docs/quick-start.md)\n",
|
|
468
|
+
"- [Quick Start](docs/quick-start.md)\n",
|
|
469
|
+
)
|
|
470
|
+
ret_val = ret_val.replace(
|
|
471
|
+
"- Basic Concepts\n",
|
|
472
|
+
"- [Basic Concepts](docs/user-guide/basic-concepts/overview.md)\n",
|
|
473
|
+
)
|
|
474
|
+
ret_val = ret_val.replace("- FAQs\n - [Faq](docs/faq/FAQ.md)\n", "- [FAQs](docs/faq/FAQ.md)\n")
|
|
475
|
+
return ret_val
|
|
476
|
+
|
|
477
|
+
|
|
478
|
+
def add_api_ref_to_mkdocs_template(mkdocs_nav: str, section_to_follow: str) -> str:
|
|
479
|
+
"""Add API Reference section to the navigation template."""
|
|
480
|
+
api_reference_section = """- API References
|
|
481
|
+
{api}
|
|
482
|
+
"""
|
|
483
|
+
section_to_follow_marker = f"- {section_to_follow}"
|
|
484
|
+
|
|
485
|
+
replacement_content = f"{api_reference_section}{section_to_follow_marker}"
|
|
486
|
+
ret_val = mkdocs_nav.replace(section_to_follow_marker, replacement_content)
|
|
487
|
+
return ret_val
|
|
488
|
+
|
|
489
|
+
|
|
490
|
+
@require_optional_import("jinja2", "docs")
|
|
491
|
+
def generate_mkdocs_navigation(website_dir: Path, mkdocs_root_dir: Path, nav_exclusions: list[str]) -> None:
|
|
492
|
+
mintlify_nav_template_path = website_dir / "mint-json-template.json.jinja"
|
|
493
|
+
mkdocs_nav_path = mkdocs_root_dir / "docs" / "navigation_template.txt"
|
|
494
|
+
summary_md_path = mkdocs_root_dir / "docs" / "SUMMARY.md"
|
|
495
|
+
|
|
496
|
+
mintlify_json = json.loads(Template(mintlify_nav_template_path.read_text(encoding="utf-8")).render())
|
|
497
|
+
mintlify_nav = mintlify_json["navigation"]
|
|
498
|
+
filtered_nav = [item for item in mintlify_nav if item["group"] not in nav_exclusions]
|
|
499
|
+
|
|
500
|
+
mkdocs_docs_dir = mkdocs_root_dir / "docs"
|
|
501
|
+
mkdocs_nav = format_navigation(filtered_nav, mkdocs_docs_dir)
|
|
502
|
+
mkdocs_nav_with_api_ref = add_api_ref_to_mkdocs_template(mkdocs_nav, "Contributor Guide")
|
|
503
|
+
|
|
504
|
+
blog_nav = "- Blog\n - [Blog](docs/blog/index.md)"
|
|
505
|
+
|
|
506
|
+
mkdocs_nav_content = "---\nsearch:\n exclude: true\n---\n" + mkdocs_nav_with_api_ref + "\n" + blog_nav + "\n"
|
|
507
|
+
mkdocs_nav_path.write_text(mkdocs_nav_content)
|
|
508
|
+
summary_md_path.write_text(mkdocs_nav_content)
|
|
509
|
+
|
|
510
|
+
|
|
511
|
+
def copy_assets(website_dir: Path) -> None:
|
|
512
|
+
src_dir = website_dir / "static" / "img"
|
|
513
|
+
dest_dir = website_dir / "mkdocs" / "docs" / "assets" / "img"
|
|
514
|
+
|
|
515
|
+
git_tracked_img_files = get_git_tracked_and_untracked_files_in_directory(website_dir / "static" / "img")
|
|
516
|
+
copy_files(src_dir, dest_dir, git_tracked_img_files)
|
|
517
|
+
|
|
518
|
+
|
|
519
|
+
def add_excerpt_marker(content: str) -> str:
|
|
520
|
+
"""Add <!-- more --> marker before the second heading in markdown body content.
|
|
521
|
+
|
|
522
|
+
Args:
|
|
523
|
+
content (str): Body content of the markdown file (without frontmatter)
|
|
524
|
+
|
|
525
|
+
Returns:
|
|
526
|
+
str: Modified body content with <!-- more --> added
|
|
527
|
+
"""
|
|
528
|
+
if "<!-- more -->" in content:
|
|
529
|
+
return content.replace(r"\<!-- more -->", "<!-- more -->")
|
|
530
|
+
|
|
531
|
+
# Find all headings
|
|
532
|
+
heading_pattern = re.compile(r"^(#{1,6}\s+.+?)$", re.MULTILINE)
|
|
533
|
+
headings = list(heading_pattern.finditer(content))
|
|
534
|
+
|
|
535
|
+
# If there are fewer than 2 headings, add the marker at the end
|
|
536
|
+
if len(headings) < 2:
|
|
537
|
+
# If there's content, add the marker at the end
|
|
538
|
+
return content.rstrip() + "\n\n<!-- more -->\n"
|
|
539
|
+
|
|
540
|
+
# Get position of the second heading
|
|
541
|
+
second_heading = headings[1]
|
|
542
|
+
position = second_heading.start()
|
|
543
|
+
|
|
544
|
+
# Insert the more marker before the second heading
|
|
545
|
+
return content[:position] + "\n<!-- more -->\n\n" + content[position:]
|
|
546
|
+
|
|
547
|
+
|
|
548
|
+
def generate_url_slug(file: Path) -> str:
|
|
549
|
+
parent_dir = file.parts[-2]
|
|
550
|
+
slug = "-".join(parent_dir.split("-")[3:])
|
|
551
|
+
return f"\nslug: {slug}"
|
|
552
|
+
|
|
553
|
+
|
|
554
|
+
def process_blog_contents(contents: str, file: Path) -> str:
|
|
555
|
+
# Split the content into parts
|
|
556
|
+
parts = contents.split("---", 2)
|
|
557
|
+
if len(parts) < 3:
|
|
558
|
+
return contents
|
|
559
|
+
|
|
560
|
+
frontmatter = parts[1]
|
|
561
|
+
content = parts[2]
|
|
562
|
+
|
|
563
|
+
# Extract tags
|
|
564
|
+
tags_match = re.search(r"tags:\s*\[(.*?)\]", frontmatter)
|
|
565
|
+
if not tags_match:
|
|
566
|
+
return contents
|
|
567
|
+
|
|
568
|
+
tags_str = tags_match.group(1)
|
|
569
|
+
tags = [tag.strip() for tag in tags_str.split(",")]
|
|
570
|
+
|
|
571
|
+
# Extract date from second-to-last part of file path
|
|
572
|
+
date_match = re.match(r"(\d{4}-\d{2}-\d{2})", file.parts[-2])
|
|
573
|
+
date = date_match.group(1) if date_match else None
|
|
574
|
+
|
|
575
|
+
# Remove original tags
|
|
576
|
+
frontmatter = re.sub(r"tags:\s*\[.*?\]", "", frontmatter).strip()
|
|
577
|
+
|
|
578
|
+
# Format tags and categories as YAML lists
|
|
579
|
+
tags_yaml = "tags:\n - " + "\n - ".join(tags)
|
|
580
|
+
categories_yaml = "categories:\n - " + "\n - ".join(tags)
|
|
581
|
+
|
|
582
|
+
# Add date to metadata
|
|
583
|
+
date_yaml = f"\ndate: {date}" if date else ""
|
|
584
|
+
|
|
585
|
+
# Add URL slug metadata
|
|
586
|
+
url_slug = generate_url_slug(file)
|
|
587
|
+
|
|
588
|
+
# add the excerpt marker in the content
|
|
589
|
+
content_with_excerpt_marker = add_excerpt_marker(content)
|
|
590
|
+
|
|
591
|
+
return f"---\n{frontmatter}\n{tags_yaml}\n{categories_yaml}{date_yaml}{url_slug}\n---{content_with_excerpt_marker}"
|
|
592
|
+
|
|
593
|
+
|
|
594
|
+
def fix_snippet_imports(content: str, snippets_dir: Path = mkdocs_output_dir.parent / "snippets") -> str:
|
|
595
|
+
"""Replace import statements for MDX files from snippets directory with the target format.
|
|
596
|
+
|
|
597
|
+
Args:
|
|
598
|
+
content (str): Content containing import statements
|
|
599
|
+
snippets_dir (Path): Path to the snippets directory
|
|
600
|
+
|
|
601
|
+
Returns:
|
|
602
|
+
str: Content with import statements replaced
|
|
603
|
+
"""
|
|
604
|
+
# Regular expression to find import statements for MDX files from /snippets/
|
|
605
|
+
import_pattern = re.compile(r'import\s+(\w+)\s+from\s+"(/snippets/[^"]+\.mdx)"\s*;')
|
|
606
|
+
|
|
607
|
+
# Process all matches
|
|
608
|
+
matches = list(import_pattern.finditer(content))
|
|
609
|
+
|
|
610
|
+
# Process matches in reverse order to avoid offset issues when replacing text
|
|
611
|
+
for match in reversed(matches):
|
|
612
|
+
imported_path = match.group(2)
|
|
613
|
+
|
|
614
|
+
# Check if the path starts with /snippets/
|
|
615
|
+
if not imported_path.startswith("/snippets/"):
|
|
616
|
+
continue
|
|
617
|
+
|
|
618
|
+
# Extract the relative path (without the /snippets/ prefix)
|
|
619
|
+
relative_path = imported_path[len("/snippets/") :]
|
|
620
|
+
|
|
621
|
+
# Construct the full file path
|
|
622
|
+
file_path = snippets_dir / relative_path
|
|
623
|
+
|
|
624
|
+
# Read the file content
|
|
625
|
+
with open(file_path) as f:
|
|
626
|
+
file_content = f.read()
|
|
627
|
+
|
|
628
|
+
# Replace the import statement with the file content
|
|
629
|
+
start, end = match.span()
|
|
630
|
+
content = content[:start] + file_content + content[end:]
|
|
631
|
+
|
|
632
|
+
return content
|
|
633
|
+
|
|
634
|
+
|
|
635
|
+
def process_blog_files(mkdocs_output_dir: Path, authors_yml_path: Path, snippets_src_path: Path) -> None:
|
|
636
|
+
src_blog_dir = mkdocs_output_dir / "_blogs"
|
|
637
|
+
target_blog_dir = mkdocs_output_dir / "blog"
|
|
638
|
+
target_posts_dir = target_blog_dir / "posts"
|
|
639
|
+
snippets_dir = mkdocs_output_dir.parent / "snippets"
|
|
640
|
+
|
|
641
|
+
# Create the target posts directory
|
|
642
|
+
target_posts_dir.mkdir(parents=True, exist_ok=True)
|
|
643
|
+
|
|
644
|
+
# Create the index file in the target blog directory
|
|
645
|
+
index_file = target_blog_dir / "index.md"
|
|
646
|
+
index_file.write_text("# Blog\n\n")
|
|
647
|
+
|
|
648
|
+
# Get all files to copy
|
|
649
|
+
files_to_copy = list(src_blog_dir.rglob("*"))
|
|
650
|
+
|
|
651
|
+
# process blog metadata
|
|
652
|
+
for file in files_to_copy:
|
|
653
|
+
if file.suffix == ".md":
|
|
654
|
+
contents = file.read_text()
|
|
655
|
+
processed_contents = process_blog_contents(contents, file)
|
|
656
|
+
processed_contents = fix_snippet_imports(processed_contents, snippets_dir)
|
|
657
|
+
file.write_text(processed_contents)
|
|
658
|
+
|
|
659
|
+
# Copy files from source to target
|
|
660
|
+
copy_files(src_blog_dir, target_posts_dir, files_to_copy)
|
|
661
|
+
|
|
662
|
+
# Copy snippets directory
|
|
663
|
+
snippets_files_to_copy = list(snippets_src_path.rglob("*"))
|
|
664
|
+
copy_files(snippets_src_path, snippets_dir, snippets_files_to_copy)
|
|
665
|
+
|
|
666
|
+
# Copy authors_yml_path to the target_blog_dir and rename it as .authors.yml
|
|
667
|
+
target_authors_yml_path = target_blog_dir / ".authors.yml"
|
|
668
|
+
shutil.copy2(authors_yml_path, target_authors_yml_path)
|
|
669
|
+
|
|
670
|
+
|
|
671
|
+
_is_first_notebook = True
|
|
672
|
+
|
|
673
|
+
|
|
674
|
+
def add_front_matter_to_metadata_yml(
|
|
675
|
+
front_matter: dict[str, str | list[str] | None], website_build_directory: Path, rendered_mdx: Path
|
|
676
|
+
) -> None:
|
|
677
|
+
"""Add notebook metadata to a YAML file containing metadata for all notebooks."""
|
|
678
|
+
global _is_first_notebook
|
|
679
|
+
|
|
680
|
+
source = front_matter.get("source_notebook")
|
|
681
|
+
if isinstance(source, str) and source.startswith("/website/docs/"):
|
|
682
|
+
return
|
|
683
|
+
|
|
684
|
+
# Get the metadata file path
|
|
685
|
+
metadata_yml_path = website_build_directory / "../../data/notebooks_metadata.yml"
|
|
686
|
+
|
|
687
|
+
# Create parent directories if they don't exist
|
|
688
|
+
metadata_yml_path.parent.mkdir(parents=True, exist_ok=True)
|
|
689
|
+
|
|
690
|
+
# If this is the first notebook, delete the existing file
|
|
691
|
+
if _is_first_notebook and metadata_yml_path.exists():
|
|
692
|
+
metadata_yml_path.unlink()
|
|
693
|
+
_is_first_notebook = False
|
|
694
|
+
|
|
695
|
+
# Create new entry for current notebook
|
|
696
|
+
title = front_matter.get("title", "")
|
|
697
|
+
link = f"/docs/use-cases/notebooks/notebooks/{rendered_mdx.stem}.md"
|
|
698
|
+
rel_link = f"../notebooks/{rendered_mdx.stem}"
|
|
699
|
+
description = front_matter.get("description", "")
|
|
700
|
+
tags = front_matter.get("tags", []) or []
|
|
701
|
+
|
|
702
|
+
# Escape quotes in strings
|
|
703
|
+
title = str(title).replace('"', '\\"')
|
|
704
|
+
description = str(description).replace('"', '\\"')
|
|
705
|
+
source_str = str(source or "").replace('"', '\\"')
|
|
706
|
+
|
|
707
|
+
# Open file in append mode
|
|
708
|
+
with open(metadata_yml_path, "a", encoding="utf-8") as f:
|
|
709
|
+
# Write the entry
|
|
710
|
+
f.write(f'- title: "{title}"\n')
|
|
711
|
+
f.write(f' link: "{link}"\n')
|
|
712
|
+
f.write(f' rel_link: "{rel_link}"\n')
|
|
713
|
+
f.write(f' description: "{description}"\n')
|
|
714
|
+
f.write(' image: ""\n')
|
|
715
|
+
|
|
716
|
+
# Write tags
|
|
717
|
+
if tags:
|
|
718
|
+
f.write(" tags:\n")
|
|
719
|
+
for tag in tags:
|
|
720
|
+
if tag: # Only write non-empty tags
|
|
721
|
+
tag_str = str(tag).replace('"', '\\"')
|
|
722
|
+
f.write(f' - "{tag_str}"\n')
|
|
723
|
+
else:
|
|
724
|
+
f.write(" tags: []\n")
|
|
725
|
+
|
|
726
|
+
# Write source
|
|
727
|
+
f.write(f' source: "{source_str}"\n')
|
|
728
|
+
f.write("\n")
|
|
729
|
+
|
|
730
|
+
|
|
def transform_admonition_blocks(content: str) -> str:
    """Transform admonition blocks from ::: syntax to Material for MkDocs syntax.

    Converts blocks like:
    :::info Requirements
    content here
    :::

    To:
    !!! info "Requirements"
        content here

    Args:
        content: String containing ::: syntax admonition blocks

    Returns:
        String with Material for MkDocs admonition blocks
    """
    tag_mappings = {
        "Tip": "tip",
        "Warning": "warning",
        "Note": "note",
        "Danger": "danger",
    }

    # Simplified approach: first detect admonition block boundaries
    lines = content.split("\n")
    admonition_start = None
    admonition_type = None
    admonition_title = None
    admonition_content: list[str] = []
    result_lines = []

    i = 0
    while i < len(lines):
        line = lines[i]

        # Check for admonition start
        if line.strip().startswith(":::") and admonition_start is None:
            admonition_start = i
            # Extract admonition type and optional title
            match = re.match(r":::(\w+)(?:\s+(.+))?", line.strip())
            if match:
                admonition_type = match.group(1)
                admonition_title = match.group(2) if match.group(2) else ""
            else:
                # No match for admonition type means we couldn't parse the format
                admonition_type = None
            i += 1
            continue

        # Check for admonition end
        elif line.strip() == ":::" and admonition_start is not None:
            # If admonition_type is None, preserve the original content
            if admonition_type is None:
                # Add back the original admonition block without transformation
                original_lines = []
                original_lines.append(lines[admonition_start])  # Opening :::
                original_lines.extend(admonition_content)  # Content
                original_lines.append(line)  # Closing :::
                result_lines.extend(original_lines)
            else:
                # Process valid admonition types
                # Map the admonition type
                if admonition_type in tag_mappings:
                    mapped_type = tag_mappings[admonition_type]
                else:
                    # Try case-insensitive match
                    for tag, mapped in tag_mappings.items():
                        if tag.lower() == admonition_type.lower():
                            mapped_type = mapped
                            break
                    else:
                        # Default to lowercase of original if no mapping found
                        mapped_type = admonition_type.lower()

                # Process indentation
                if admonition_content:
                    # Find minimum common indentation
                    non_empty_lines = [line for line in admonition_content if line.strip()]
                    min_indent = min((len(line) - len(line.lstrip()) for line in non_empty_lines), default=0)

                    # Remove common indentation and add 4-space indent
                    processed_content = []
                    for line in admonition_content:
                        if line.strip():
                            if len(line) >= min_indent:
                                processed_content.append("    " + line[min_indent:])
                            else:
                                processed_content.append("    " + line.lstrip())
                        else:
                            processed_content.append("")
                else:
                    processed_content = []

                # Create the MkDocs admonition
                if admonition_title:
                    mkdocs_admonition = [f'!!! {mapped_type} "{admonition_title}"'] + processed_content
                else:
                    mkdocs_admonition = [f"!!! {mapped_type}"] + processed_content

                # Add the processed admonition
                result_lines.extend(mkdocs_admonition)

            # Reset admonition tracking
            admonition_start = None
            admonition_type = None
            admonition_title = None
            admonition_content = []
            i += 1
            continue

        elif admonition_start is not None:
            admonition_content.append(line)
            i += 1
            continue

        else:
            result_lines.append(line)
            i += 1

    if admonition_start is not None:
        for j in range(admonition_start, len(lines)):
            result_lines.append(lines[j])

    return "\n".join(result_lines)
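
For illustration (not part of the module): a minimal sketch of the conversion performed by transform_admonition_blocks on a made-up input, assuming the function above and the module's re import are in scope.

# Illustrative sketch only: the ::: block below is invented input.
sample = ":::info Requirements\nInstall ag2 before running the notebook.\n:::"
print(transform_admonition_blocks(sample))
# Expected output:
# !!! info "Requirements"
#     Install ag2 before running the notebook.
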
def remove_mdx_code_blocks(content: str) -> str:
    """Remove ````mdx-code-block and ```` markers from the content.

    This function removes the mdx-code-block markers while preserving the content inside.

    Args:
        content: String containing mdx-code-block markers

    Returns:
        String with mdx-code-block markers removed
    """
    # Pattern to match mdx-code-block sections
    # Captures everything between ````mdx-code-block and ````
    pattern = re.compile(r"````mdx-code-block\n(.*?)\n````", re.DOTALL)

    # Replace with just the content (group 1)
    result = pattern.sub(r"\1", content)

    return result
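
For illustration (not part of the module): a made-up MDX snippet run through remove_mdx_code_blocks, assuming the function above is in scope.

# Illustrative sketch only: invented input.
sample = "````mdx-code-block\nimport Tabs from '@theme/Tabs';\n````"
print(remove_mdx_code_blocks(sample))
# -> import Tabs from '@theme/Tabs';
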
def remove_quarto_raw_html_wrappers(content: str) -> str:
    """Remove Quarto JavaScript wrappers around raw HTML content.

    Quarto wraps raw HTML in JavaScript like:
    export const quartoRawHtml = [`<table>...</table>`];
    And then references it with:
    <div dangerouslySetInnerHTML={{ __html: quartoRawHtml[0] }} />

    This function extracts the raw HTML and removes the JavaScript wrapper.

    Args:
        content: String containing Quarto raw HTML wrappers

    Returns:
        String with raw HTML extracted and JavaScript wrappers removed
    """
    # Pattern to match the quartoRawHtml declaration and extract the HTML content
    # Matches: export const quartoRawHtml\s*=\s*\[\s*`(.*?)`\s*\];
    declaration_pattern = re.compile(r"export\s+const\s+quartoRawHtml\s*=\s*\[\s*`(.*?)`\s*\];", re.DOTALL)

    # Find all quartoRawHtml declarations and store the HTML content
    html_blocks = []
    for match in declaration_pattern.finditer(content):
        html_blocks.append(match.group(1))

    # Remove the declarations
    content = declaration_pattern.sub("", content)

    # Pattern to match the dangerouslySetInnerHTML usage
    # Matches: <div dangerouslySetInnerHTML={{ __html: quartoRawHtml[0] }} />
    # Or: <div dangerouslySetInnerHTML={{ __html: quartoRawHtml[N] }} />
    usage_pattern = re.compile(r"<div\s+dangerouslySetInnerHTML=\{\{\s*__html:\s*quartoRawHtml\[(\d+)\]\s*\}\}\s*/>")

    # Replace usage with the actual HTML content
    def replace_usage(match: re.Match[str]) -> str:
        index = int(match.group(1))
        if index < len(html_blocks):
            return html_blocks[index]
        return match.group(0)  # Return original if index out of bounds

    content = usage_pattern.sub(replace_usage, content)

    return content
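
For illustration (not part of the module): a made-up Quarto wrapper run through remove_quarto_raw_html_wrappers, assuming the function above is in scope.

# Illustrative sketch only: invented Quarto output.
sample = (
    "export const quartoRawHtml = [`<table><tr><td>42</td></tr></table>`];\n"
    "Some narrative text.\n"
    "<div dangerouslySetInnerHTML={{ __html: quartoRawHtml[0] }} />\n"
)
print(remove_quarto_raw_html_wrappers(sample))
# The declaration disappears and the <div ...> placeholder is replaced by the <table> markup.
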
@require_optional_import("yaml", "docs")
def post_process_func(
    rendered_mdx: Path,
    source_notebooks: Path,
    front_matter: dict[str, str | list[str] | None],
    website_build_directory: Path,
) -> None:
    with open(rendered_mdx, encoding="utf-8") as f:
        content = f.read()

    # If there is front matter in the mdx file, we need to remove it
    if content.startswith("---"):
        front_matter_end = content.find("---", 3)
        mdx_front_matter = yaml.safe_load(content[4:front_matter_end])
        # Merge while preserving original values
        front_matter = {**front_matter, **mdx_front_matter}
        content = content[front_matter_end + 3 :]

    # Clean heading IDs using regex - matches from # to the end of the ID block
    content = re.sub(r"(#{1,6}[^{]+){#[^}]+}", r"\1", content)

    # Each intermediate path needs to be resolved for this to work reliably
    repo_root = Path(__file__).resolve().parents[2]
    repo_relative_notebook = source_notebooks.resolve().relative_to(repo_root)
    front_matter["source_notebook"] = f"/{repo_relative_notebook}"
    front_matter["custom_edit_url"] = f"https://github.com/ag2ai/ag2/edit/main/{repo_relative_notebook}"

    github_link = f"https://github.com/ag2ai/ag2/blob/main/{repo_relative_notebook}"
    content = (
        f'\n<a href="{github_link}" class="github-badge" target="_blank">'
        + """<img noZoom src="https://img.shields.io/badge/Open%20on%20GitHub-grey?logo=github" alt="Open on GitHub" />"""
        + "</a>"
        + content
    )

    # If no colab link is present, insert one
    if "colab-badge.svg" not in content:
        colab_link = f"https://colab.research.google.com/github/ag2ai/ag2/blob/main/{repo_relative_notebook}"
        content = (
            f'\n<a href="{colab_link}" class="colab-badge" target="_blank">'
            + """<img noZoom src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" />"""
            + "</a>"
            + content
        )

    # Record the front matter in the notebooks metadata YAML used for the gallery and navigation
    add_front_matter_to_metadata_yml(front_matter, website_build_directory, rendered_mdx)

    # Dump front_matter to yaml
    front_matter_str = yaml.dump(front_matter, default_flow_style=False)

    # Add render_macros: false to the front matter
    front_matter_str += "render_macros: false\n"

    # Transform content for mkdocs
    rel_path = f"/{rendered_mdx.relative_to(website_build_directory.parents[0])}"
    content = transform_content_for_mkdocs(content, rel_path)

    # Convert mdx image syntax to mintlify image syntax
    # content = convert_mdx_image_blocks(content, rendered_mdx, website_build_directory)

    # Ensure editUrl is present
    # content = ensure_edit_url(content, repo_relative_notebook)

    # Transform admonition blocks to Material for MkDocs syntax
    content = transform_admonition_blocks(content)

    # Remove mdx-code-block markers
    content = remove_mdx_code_blocks(content)

    # Remove Quarto raw HTML JavaScript wrappers
    content = remove_quarto_raw_html_wrappers(content)

    # Generate the page title
    page_header = front_matter.get("title")
    page_title = f"# {page_header}\n\n" if page_header else ""

    # Rewrite the content as
    # ---
    # front_matter_str
    # ---
    # content
    new_content = f"---\n{front_matter_str}---\n\n{page_title}\n{content}"

    # Change the file extension to .md
    rendered_md = rendered_mdx.with_suffix(".md")

    with open(rendered_md, "w", encoding="utf-8") as f:
        f.write(new_content)

    # Remove the original .mdx file
    rendered_mdx.unlink()
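
For illustration (not part of the module): the heading-ID cleanup regex used above, applied to a made-up heading.

# Illustrative sketch only: invented heading with a Pandoc/Quarto-style {#...} anchor.
import re

sample = "## Installation {#installation}\nText body\n"
print(re.sub(r"(#{1,6}[^{]+){#[^}]+}", r"\1", sample))
# -> "## Installation \nText body\n"  (the {#...} anchor is stripped)
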
def target_dir_func(website_build_directory: Path) -> Path:
    """Return the target directory for notebooks."""
    return website_build_directory / "use-cases" / "notebooks" / "notebooks"


def inject_gallery_html(notebooks_md_path: Path, metadata_yml_path: Path) -> None:
    """Inject the rendered gallery HTML into the notebooks markdown page."""
    with open(notebooks_md_path, encoding="utf-8") as f:
        content = f.read()

    gallery_html = render_gallery_html(metadata_yml_path)

    updated_content = content.replace("{{ render_gallery(gallery_items) }}", gallery_html)
    with open(notebooks_md_path, "w", encoding="utf-8") as f:
        f.write(updated_content)


@require_optional_import("yaml", "docs")
def add_notebooks_nav(mkdocs_nav_path: Path, metadata_yml_path: Path) -> None:
    """Add notebooks navigation to the summary markdown file.

    Args:
        mkdocs_nav_path: Path to the mkdocs navigation template file
        metadata_yml_path: Path to the notebooks metadata YAML file
    """
    # Read the metadata file to get notebook items
    with open(metadata_yml_path) as file:
        items = yaml.safe_load(file)

    # Create navigation list entries for each notebook
    nav_list = []
    for item in items:
        _link = item["link"][1:] if item["link"].startswith("/") else item["link"]
        nav_list.append(f" - [{item['title']}]({_link})\n")

    # Read the summary file
    with open(mkdocs_nav_path) as file:
        lines = file.readlines()

    # Find where to insert the notebook entries
    for i, line in enumerate(lines):
        if line.strip() == "- [All Notebooks](docs/use-cases/notebooks/Notebooks.md)":
            # Insert all notebook items after the "All Notebooks" line,
            # without any extra blank lines
            for j, nav_item in enumerate(nav_list):
                lines.insert(i + 1 + j, nav_item)
            break

    # Write the updated content back to the summary file
    with open(mkdocs_nav_path, "w") as file:
        file.writelines(lines)
def _generate_navigation_entries(dir_path: Path, mkdocs_output_dir: Path) -> list[str]:
    """Generate navigation entries for user stories and community talks.

    Args:
        dir_path (Path): Path to the directory containing user stories or community talks.
        mkdocs_output_dir (Path): Path to the MkDocs output directory.

    Returns:
        list[str]: Formatted navigation entries.
    """
    # Read all markdown files and sort them by date (newest first)
    files = sorted(dir_path.glob("**/*.md"), key=sort_files_by_date, reverse=True)

    # Prepare navigation entries
    entries = []
    for file in files:
        # Extract the title from the frontmatter using a simple split approach
        content = file.read_text()

        # Split content at the "---" markers
        parts = content.split("---", 2)
        if len(parts) < 3:
            # No valid frontmatter found, use directory name as title
            title = file.parent.name
        else:
            # Parse the frontmatter
            frontmatter_text = parts[1].strip()
            frontmatter = yaml.safe_load(frontmatter_text)
            title = frontmatter.get("title", file.parent.name)

        # Generate relative path from the docs root directory
        relative_path = file.parent.relative_to(mkdocs_output_dir)
        path_for_link = str(relative_path).replace("\\", "/")

        # Format navigation entry
        entries.append(f" - [{title}]({path_for_link}/{file.name})")

    return entries
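
For illustration (not part of the module): the split-based frontmatter parsing used above, applied to a made-up page; requires PyYAML.

# Illustrative sketch only: invented page content.
import yaml

page = "---\ntitle: Customer Success Story\n---\n\nBody text\n"
parts = page.split("---", 2)
title = yaml.safe_load(parts[1].strip()).get("title", "fallback-directory-name")
print(title)  # -> "Customer Success Story"
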
def generate_community_insights_nav(mkdocs_output_dir: Path, mkdocs_nav_path: Path) -> None:
    user_stories_dir = mkdocs_output_dir / "docs" / "user-stories"
    community_talks_dir = mkdocs_output_dir / "docs" / "community-talks"

    user_stories_entries = _generate_navigation_entries(user_stories_dir, mkdocs_output_dir)
    community_talks_entries = _generate_navigation_entries(community_talks_dir, mkdocs_output_dir)

    user_stories_nav = " - User Stories\n" + "\n".join(user_stories_entries)
    community_talks_nav = " - Community Talks\n" + "\n".join(community_talks_entries)
    community_insights_nav = "- Community Insights\n" + user_stories_nav + "\n" + community_talks_nav

    # Read existing navigation template
    nav_content = mkdocs_nav_path.read_text()

    section_to_follow_marker = "- Blog"

    replacement_content = f"{community_insights_nav}\n{section_to_follow_marker}"
    updated_nav_content = nav_content.replace(section_to_follow_marker, replacement_content)

    # Write updated navigation to file
    mkdocs_nav_path.write_text(updated_nav_content)


def add_authors_info_to_user_stories(website_dir: Path) -> None:
    mkdocs_output_dir = website_dir / "mkdocs" / "docs" / "docs"
    user_stories_dir = mkdocs_output_dir / "user-stories"
    authors_yml = website_dir / "blogs_and_user_stories_authors.yml"

    all_authors_info = get_authors_info(authors_yml)

    add_authors_and_social_preview(website_dir, user_stories_dir, all_authors_info, "mkdocs")

    for file_path in user_stories_dir.glob("**/*.md"):
        content = file_path.read_text(encoding="utf-8")
        rel_path = f"/{file_path.relative_to(mkdocs_output_dir.parents[0])}"
        updated_content = transform_content_for_mkdocs(content, rel_path)
        file_path.write_text(updated_content, encoding="utf-8")


def main(force: bool) -> None:
    parser = create_base_argument_parser()
    args = parser.parse_args(["render"])
    args.dry_run = False
    args.quarto_bin = "quarto"
    args.notebooks = None

    # If force is set and the output directory exists, remove it
    if force and mkdocs_output_dir.exists():
        shutil.rmtree(mkdocs_output_dir)

    exclusion_list = [
        "docs/.gitignore",
        "docs/installation",
        "docs/user-guide/getting-started",
        "docs/user-guide/models/litellm-with-watsonx.md",
        "docs/contributor-guide/Migration-Guide.md",
    ]
    nav_exclusions = [""]

    files_to_copy = get_git_tracked_and_untracked_files_in_directory(mint_docs_dir)
    filtered_files = filter_excluded_files(files_to_copy, exclusion_list, website_dir)

    # Copy snippet files
    snippet_files = get_git_tracked_and_untracked_files_in_directory(website_dir / "snippets")
    copy_files(website_dir / "snippets", mkdocs_output_dir.parent / "snippets", snippet_files)

    copy_assets(website_dir)
    process_and_copy_files(mint_docs_dir, mkdocs_output_dir, filtered_files)

    snippets_dir_path = website_dir / "snippets"
    authors_yml_path = website_dir / "blogs_and_user_stories_authors.yml"

    process_blog_files(mkdocs_output_dir, authors_yml_path, snippets_dir_path)
    generate_mkdocs_navigation(website_dir, mkdocs_root_dir, nav_exclusions)

    if args.website_build_directory is None:
        args.website_build_directory = mkdocs_output_dir

    if args.notebook_directory is None:
        args.notebook_directory = mkdocs_root_dir / "../../notebook"

    metadata_yml_path = Path(args.website_build_directory) / "../../data/notebooks_metadata.yml"

    if not metadata_yml_path.exists() or (force and mkdocs_output_dir.exists()):
        process_notebooks_core(args, post_process_func, target_dir_func)

    # Render Notebooks Gallery HTML
    notebooks_md_path = mkdocs_output_dir / "use-cases" / "notebooks" / "Notebooks.md"
    inject_gallery_html(notebooks_md_path, metadata_yml_path)

    # Add Notebooks Navigation to Summary.md
    mkdocs_nav_path = mkdocs_root_dir / "docs" / "navigation_template.txt"
    add_notebooks_nav(mkdocs_nav_path, metadata_yml_path)

    # Render Community Gallery HTML
    community_md_path = mkdocs_output_dir / "use-cases" / "community-gallery" / "community-gallery.md"
    metadata_yml_path = Path(args.website_build_directory) / "../../data/gallery_items.yml"
    inject_gallery_html(community_md_path, metadata_yml_path)

    # Generate Navigation for User Stories
    docs_dir = mkdocs_root_dir / "docs"
    generate_community_insights_nav(docs_dir, mkdocs_nav_path)

    # Add Authors info to User Stories
    add_authors_info_to_user_stories(website_dir)