attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
@@ -0,0 +1,1426 @@
"""Document Generation Workflow.

Main workflow orchestration for documentation generation.

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

import logging
from datetime import datetime
from pathlib import Path
from typing import Any

from attune.config import _validate_file_path

from ..base import BaseWorkflow, ModelTier
from .config import DOC_GEN_STEPS, TOKEN_COSTS
from .report_formatter import format_doc_gen_report

logger = logging.getLogger(__name__)
class DocumentGenerationWorkflow(BaseWorkflow):
    """Multi-tier document generation workflow.

    Uses cheap models for outlining, capable models for content
    generation, and premium models for final polish and consistency
    review.

    Usage:
        workflow = DocumentGenerationWorkflow()
        result = await workflow.execute(
            source_code="...",
            doc_type="api_reference",
            audience="developers"
        )
    """

    name = "doc-gen"
    description = "Cost-optimized documentation generation pipeline"
    stages = ["outline", "write", "polish"]
    tier_map = {
        "outline": ModelTier.CHEAP,
        "write": ModelTier.CAPABLE,
        "polish": ModelTier.PREMIUM,
    }
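The class attributes above wire each stage to a model tier; the `__init__` that follows adds cost, chunking, and export controls. As a rough usage sketch (illustrative only; the input path and budget are invented, and `execute()` is assumed to be the `BaseWorkflow` entry point shown in the class docstring):

```python
import asyncio
from pathlib import Path

from attune.workflows.document_gen.workflow import DocumentGenerationWorkflow


async def main() -> None:
    # outline -> CHEAP, write -> CAPABLE, polish -> PREMIUM, per tier_map above
    workflow = DocumentGenerationWorkflow(
        max_cost=2.0,                  # hypothetical budget guardrail in USD
        export_path="docs/generated",  # also save the result to disk
    )
    result = await workflow.execute(
        source_code=Path("example_module.py").read_text(),  # hypothetical module
        doc_type="api_reference",
        audience="developers",
    )
    print(result)


asyncio.run(main())
```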
    def __init__(
        self,
        skip_polish_threshold: int = 1000,
        max_sections: int = 10,
        max_write_tokens: int | None = None,  # Auto-scaled if None
        section_focus: list[str] | None = None,
        chunked_generation: bool = True,
        sections_per_chunk: int = 3,
        max_cost: float = 5.0,  # Cost guardrail in USD
        cost_warning_threshold: float = 0.8,  # Warn at 80% of max_cost
        graceful_degradation: bool = True,  # Return partial results on error
        export_path: str | Path | None = None,  # Export docs to file (e.g., "docs/generated")
        max_display_chars: int = 45000,  # Max chars before chunking output
        enable_auth_strategy: bool = True,  # Enable intelligent auth routing
        **kwargs: Any,
    ):
        """Initialize workflow with enterprise-safe defaults.

        Args:
            skip_polish_threshold: Skip premium polish for docs under this
                token count (they're already good enough).
            max_sections: Maximum number of sections to generate.
            max_write_tokens: Maximum tokens for content generation.
                If None, auto-scales based on section count (recommended).
            section_focus: Optional list of specific sections to generate
                (e.g., ["Testing Guide", "API Reference"]).
            chunked_generation: If True, generates large docs in chunks to avoid
                truncation (default True).
            sections_per_chunk: Number of sections to generate per chunk (default 3).
            max_cost: Maximum cost in USD before stopping (default $5).
                Set to 0 to disable cost limits.
            cost_warning_threshold: Percentage of max_cost to trigger warning (default 0.8).
            graceful_degradation: If True, return partial results on errors
                instead of failing completely (default True).
            export_path: Optional directory to export generated docs (e.g., "docs/generated").
                If provided, documentation will be saved to a file automatically.
            max_display_chars: Maximum characters before splitting output into chunks
                for display (default 45000). Helps avoid terminal/UI truncation.
            enable_auth_strategy: If True, use intelligent subscription vs API routing
                based on module size (default True).

        """
        super().__init__(**kwargs)
        self.skip_polish_threshold = skip_polish_threshold
        self.max_sections = max_sections
        self._user_max_write_tokens = max_write_tokens  # Store user preference
        self.max_write_tokens = max_write_tokens or 16000  # Will be auto-scaled
        self.section_focus = section_focus
        self.chunked_generation = chunked_generation
        self.sections_per_chunk = sections_per_chunk
        self.max_cost = max_cost
        self.cost_warning_threshold = cost_warning_threshold
        self.graceful_degradation = graceful_degradation
        self.export_path = Path(export_path) if export_path else None
        self.max_display_chars = max_display_chars
        self.enable_auth_strategy = enable_auth_strategy
        self._total_content_tokens: int = 0
        self._accumulated_cost: float = 0.0
        self._cost_warning_issued: bool = False
        self._partial_results: dict = {}
        self._auth_mode_used: str | None = None  # Track which auth was recommended
    def _estimate_cost(self, tier: ModelTier, input_tokens: int, output_tokens: int) -> float:
        """Estimate cost for a given tier and token counts."""
        costs = TOKEN_COSTS.get(tier, TOKEN_COSTS[ModelTier.CAPABLE])
        input_cost = (input_tokens / 1000) * costs["input"]
        output_cost = (output_tokens / 1000) * costs["output"]
        return input_cost + output_cost

    def _track_cost(
        self,
        tier: ModelTier,
        input_tokens: int,
        output_tokens: int,
    ) -> tuple[float, bool]:
        """Track accumulated cost and check against limits.

        Returns:
            Tuple of (cost_for_this_call, should_stop)

        """
        cost = self._estimate_cost(tier, input_tokens, output_tokens)
        self._accumulated_cost += cost

        # Check warning threshold
        if (
            self.max_cost > 0
            and not self._cost_warning_issued
            and self._accumulated_cost >= self.max_cost * self.cost_warning_threshold
        ):
            self._cost_warning_issued = True
            logger.warning(
                f"Doc-gen cost approaching limit: ${self._accumulated_cost:.2f} "
                f"of ${self.max_cost:.2f} ({self.cost_warning_threshold * 100:.0f}% threshold)",
            )

        # Check if we should stop
        should_stop = self.max_cost > 0 and self._accumulated_cost >= self.max_cost
        if should_stop:
            logger.warning(
                f"Doc-gen cost limit reached: ${self._accumulated_cost:.2f} >= ${self.max_cost:.2f}",
            )

        return cost, should_stop

    def _auto_scale_tokens(self, section_count: int) -> int:
        """Auto-scale max_write_tokens based on section count.

        Enterprise projects may have 20+ sections requiring more tokens.
        """
        if self._user_max_write_tokens is not None:
            return self._user_max_write_tokens  # User override

        # Base: 2000 tokens per section, minimum 16000, maximum 64000
        scaled = max(16000, min(64000, section_count * 2000))
        logger.info(f"Auto-scaled max_write_tokens to {scaled} for {section_count} sections")
        return scaled
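A worked sketch of the two formulas above. The per-1K-token prices here are placeholders; the real numbers come from `TOKEN_COSTS` in `.config`, which this diff view does not show:

```python
# Placeholder prices per 1K tokens (the shipped values live in TOKEN_COSTS).
capable = {"input": 0.003, "output": 0.015}

# _estimate_cost: (tokens / 1000) * price, input and output summed.
input_tokens, output_tokens = 4_000, 12_000
cost = (input_tokens / 1000) * capable["input"] + (output_tokens / 1000) * capable["output"]
print(f"${cost:.3f}")  # $0.192 with these placeholder prices

# _auto_scale_tokens: 2000 tokens per section, clamped to the range [16000, 64000].
for section_count in (5, 25, 40):
    print(section_count, max(16000, min(64000, section_count * 2000)))
# 5 -> 16000, 25 -> 50000, 40 -> 64000
```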
    def _export_document(
        self,
        document: str,
        doc_type: str,
        report: str | None = None,
    ) -> tuple[Path | None, Path | None]:
        """Export generated documentation to file.

        Args:
            document: The generated documentation content
            doc_type: Document type for naming
            report: Optional report to save alongside document

        Returns:
            Tuple of (doc_path, report_path) or (None, None) if export disabled

        """
        if not self.export_path:
            return None, None

        # Create export directory
        self.export_path.mkdir(parents=True, exist_ok=True)

        # Generate filename with timestamp
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        safe_doc_type = doc_type.replace(" ", "_").replace("/", "-").lower()
        doc_filename = f"{safe_doc_type}_{timestamp}.md"
        report_filename = f"{safe_doc_type}_{timestamp}_report.txt"

        doc_path = self.export_path / doc_filename
        report_path = self.export_path / report_filename if report else None

        # Write document
        try:
            validated_doc_path = _validate_file_path(str(doc_path))
            validated_doc_path.write_text(document, encoding="utf-8")
            logger.info(f"Documentation exported to: {validated_doc_path}")

            # Write report if provided
            if report and report_path:
                validated_report_path = _validate_file_path(str(report_path))
                validated_report_path.write_text(report, encoding="utf-8")
                logger.info(f"Report exported to: {validated_report_path}")

            return validated_doc_path, validated_report_path if report else None
        except (OSError, ValueError) as e:
            logger.error(f"Failed to export documentation: {e}")
            return None, None
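The export step above builds timestamped, slug-style filenames; a small sketch of the naming rule (the directory and doc type are made up):

```python
from datetime import datetime
from pathlib import Path

export_path = Path("docs/generated")  # hypothetical export directory
doc_type = "API Reference/Core"

timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
safe_doc_type = doc_type.replace(" ", "_").replace("/", "-").lower()

print(export_path / f"{safe_doc_type}_{timestamp}.md")
# e.g. docs/generated/api_reference-core_20250101_120000.md
print(export_path / f"{safe_doc_type}_{timestamp}_report.txt")
# companion report, written only when a report string is passed
```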
    def _chunk_output_for_display(self, content: str, chunk_prefix: str = "PART") -> list[str]:
        """Split large output into displayable chunks.

        Args:
            content: The content to chunk
            chunk_prefix: Prefix for chunk headers

        Returns:
            List of content chunks, each under max_display_chars

        """
        if len(content) <= self.max_display_chars:
            return [content]

        chunks = []
        # Try to split on section boundaries (## headers)
        import re

        sections = re.split(r"(?=^## )", content, flags=re.MULTILINE)

        current_chunk = ""
        chunk_num = 1

        for section in sections:
            # If adding this section would exceed limit, save current chunk
            if current_chunk and len(current_chunk) + len(section) > self.max_display_chars:
                chunks.append(
                    f"{'=' * 60}\n{chunk_prefix} {chunk_num} of {{total}}\n{'=' * 60}\n\n"
                    + current_chunk,
                )
                chunk_num += 1
                current_chunk = section
            else:
                current_chunk += section

        # Add final chunk
        if current_chunk:
            chunks.append(
                f"{'=' * 60}\n{chunk_prefix} {chunk_num} of {{total}}\n{'=' * 60}\n\n"
                + current_chunk,
            )

        # Update total count in all chunks
        total = len(chunks)
        chunks = [chunk.format(total=total) for chunk in chunks]

        return chunks
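The display chunker above splits on `## ` headings and labels each piece "PART i of N". A self-contained sketch of the same splitting loop, using a tiny limit instead of the 45,000-character default and invented markdown content:

```python
import re

max_display_chars = 80  # tiny limit for illustration; the workflow defaults to 45000
content = "## Intro\n" + "a" * 60 + "\n## Usage\n" + "b" * 60 + "\n"

sections = re.split(r"(?=^## )", content, flags=re.MULTILINE)
chunks, current = [], ""
for section in sections:
    if current and len(current) + len(section) > max_display_chars:
        chunks.append(current)
        current = section
    else:
        current += section
if current:
    chunks.append(current)

print([c.splitlines()[0] for c in chunks])  # ['## Intro', '## Usage']
```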
    def should_skip_stage(self, stage_name: str, input_data: Any) -> tuple[bool, str | None]:
        """Skip polish for short documents."""
        if stage_name == "polish":
            if self._total_content_tokens < self.skip_polish_threshold:
                self.tier_map["polish"] = ModelTier.CAPABLE
                return False, None
        return False, None

    async def run_stage(
        self,
        stage_name: str,
        tier: ModelTier,
        input_data: Any,
    ) -> tuple[Any, int, int]:
        """Execute a document generation stage."""
        if stage_name == "outline":
            return await self._outline(input_data, tier)
        if stage_name == "write":
            return await self._write(input_data, tier)
        if stage_name == "polish":
            return await self._polish(input_data, tier)
        raise ValueError(f"Unknown stage: {stage_name}")
    async def _outline(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Generate document outline from source."""
        from pathlib import Path

        source_code = input_data.get("source_code", "")
        target = input_data.get("target", "")
        doc_type = input_data.get("doc_type", "general")
        audience = input_data.get("audience", "developers")

        # Use target if source_code not provided
        content_to_document = source_code or target

        # If target looks like a file path and source_code wasn't provided, read the file
        if not source_code and target:
            target_path = Path(target)
            if target_path.exists() and target_path.is_file():
                try:
                    content_to_document = target_path.read_text(encoding="utf-8")
                    # Prepend file info for context
                    content_to_document = f"# File: {target}\n\n{content_to_document}"
                except Exception as e:
                    # If we can't read the file, log and use the path as-is
                    import logging

                    logging.getLogger(__name__).warning(f"Could not read file {target}: {e}")
            elif target_path.suffix in (
                ".py",
                ".js",
                ".ts",
                ".tsx",
                ".java",
                ".go",
                ".rs",
                ".md",
                ".txt",
            ):
                # Looks like a file path but doesn't exist - warn
                import logging

                logging.getLogger(__name__).warning(
                    f"Target appears to be a file path but doesn't exist: {target}",
                )

        # === AUTH STRATEGY INTEGRATION ===
        # Detect module size and recommend auth mode (first stage only)
        if self.enable_auth_strategy:
            try:
                from attune.models import (
                    count_lines_of_code,
                    get_auth_strategy,
                    get_module_size_category,
                )

                # Calculate module size
                module_lines = 0
                if target and Path(target).exists():
                    module_lines = count_lines_of_code(target)
                elif content_to_document:
                    # Count from source code content
                    module_lines = len(
                        [
                            line
                            for line in content_to_document.split("\n")
                            if line.strip() and not line.strip().startswith("#")
                        ]
                    )

                if module_lines > 0:
                    # Get auth strategy (first-time setup if needed)
                    strategy = get_auth_strategy()

                    # Get recommended auth mode
                    recommended_mode = strategy.get_recommended_mode(module_lines)
                    self._auth_mode_used = recommended_mode.value

                    # Get size category
                    size_category = get_module_size_category(module_lines)

                    # Log recommendation
                    logger.info(
                        f"Module: {target or 'source'} ({module_lines} LOC, {size_category})"
                    )
                    logger.info(f"Recommended auth mode: {recommended_mode.value}")

                    # Get cost estimate
                    cost_estimate = strategy.estimate_cost(module_lines, recommended_mode)

                    if recommended_mode.value == "subscription":
                        logger.info(
                            f"Cost: {cost_estimate['quota_cost']} "
                            f"(fits in {cost_estimate['fits_in_context']} context)"
                        )
                    else:  # API
                        logger.info(
                            f"Cost: ~${cost_estimate['monetary_cost']:.4f} "
                            f"(1M context window)"
                        )

            except Exception as e:
                # Don't fail workflow if auth strategy fails
                logger.warning(f"Auth strategy detection failed: {e}")

        system = """You are an expert technical writer specializing in API Reference documentation.

IMPORTANT: This is API REFERENCE documentation, not a tutorial. Focus on documenting EVERY function/class with structured Args/Returns/Raises format.

Create a detailed, structured outline for API Reference documentation:

1. **Logical Section Structure** (emphasize API reference sections):
   - Overview/Introduction (brief)
   - Quick Start (1 complete example)
   - API Reference - Functions (one subsection per function with Args/Returns/Raises)
   - API Reference - Classes (one subsection per class with Args/Returns/Raises for methods)
   - Usage Examples (showing how to combine multiple functions)
   - Additional reference sections as needed

2. **For Each Section**:
   - Clear purpose and what readers will learn
   - Specific topics to cover
   - Types of examples to include (with actual code)

3. **Key Requirements**:
   - Include sections for real, copy-paste ready code examples
   - Plan for comprehensive API documentation with all parameters
   - Include edge cases and error handling examples
   - Add best practices and common patterns

Format as a numbered list with section titles and detailed descriptions."""

        user_message = f"""Create a comprehensive documentation outline:

Document Type: {doc_type}
Target Audience: {audience}

IMPORTANT: This documentation should be production-ready with:
- Real, executable code examples (not placeholders)
- Complete API reference with parameter types and descriptions
- Usage guides showing common patterns
- Edge case handling and error scenarios
- Best practices for the target audience

Content to document:
{content_to_document[:4000]}

Generate an outline that covers all these aspects comprehensively."""

        response, input_tokens, output_tokens = await self._call_llm(
            tier,
            system,
            user_message,
            max_tokens=1000,
        )

        return (
            {
                "outline": response,
                "doc_type": doc_type,
                "audience": audience,
                "content_to_document": content_to_document,
            },
            input_tokens,
            output_tokens,
        )
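When no readable file path is given, the auth-strategy branch above sizes the module by counting non-blank, non-comment lines; a minimal sketch of that counting expression (the sample source is invented):

```python
source = """\
# a comment
def add(a, b):
    return a + b

def sub(a, b):
    return a - b
"""

module_lines = len(
    [line for line in source.split("\n") if line.strip() and not line.strip().startswith("#")]
)
print(module_lines)  # 4: the two def lines and the two return lines
```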
    def _parse_outline_sections(self, outline: str) -> list[str]:
        """Parse top-level section titles from the outline.

        Only matches main sections like "1. Introduction", "2. Setup", etc.
        Ignores sub-sections like "2.1 Prerequisites" or nested items.
        """
        import re

        sections = []
        # Match only top-level sections: digit followed by period and space/letter
        # e.g., "1. Introduction" but NOT "1.1 Sub-section" or "2.1.3 Deep"
        top_level_pattern = re.compile(r"^(\d+)\.\s+([A-Za-z].*)")

        for line in outline.split("\n"):
            stripped = line.strip()
            match = top_level_pattern.match(stripped)
            if match:
                # section_num = match.group(1) - not needed, only extracting title
                title = match.group(2).strip()
                # Remove any trailing description after " - "
                if " - " in title:
                    title = title.split(" - ")[0].strip()
                sections.append(title)

        return sections
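A quick sketch of what the outline parser above extracts from a typical numbered outline (the outline text is invented):

```python
import re

outline = """1. Introduction - what the library does
   1.1 Prerequisites
2. Quick Start
2.1.3 Deep nested item
3. API Reference - Functions
"""

top_level_pattern = re.compile(r"^(\d+)\.\s+([A-Za-z].*)")
sections = []
for line in outline.split("\n"):
    match = top_level_pattern.match(line.strip())
    if match:
        title = match.group(2).strip()
        if " - " in title:
            title = title.split(" - ")[0].strip()
        sections.append(title)

print(sections)  # ['Introduction', 'Quick Start', 'API Reference']
```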
    async def _write(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Write content based on the outline."""
        outline = input_data.get("outline", "")
        doc_type = input_data.get("doc_type", "general")
        audience = input_data.get("audience", "developers")
        content_to_document = input_data.get("content_to_document", "")

        # Parse sections from outline
        sections = self._parse_outline_sections(outline)

        # Auto-scale tokens based on section count
        self.max_write_tokens = self._auto_scale_tokens(len(sections))

        # Use chunked generation for large outlines (more than sections_per_chunk * 2)
        use_chunking = (
            self.chunked_generation
            and len(sections) > self.sections_per_chunk * 2
            and not self.section_focus  # Don't chunk if already focused
        )

        if use_chunking:
            return await self._write_chunked(
                sections,
                outline,
                doc_type,
                audience,
                content_to_document,
                tier,
            )

        # Handle section_focus for targeted generation
        section_instruction = ""
        if self.section_focus:
            sections_list = ", ".join(self.section_focus)
            section_instruction = f"""
IMPORTANT: Focus ONLY on generating these specific sections:
{sections_list}

Generate comprehensive, detailed content for each of these sections."""

        system = f"""You are an expert technical writer creating comprehensive developer documentation.

YOUR TASK HAS TWO CRITICAL PHASES - YOU MUST COMPLETE BOTH:

═══════════════════════════════════════════════════════════════
PHASE 1: Write Comprehensive Documentation
═══════════════════════════════════════════════════════════════

Write clear, helpful documentation with:
- Overview and introduction explaining what this code does
- Real, executable code examples (NOT placeholders - use actual code from source)
- Usage guides showing how to use the code in real scenarios
- Best practices and common patterns
- Step-by-step instructions where helpful
- Tables, diagrams, and visual aids as appropriate
- Clear explanations appropriate for {audience}

Do this naturally - write the kind of documentation that helps developers understand and use the code effectively.

═══════════════════════════════════════════════════════════════
PHASE 2: Add Structured API Reference Sections (MANDATORY)
═══════════════════════════════════════════════════════════════

After writing the comprehensive documentation above, you MUST add structured API reference sections for EVERY function and class method.

For EACH function/method in the source code, add this EXACT structure:

---
### `function_name()`

**Function Signature:**
```python
def function_name(param1: type, param2: type = default) -> return_type
```

**Description:**
[Brief description of what the function does - 1-2 sentences]

**Args:**
- `param1` (`type`): Clear description of this parameter
- `param2` (`type`, optional): Description. Defaults to `default`.

**Returns:**
- `return_type`: Description of the return value

**Raises:**
- `ExceptionType`: Description of when and why this exception occurs
- `AnotherException`: Another exception case

**Example:**
```python
from module import function_name

# Show real usage with actual code
result = function_name(actual_value, param2=123)
print(result)
```
---

CRITICAL RULES FOR PHASE 2:
- Include **Args:** header for ALL functions (write "None" if no parameters)
- Include **Returns:** header for ALL functions (write "None" if void/no return)
- Include **Raises:** header for ALL functions (write "None" if no exceptions)
- Use backticks for code: `param_name` (`type`)
- Document EVERY public function and method you see in the source code

{section_instruction}

═══════════════════════════════════════════════════════════════
REMINDER: BOTH PHASES ARE MANDATORY
═══════════════════════════════════════════════════════════════

1. Write comprehensive documentation (Phase 1) - what you do naturally
2. Add structured API reference sections (Phase 2) - for every function/method

Do NOT skip Phase 2 after completing Phase 1. Both phases are required for complete documentation."""

        user_message = f"""Write comprehensive, production-ready documentation in TWO PHASES:

Document Type: {doc_type}
Target Audience: {audience}

Outline to follow:
{outline}

Source code to document (extract actual class names, function signatures, parameters):
{content_to_document[:5000]}

═══════════════════════════════════════════════════════════════
YOUR TASK:
═══════════════════════════════════════════════════════════════

PHASE 1: Write comprehensive documentation
- Use the outline above as your guide
- Include real, executable code examples from the source
- Show usage patterns, best practices, common workflows
- Write clear explanations that help developers understand the code

PHASE 2: Add structured API reference sections
- For EACH function/method in the source code, add:
  - Function signature
  - Description
  - **Args:** section (every parameter with type and description)
  - **Returns:** section (return type and description)
  - **Raises:** section (exceptions that can occur)
  - Example code snippet

═══════════════════════════════════════════════════════════════
IMPORTANT: Complete BOTH phases. Don't stop after Phase 1.
═══════════════════════════════════════════════════════════════

Generate the complete documentation now, ensuring both comprehensive content AND structured API reference sections."""

        response, input_tokens, output_tokens = await self._call_llm(
            tier,
            system,
            user_message,
            max_tokens=self.max_write_tokens,
        )

        self._total_content_tokens = output_tokens

        return (
            {
                "draft_document": response,
                "doc_type": doc_type,
                "audience": audience,
                "outline": outline,
                "chunked": False,
                "source_code": content_to_document,  # Pass through for API reference generation
            },
            input_tokens,
            output_tokens,
        )
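With the defaults above (`sections_per_chunk=3`, no `section_focus`), `_write` only hands off to `_write_chunked` once an outline has more than six top-level sections; a sketch of that decision and the resulting chunk count:

```python
sections_per_chunk = 3
chunked_generation = True
section_focus = None

for n_sections in (5, 6, 7, 12):
    use_chunking = (
        chunked_generation
        and n_sections > sections_per_chunk * 2
        and not section_focus
    )
    n_chunks = -(-n_sections // sections_per_chunk)  # ceiling division, mirroring the slicing loop
    print(n_sections, use_chunking, n_chunks if use_chunking else 1)
# 5 False 1 / 6 False 1 / 7 True 3 / 12 True 4
```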
    async def _write_chunked(
        self,
        sections: list[str],
        outline: str,
        doc_type: str,
        audience: str,
        content_to_document: str,
        tier: ModelTier,
    ) -> tuple[dict, int, int]:
        """Generate documentation in chunks to avoid truncation.

        Enterprise-safe: includes cost tracking and graceful degradation.
        """
        all_content: list[str] = []
        total_input_tokens: int = 0
        total_output_tokens: int = 0
        stopped_early: bool = False
        error_message: str | None = None

        # Split sections into chunks
        chunks = []
        for i in range(0, len(sections), self.sections_per_chunk):
            chunks.append(sections[i : i + self.sections_per_chunk])

        logger.info(f"Generating documentation in {len(chunks)} chunks")

        for chunk_idx, chunk_sections in enumerate(chunks):
            sections_list = ", ".join(chunk_sections)

            # Build context about what came before
            previous_context = ""
            if chunk_idx > 0 and all_content:
                # Include last 500 chars of previous content for continuity
                previous_context = f"""
Previous sections already written (for context/continuity):
...{all_content[-1][-500:]}

Continue with the next sections, maintaining consistent style and terminology."""

            system = f"""You are an expert technical writer creating comprehensive developer documentation.

Write ONLY these sections (part {chunk_idx + 1} of {len(chunks)}): {sections_list}

YOUR TASK FOR THESE SECTIONS (TWO PHASES):

═══════════════════════════════════════════════════════════════
PHASE 1: Comprehensive Content
═══════════════════════════════════════════════════════════════
- Write clear explanations and overviews
- Include real, executable code examples (extract from source)
- Show usage patterns and workflows
- Add best practices and common patterns
- Professional language for {audience}

═══════════════════════════════════════════════════════════════
PHASE 2: Structured API Reference
═══════════════════════════════════════════════════════════════
For EACH function/method in these sections, add:

### `function_name()`

**Function Signature:**
```python
def function_name(params) -> return_type
```

**Description:**
[Brief description]

**Args:**
- `param` (`type`): Description

**Returns:**
- `type`: Description

**Raises:**
- `Exception`: When it occurs

**Example:**
```python
# Real usage example
```

═══════════════════════════════════════════════════════════════
Complete BOTH phases for these sections.
═══════════════════════════════════════════════════════════════"""

            user_message = f"""Write comprehensive documentation for these sections in TWO PHASES:

Sections to write: {sections_list}

Document Type: {doc_type}
Target Audience: {audience}

Source code (extract actual functions/classes from here):
{content_to_document[:3000]}

Full outline (for context):
{outline}
{previous_context}

PHASE 1: Write comprehensive content with real code examples
PHASE 2: Add structured API reference sections with **Args:**, **Returns:**, **Raises:**

Generate complete sections now, ensuring both phases are complete."""

            try:
                response, input_tokens, output_tokens = await self._call_llm(
                    tier,
                    system,
                    user_message,
                    max_tokens=self.max_write_tokens // len(chunks) + 2000,
                )

                # Track cost and check limits
                _, should_stop = self._track_cost(tier, input_tokens, output_tokens)

                all_content.append(response)
                total_input_tokens += input_tokens
                total_output_tokens += output_tokens

                logger.info(
                    f"Chunk {chunk_idx + 1}/{len(chunks)} complete: "
                    f"{len(response)} chars, {output_tokens} tokens, "
                    f"cost so far: ${self._accumulated_cost:.2f}",
                )

                # Check cost limit
                if should_stop:
                    stopped_early = True
                    remaining = len(chunks) - chunk_idx - 1
                    error_message = (
                        f"Cost limit reached (${self._accumulated_cost:.2f}). "
                        f"Stopped after {chunk_idx + 1}/{len(chunks)} chunks. "
                        f"{remaining} chunks not generated."
                    )
                    logger.warning(error_message)
                    break

            except Exception as e:
                error_message = f"Error generating chunk {chunk_idx + 1}: {e}"
                logger.error(error_message)
                if not self.graceful_degradation:
                    raise
                stopped_early = True
                break

        # Combine all chunks
        combined_document = "\n\n".join(all_content)
        self._total_content_tokens = total_output_tokens

        # Store partial results for graceful degradation
        self._partial_results = {
            "draft_document": combined_document,
            "sections_completed": len(all_content),
            "sections_total": len(chunks),
        }

        result = {
            "draft_document": combined_document,
            "doc_type": doc_type,
            "audience": audience,
            "outline": outline,
            "chunked": True,
            "chunk_count": len(chunks),
            "chunks_completed": len(all_content),
            "stopped_early": stopped_early,
            "accumulated_cost": self._accumulated_cost,
            "source_code": content_to_document,  # Pass through for API reference generation
        }

        if error_message:
            result["warning"] = error_message

        return (result, total_input_tokens, total_output_tokens)
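Each chunked call above gets an even share of the write budget plus a 2,000-token cushion; a worked example, assuming the auto-scaled budget from a 25-section outline:

```python
max_write_tokens = 50_000  # hypothetical auto-scaled budget for 25 sections
n_chunks = 9               # 25 sections at 3 per chunk

per_chunk_budget = max_write_tokens // n_chunks + 2000
print(per_chunk_budget)  # 7555 tokens allowed per LLM call
```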
827
|
+
async def _polish(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
|
|
828
|
+
"""Final review and consistency polish using LLM.
|
|
829
|
+
|
|
830
|
+
Enterprise-safe: chunks large documents to avoid truncation.
|
|
831
|
+
Supports XML-enhanced prompts when enabled in workflow config.
|
|
832
|
+
"""
|
|
833
|
+
draft_document = input_data.get("draft_document", "")
|
|
834
|
+
doc_type = input_data.get("doc_type", "general")
|
|
835
|
+
audience = input_data.get("audience", "developers")
|
|
836
|
+
|
|
837
|
+
# Check if document is too large and needs chunked polishing
|
|
838
|
+
# Rough estimate: 4 chars per token, 10k tokens threshold for chunking
|
|
839
|
+
estimated_tokens = len(draft_document) // 4
|
|
840
|
+
needs_chunked_polish = estimated_tokens > 10000
|
|
841
|
+
|
|
842
|
+
        if needs_chunked_polish:
            logger.info(
                f"Large document detected (~{estimated_tokens} tokens). "
                "Using chunked polish for enterprise safety.",
            )
            return await self._polish_chunked(input_data, tier)

        # Build input payload for prompt
        input_payload = f"""Document Type: {doc_type}
Target Audience: {audience}

Draft:
{draft_document}"""

        # Check if XML prompts are enabled
        if self._is_xml_enabled():
            # Use XML-enhanced prompt
            user_message = self._render_xml_prompt(
                role="senior technical editor",
                goal="Polish and improve the documentation for consistency and quality",
                instructions=[
                    "Standardize terminology and formatting",
                    "Improve clarity and flow",
                    "Add missing cross-references",
                    "Fix grammatical issues",
                    "Identify gaps and add helpful notes",
                    "Ensure examples are complete and accurate",
                ],
                constraints=[
                    "Maintain the original structure and intent",
                    "Keep content appropriate for the target audience",
                    "Preserve code examples while improving explanations",
                ],
                input_type="documentation_draft",
                input_payload=input_payload,
                extra={
                    "doc_type": doc_type,
                    "audience": audience,
                },
            )
            system = None  # XML prompt includes all context
        else:
            # Use legacy plain text prompts
            system = """You are a senior technical editor specializing in developer documentation.

Polish and improve this documentation. The writer was asked to complete TWO PHASES:
- Phase 1: Comprehensive content with real examples
- Phase 2: Structured API reference sections with **Args:**, **Returns:**, **Raises:**

Your job is to verify BOTH phases are complete and polish to production quality.

═══════════════════════════════════════════════════════════════
CRITICAL: Verify Phase 2 Completion
═══════════════════════════════════════════════════════════════

1. **Check for Missing API Reference Sections**:
   - Scan the entire document for all functions and methods
   - EVERY function MUST have these sections:
     - **Args:** (write "None" if no parameters)
     - **Returns:** (write "None" if void)
     - **Raises:** (write "None" if no exceptions)
   - If ANY function is missing these sections, ADD them now
   - Format: **Args:**, **Returns:**, **Raises:** (bold headers with colons)

2. **Polish API Reference Sections**:
   - Verify all parameters have types in backticks: `param` (`type`)
   - Ensure return values are clearly described
   - Check exception documentation is complete
   - Validate code examples in each function section

3. **Polish General Content**:
   - Verify code examples are complete and runnable
   - Ensure proper imports and setup code
   - Replace any placeholders with real code
   - Standardize terminology throughout
   - Fix formatting inconsistencies
   - Improve clarity and flow
   - Add cross-references between sections

4. **Production Readiness**:
   - Remove any TODO or placeholder comments
   - Ensure professional tone
   - Add helpful notes, tips, and warnings
   - Verify edge cases are covered

═══════════════════════════════════════════════════════════════
Return the complete, polished document. Add a brief "## Polish Notes" section at the end summarizing improvements made."""

            user_message = f"""Polish this documentation to production quality.

The writer was asked to complete TWO PHASES:
1. Comprehensive content with real examples
2. Structured API reference with **Args:**, **Returns:**, **Raises:** for every function

Verify BOTH phases are complete, then polish:

{input_payload}

═══════════════════════════════════════════════════════════════
YOUR TASKS:
═══════════════════════════════════════════════════════════════

1. SCAN for missing API reference sections
   - Find every function/method in the document
   - Check if it has **Args:**, **Returns:**, **Raises:** sections
   - ADD these sections if missing (use "None" if no parameters/returns/exceptions)

2. POLISH existing content
   - Verify code examples are complete and runnable
   - Ensure terminology is consistent
   - Fix formatting issues
   - Improve clarity and flow

3. VALIDATE production readiness
   - Remove TODOs and placeholders
   - Add warnings and best practices
   - Ensure professional tone

Return the complete, polished documentation with all API reference sections present."""

        # Calculate polish tokens based on draft size (at least as much as write stage)
        polish_max_tokens = max(self.max_write_tokens, 20000)

        # Try executor-based execution first (Phase 3 pattern)
        if self._executor is not None or self._api_key:
            try:
                step = DOC_GEN_STEPS["polish"]
                # Override step max_tokens with dynamic value
                step.max_tokens = polish_max_tokens
                response, input_tokens, output_tokens, cost = await self.run_step_with_executor(
                    step=step,
                    prompt=user_message,
                    system=system,
                )
            except Exception:
                # Fall back to legacy _call_llm if executor fails
                response, input_tokens, output_tokens = await self._call_llm(
                    tier,
                    system or "",
                    user_message,
                    max_tokens=polish_max_tokens,
                )
        else:
            # Legacy path for backward compatibility
            response, input_tokens, output_tokens = await self._call_llm(
                tier,
                system or "",
                user_message,
                max_tokens=polish_max_tokens,
            )

        # Parse XML response if enforcement is enabled
        parsed_data = self._parse_xml_response(response)

        # Add structured API reference sections (Step 4: Post-processing)
        source_code = input_data.get("source_code", "")
        if source_code:
            logger.info("Adding structured API reference sections to polished document...")
            response = await self._add_api_reference_sections(
                narrative_doc=response,
                source_code=source_code,
                tier=ModelTier.CHEAP,  # Use cheap tier for structured extraction
            )
        else:
            logger.warning("No source code available for API reference generation")

        result = {
            "document": response,
            "doc_type": doc_type,
            "audience": audience,
            "model_tier_used": tier.value,
            "accumulated_cost": self._accumulated_cost,  # Track total cost
            "auth_mode_used": self._auth_mode_used,  # Track recommended auth mode
        }

        # Merge parsed XML data if available
        if parsed_data.get("xml_parsed"):
            result.update(
                {
                    "xml_parsed": True,
                    "summary": parsed_data.get("summary"),
                    "findings": parsed_data.get("findings", []),
                    "checklist": parsed_data.get("checklist", []),
                },
            )

        # Add formatted report for human readability
        result["formatted_report"] = format_doc_gen_report(result, input_data)

        # Export documentation if export_path is configured
        doc_path, report_path = self._export_document(
            document=response,
            doc_type=doc_type,
            report=result["formatted_report"],
        )
        if doc_path:
            result["export_path"] = str(doc_path)
            result["report_path"] = str(report_path) if report_path else None
            logger.info(f"Documentation saved to: {doc_path}")

        # Chunk output for display if needed
        output_chunks = self._chunk_output_for_display(
            result["formatted_report"],
            chunk_prefix="DOC OUTPUT",
        )
        if len(output_chunks) > 1:
            result["output_chunks"] = output_chunks
            result["output_chunk_count"] = len(output_chunks)
            logger.info(
                f"Report split into {len(output_chunks)} chunks for display "
                f"(total {len(result['formatted_report'])} chars)",
            )

        return (result, input_tokens, output_tokens)

    async def _polish_chunked(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Polish large documents in chunks to avoid truncation.

        Splits the document by section headers and polishes each chunk separately,
        then combines the results.
        """
        import re

        draft_document = input_data.get("draft_document", "")
        doc_type = input_data.get("doc_type", "general")
        audience = input_data.get("audience", "developers")

        # Split document by major section headers (## headers)
        sections = re.split(r"(?=^## )", draft_document, flags=re.MULTILINE)
        sections = [s.strip() for s in sections if s.strip()]

        if len(sections) <= 1:
            # If we can't split by sections, split by character count
            chunk_size = 15000  # ~3750 tokens per chunk
            sections = [
                draft_document[i : i + chunk_size]
                for i in range(0, len(draft_document), chunk_size)
            ]

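        # Illustrative example: for a draft such as
        #     "# Guide\nIntro text\n\n## Install\npip install ...\n\n## Usage\n..."
        # the lookahead split keeps each "## " header attached to its own body:
        #     ["# Guide\nIntro text", "## Install\npip install ...", "## Usage\n..."]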
logger.info(f"Polishing document in {len(sections)} chunks")
|
|
1082
|
+
|
|
1083
|
+
polished_chunks: list[str] = []
|
|
1084
|
+
total_input_tokens: int = 0
|
|
1085
|
+
total_output_tokens: int = 0
|
|
1086
|
+
|
|
1087
|
+
for chunk_idx, section in enumerate(sections):
|
|
1088
|
+
system = """You are a senior technical editor specializing in developer documentation.
|
|
1089
|
+
|
|
1090
|
+
Polish this section to production quality. The writer was asked to complete TWO PHASES:
|
|
1091
|
+
1. Comprehensive content with real examples
|
|
1092
|
+
2. Structured API reference with **Args:**, **Returns:**, **Raises:** for every function
|
|
1093
|
+
|
|
1094
|
+
Verify both phases are complete in this section:
|
|
1095
|
+
|
|
1096
|
+
═══════════════════════════════════════════════════════════════
|
|
1097
|
+
CRITICAL: Check for Missing API Reference Format
|
|
1098
|
+
═══════════════════════════════════════════════════════════════
|
|
1099
|
+
|
|
1100
|
+
1. **Scan for functions/methods in this section**
|
|
1101
|
+
- If any function is missing **Args:**, **Returns:**, **Raises:** sections, ADD them
|
|
1102
|
+
- Format: **Args:**, **Returns:**, **Raises:** (bold headers with colons)
|
|
1103
|
+
- Write "None" if no parameters/returns/exceptions
|
|
1104
|
+
|
|
1105
|
+
2. **Polish API Documentation**:
|
|
1106
|
+
- Verify parameters documented with types in backticks
|
|
1107
|
+
- Ensure return values and exceptions are clear
|
|
1108
|
+
- Validate code examples are complete
|
|
1109
|
+
|
|
1110
|
+
3. **Polish General Content**:
|
|
1111
|
+
- Ensure all examples are runnable with proper imports
|
|
1112
|
+
- Standardize terminology and formatting
|
|
1113
|
+
- Fix grammatical issues
|
|
1114
|
+
- Remove TODOs and placeholders
|
|
1115
|
+
|
|
1116
|
+
Return ONLY the polished section. Do not add commentary about changes."""
|
|
1117
|
+
|
|
1118
|
+
user_message = f"""Polish this section to production quality (part {chunk_idx + 1} of {len(sections)}):
|
|
1119
|
+
|
|
1120
|
+
Document Type: {doc_type}
|
|
1121
|
+
Target Audience: {audience}
|
|
1122
|
+
|
|
1123
|
+
Section to polish:
|
|
1124
|
+
{section}
|
|
1125
|
+
|
|
1126
|
+
Check if all functions have **Args:**, **Returns:**, **Raises:** sections - add if missing.
|
|
1127
|
+
Make all code examples complete and executable."""
|
|
1128
|
+
|
|
1129
|
+
try:
|
|
1130
|
+
response, input_tokens, output_tokens = await self._call_llm(
|
|
1131
|
+
tier,
|
|
1132
|
+
system,
|
|
1133
|
+
user_message,
|
|
1134
|
+
max_tokens=8000,
|
|
1135
|
+
)
|
|
1136
|
+
|
|
1137
|
+
# Track cost
|
|
1138
|
+
_, should_stop = self._track_cost(tier, input_tokens, output_tokens)
|
|
1139
|
+
|
|
1140
|
+
polished_chunks.append(response)
|
|
1141
|
+
total_input_tokens += input_tokens
|
|
1142
|
+
total_output_tokens += output_tokens
|
|
1143
|
+
|
|
1144
|
+
logger.info(
|
|
1145
|
+
f"Polish chunk {chunk_idx + 1}/{len(sections)} complete, "
|
|
1146
|
+
f"cost so far: ${self._accumulated_cost:.2f}",
|
|
1147
|
+
)
|
|
1148
|
+
|
|
1149
|
+
if should_stop:
|
|
1150
|
+
logger.warning(
|
|
1151
|
+
f"Cost limit reached during polish. "
|
|
1152
|
+
f"Returning {len(polished_chunks)}/{len(sections)} polished chunks.",
|
|
1153
|
+
)
|
|
1154
|
+
# Add remaining sections unpolished
|
|
1155
|
+
polished_chunks.extend(sections[chunk_idx + 1 :])
|
|
1156
|
+
break
|
|
1157
|
+
|
|
1158
|
+
except Exception as e:
|
|
1159
|
+
logger.error(f"Error polishing chunk {chunk_idx + 1}: {e}")
|
|
1160
|
+
if self.graceful_degradation:
|
|
1161
|
+
# Keep original section on error
|
|
1162
|
+
polished_chunks.append(section)
|
|
1163
|
+
else:
|
|
1164
|
+
raise
|
|
1165
|
+
|
|
1166
|
+
# Combine polished chunks
|
|
1167
|
+
polished_document = "\n\n".join(polished_chunks)
|
|
1168
|
+
|
|
1169
|
+
# Add structured API reference sections (Step 4: Post-processing)
|
|
1170
|
+
source_code = input_data.get("source_code", "")
|
|
1171
|
+
if source_code:
|
|
1172
|
+
logger.info("Adding structured API reference sections to chunked polished document...")
|
|
1173
|
+
polished_document = await self._add_api_reference_sections(
|
|
1174
|
+
narrative_doc=polished_document,
|
|
1175
|
+
source_code=source_code,
|
|
1176
|
+
tier=ModelTier.CHEAP, # Use cheap tier for structured extraction
|
|
1177
|
+
)
|
|
1178
|
+
else:
|
|
1179
|
+
logger.warning("No source code available for API reference generation")
|
|
1180
|
+
|
|
1181
|
+
result = {
|
|
1182
|
+
"document": polished_document,
|
|
1183
|
+
"doc_type": doc_type,
|
|
1184
|
+
"audience": audience,
|
|
1185
|
+
"model_tier_used": tier.value,
|
|
1186
|
+
"polish_chunked": True,
|
|
1187
|
+
"polish_chunks": len(sections),
|
|
1188
|
+
"accumulated_cost": self._accumulated_cost,
|
|
1189
|
+
}
|
|
1190
|
+
|
|
1191
|
+
# Add formatted report
|
|
1192
|
+
result["formatted_report"] = format_doc_gen_report(result, input_data)
|
|
1193
|
+
|
|
1194
|
+
# Export documentation if export_path is configured
|
|
1195
|
+
doc_path, report_path = self._export_document(
|
|
1196
|
+
document=polished_document,
|
|
1197
|
+
doc_type=doc_type,
|
|
1198
|
+
report=result["formatted_report"],
|
|
1199
|
+
)
|
|
1200
|
+
if doc_path:
|
|
1201
|
+
result["export_path"] = str(doc_path)
|
|
1202
|
+
result["report_path"] = str(report_path) if report_path else None
|
|
1203
|
+
logger.info(f"Documentation saved to: {doc_path}")
|
|
1204
|
+
|
|
1205
|
+
# Chunk output for display if needed
|
|
1206
|
+
output_chunks = self._chunk_output_for_display(
|
|
1207
|
+
result["formatted_report"],
|
|
1208
|
+
chunk_prefix="DOC OUTPUT",
|
|
1209
|
+
)
|
|
1210
|
+
if len(output_chunks) > 1:
|
|
1211
|
+
result["output_chunks"] = output_chunks
|
|
1212
|
+
result["output_chunk_count"] = len(output_chunks)
|
|
1213
|
+
logger.info(
|
|
1214
|
+
f"Report split into {len(output_chunks)} chunks for display "
|
|
1215
|
+
f"(total {len(result['formatted_report'])} chars)",
|
|
1216
|
+
)
|
|
1217
|
+
|
|
1218
|
+
return (result, total_input_tokens, total_output_tokens)
|
|
1219
|
+
|
|
1220
|
+
def _extract_functions_from_source(self, source_code: str) -> list[dict]:
|
|
1221
|
+
"""Extract function information from source code using AST.
|
|
1222
|
+
|
|
1223
|
+
Args:
|
|
1224
|
+
source_code: Python source code to parse
|
|
1225
|
+
|
|
1226
|
+
Returns:
|
|
1227
|
+
List of dicts with function information (name, args, returns, docstring)
|
|
1228
|
+
"""
|
|
1229
|
+
import ast
|
|
1230
|
+
|
|
1231
|
+
functions = []
|
|
1232
|
+
|
|
1233
|
+
try:
|
|
1234
|
+
tree = ast.parse(source_code)
|
|
1235
|
+
except SyntaxError as e:
|
|
1236
|
+
logger.warning(f"Failed to parse source code: {e}")
|
|
1237
|
+
return functions
|
|
1238
|
+
|
|
1239
|
+
for node in ast.walk(tree):
|
|
1240
|
+
# Extract top-level functions and class methods
|
|
1241
|
+
if isinstance(node, ast.FunctionDef):
|
|
1242
|
+
# Skip private functions (starting with _)
|
|
1243
|
+
if node.name.startswith("_"):
|
|
1244
|
+
continue
|
|
1245
|
+
|
|
1246
|
+
# Extract function signature
|
|
1247
|
+
args_list = []
|
|
1248
|
+
for arg in node.args.args:
|
|
1249
|
+
arg_name = arg.arg
|
|
1250
|
+
# Get type annotation if available
|
|
1251
|
+
arg_type = ast.unparse(arg.annotation) if arg.annotation else "Any"
|
|
1252
|
+
args_list.append({"name": arg_name, "type": arg_type})
|
|
1253
|
+
|
|
1254
|
+
# Extract return type
|
|
1255
|
+
return_type = ast.unparse(node.returns) if node.returns else "Any"
|
|
1256
|
+
|
|
1257
|
+
# Extract docstring
|
|
1258
|
+
docstring = ast.get_docstring(node) or ""
|
|
1259
|
+
|
|
1260
|
+
functions.append({
|
|
1261
|
+
"name": node.name,
|
|
1262
|
+
"args": args_list,
|
|
1263
|
+
"return_type": return_type,
|
|
1264
|
+
"docstring": docstring,
|
|
1265
|
+
"lineno": node.lineno,
|
|
1266
|
+
})
|
|
1267
|
+
|
|
1268
|
+
return functions
|
|
1269
|
+
|
|
1270
|
+
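    # Illustrative example (assumed input, not from this package): given source
    # containing
    #     def greet(name: str) -> str:
    #         """Return a greeting."""
    # _extract_functions_from_source() yields entries shaped like
    #     {"name": "greet", "args": [{"name": "name", "type": "str"}],
    #      "return_type": "str", "docstring": "Return a greeting.", "lineno": 1}
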
    async def _generate_api_section_for_function(
        self,
        func_info: dict,
        tier: ModelTier,
    ) -> str:
        """Generate structured API reference section for a single function.

        This is a focused prompt that ONLY asks for Args/Returns/Raises format,
        not narrative documentation.

        Args:
            func_info: Function information from AST extraction
            tier: Model tier to use for generation

        Returns:
            Markdown formatted API reference section
        """
        func_name = func_info["name"]
        args_list = func_info["args"]
        return_type = func_info["return_type"]
        docstring = func_info["docstring"]

        # Build function signature
        args_str = ", ".join([f"{arg['name']}: {arg['type']}" for arg in args_list])
        signature = f"def {func_name}({args_str}) -> {return_type}"

        system = """You are an API documentation generator. Output ONLY structured API reference sections in the EXACT format specified below.

CRITICAL: Do NOT write explanatory text, questions, or narrative. Output ONLY the formatted section.

REQUIRED FORMAT (copy this structure EXACTLY, replace bracketed content):

### `function_name()`

**Function Signature:**
```python
def function_name(param: type) -> return_type
```

**Description:**
Brief 1-2 sentence description.

**Args:**
- `param_name` (`type`): Parameter description

**Returns:**
- `return_type`: Return value description

**Raises:**
- `ExceptionType`: When this exception occurs

IMPORTANT:
- Use "**Args:**" (NOT "Parameters" or "params")
- Write "None" if no Args/Returns/Raises
- NO conversational text - just the formatted section"""

        user_message = f"""Generate API reference section using EXACT format specified in system prompt.

Function:
```python
{signature}
```

Docstring:
{docstring if docstring else "No docstring"}

Output the formatted section EXACTLY as shown in system prompt. Use **Args:** (not Parameters). NO conversational text."""

        try:
            response, input_tokens, output_tokens = await self._call_llm(
                tier,
                system,
                user_message,
                max_tokens=1000,  # Small response - just the structured section
            )

            # Track cost
            self._track_cost(tier, input_tokens, output_tokens)

            return response

        except Exception as e:
            logger.error(f"Failed to generate API section for {func_name}: {e}")
            # Return minimal fallback
            return f"""### `{func_name}()`

**Function Signature:**
```python
{signature}
```

**Description:**
{docstring.split('.')[0] if docstring else "No description available."}

**Args:**
None

**Returns:**
- `{return_type}`: Return value

**Raises:**
None
"""

    async def _add_api_reference_sections(
        self,
        narrative_doc: str,
        source_code: str,
        tier: ModelTier,
    ) -> str:
        """Add structured API reference sections to narrative documentation.

        This is Step 4 of the pipeline: after outline, write, and polish,
        we add structured API reference sections extracted from source code.

        Args:
            narrative_doc: The polished narrative documentation
            source_code: Original source code to extract functions from
            tier: Model tier to use for API section generation

        Returns:
            Complete documentation with API reference appendix
        """
        logger.info("Adding structured API reference sections...")

        # Extract functions from source code
        functions = self._extract_functions_from_source(source_code)

        if not functions:
            logger.warning("No public functions found in source code")
            return narrative_doc

        logger.info(f"Found {len(functions)} public functions to document")

        # Generate API section for each function
        api_sections = []
        for func_info in functions:
            func_name = func_info["name"]
            logger.debug(f"Generating API reference for {func_name}()")

            api_section = await self._generate_api_section_for_function(
                func_info, tier
            )
            api_sections.append(api_section)

        # Append API reference section to narrative doc
        full_doc = narrative_doc
        full_doc += "\n\n---\n\n"
        full_doc += "## API Reference\n\n"
        full_doc += "Complete structured reference for all public functions:\n\n"
        full_doc += "\n\n".join(api_sections)

        logger.info(f"Added {len(api_sections)} API reference sections")

        return full_doc

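    # Pipeline note: _add_api_reference_sections() ties the two helpers above together.
    # It extracts public functions via AST, renders one structured section per function
    # on the cheap tier, and appends the combined "## API Reference" appendix to the
    # narrative document produced by the polish stage.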