attune_ai-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,1205 @@
|
|
|
1
|
+
"""Documentation Orchestrator - Combined Scout + Writer Workflow
|
|
2
|
+
|
|
3
|
+
Combines ManageDocumentationCrew (scout/analyst) with DocumentGenerationWorkflow
|
|
4
|
+
(writer) to provide an end-to-end documentation management solution:
|
|
5
|
+
|
|
6
|
+
1. SCOUT Phase: ManageDocumentationCrew scans for stale docs and gaps
|
|
7
|
+
2. PRIORITIZE Phase: Filters and ranks items by severity and impact
|
|
8
|
+
3. GENERATE Phase: DocumentGenerationWorkflow creates/updates documentation
|
|
9
|
+
4. UPDATE Phase: ProjectIndex is updated with new documentation status
|
|
10
|
+
|
|
11
|
+
This orchestrator provides intelligent documentation maintenance:
|
|
12
|
+
- Detects when source code changes make docs stale
|
|
13
|
+
- Identifies undocumented files by priority (LOC, complexity)
|
|
14
|
+
- Generates documentation using cost-optimized 3-stage pipeline
|
|
15
|
+
- Tracks all costs and provides detailed reporting
|
|
16
|
+
|
|
17
|
+
Copyright 2025 Smart-AI-Memory
|
|
18
|
+
Licensed under Fair Source License 0.9
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
import asyncio
|
|
22
|
+
import logging
|
|
23
|
+
from dataclasses import dataclass, field
|
|
24
|
+
from datetime import datetime
|
|
25
|
+
from pathlib import Path
|
|
26
|
+
from typing import Any
|
|
27
|
+
|
|
28
|
+
logger = logging.getLogger(__name__)
|
|
29
|
+
|
|
30
|
+
# Import scout workflow
|
|
31
|
+
ManageDocumentationCrew = None
|
|
32
|
+
ManageDocumentationCrewResult = None
|
|
33
|
+
HAS_SCOUT = False
|
|
34
|
+
|
|
35
|
+
try:
|
|
36
|
+
from .manage_documentation import ManageDocumentationCrew as _ManageDocumentationCrew
|
|
37
|
+
from .manage_documentation import (
|
|
38
|
+
ManageDocumentationCrewResult as _ManageDocumentationCrewResult,
|
|
39
|
+
)
|
|
40
|
+
|
|
41
|
+
ManageDocumentationCrew = _ManageDocumentationCrew
|
|
42
|
+
ManageDocumentationCrewResult = _ManageDocumentationCrewResult
|
|
43
|
+
HAS_SCOUT = True
|
|
44
|
+
except ImportError:
|
|
45
|
+
pass
|
|
46
|
+
|
|
47
|
+
# Import writer workflow
|
|
48
|
+
DocumentGenerationWorkflow = None
|
|
49
|
+
HAS_WRITER = False
|
|
50
|
+
|
|
51
|
+
try:
|
|
52
|
+
from .document_gen import DocumentGenerationWorkflow as _DocumentGenerationWorkflow
|
|
53
|
+
|
|
54
|
+
DocumentGenerationWorkflow = _DocumentGenerationWorkflow
|
|
55
|
+
HAS_WRITER = True
|
|
56
|
+
except ImportError:
|
|
57
|
+
pass
|
|
58
|
+
|
|
59
|
+
# Import ProjectIndex for tracking
|
|
60
|
+
ProjectIndex = None
|
|
61
|
+
HAS_PROJECT_INDEX = False
|
|
62
|
+
|
|
63
|
+
try:
|
|
64
|
+
from attune.project_index import ProjectIndex as _ProjectIndex
|
|
65
|
+
|
|
66
|
+
ProjectIndex = _ProjectIndex
|
|
67
|
+
HAS_PROJECT_INDEX = True
|
|
68
|
+
except ImportError:
|
|
69
|
+
pass
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
@dataclass
|
|
73
|
+
class DocumentationItem:
|
|
74
|
+
"""A single item that needs documentation work."""
|
|
75
|
+
|
|
76
|
+
file_path: str
|
|
77
|
+
issue_type: str # "missing_docstring" | "stale_doc" | "no_documentation"
|
|
78
|
+
severity: str # "high" | "medium" | "low"
|
|
79
|
+
priority: int # 1-5, lower is higher priority
|
|
80
|
+
details: str = ""
|
|
81
|
+
related_source: list[str] = field(default_factory=list)
|
|
82
|
+
days_stale: int = 0
|
|
83
|
+
loc: int = 0
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
@dataclass
|
|
87
|
+
class OrchestratorResult:
|
|
88
|
+
"""Result from DocumentationOrchestrator execution."""
|
|
89
|
+
|
|
90
|
+
success: bool
|
|
91
|
+
phase: str # "scout" | "prioritize" | "generate" | "complete"
|
|
92
|
+
|
|
93
|
+
# Scout phase results
|
|
94
|
+
items_found: int = 0
|
|
95
|
+
stale_docs: int = 0
|
|
96
|
+
missing_docs: int = 0
|
|
97
|
+
|
|
98
|
+
# Generation phase results
|
|
99
|
+
items_processed: int = 0
|
|
100
|
+
docs_generated: list[str] = field(default_factory=list)
|
|
101
|
+
docs_updated: list[str] = field(default_factory=list)
|
|
102
|
+
docs_skipped: list[str] = field(default_factory=list)
|
|
103
|
+
|
|
104
|
+
# Cost tracking
|
|
105
|
+
scout_cost: float = 0.0
|
|
106
|
+
generation_cost: float = 0.0
|
|
107
|
+
total_cost: float = 0.0
|
|
108
|
+
|
|
109
|
+
# Timing
|
|
110
|
+
duration_ms: int = 0
|
|
111
|
+
|
|
112
|
+
# Details
|
|
113
|
+
errors: list[str] = field(default_factory=list)
|
|
114
|
+
warnings: list[str] = field(default_factory=list)
|
|
115
|
+
summary: str = ""
|
|
116
|
+
|
|
117
|
+
def to_dict(self) -> dict:
|
|
118
|
+
return {
|
|
119
|
+
"success": self.success,
|
|
120
|
+
"phase": self.phase,
|
|
121
|
+
"items_found": self.items_found,
|
|
122
|
+
"stale_docs": self.stale_docs,
|
|
123
|
+
"missing_docs": self.missing_docs,
|
|
124
|
+
"items_processed": self.items_processed,
|
|
125
|
+
"docs_generated": self.docs_generated,
|
|
126
|
+
"docs_updated": self.docs_updated,
|
|
127
|
+
"docs_skipped": self.docs_skipped,
|
|
128
|
+
"scout_cost": self.scout_cost,
|
|
129
|
+
"generation_cost": self.generation_cost,
|
|
130
|
+
"total_cost": self.total_cost,
|
|
131
|
+
"duration_ms": self.duration_ms,
|
|
132
|
+
"errors": self.errors,
|
|
133
|
+
"warnings": self.warnings,
|
|
134
|
+
"summary": self.summary,
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
class DocumentationOrchestrator:
|
|
139
|
+
"""End-to-end documentation management orchestrator.
|
|
140
|
+
|
|
141
|
+
Combines the ManageDocumentationCrew (scout) with DocumentGenerationWorkflow
|
|
142
|
+
(writer) to provide intelligent, automated documentation maintenance.
|
|
143
|
+
|
|
144
|
+
Phases:
|
|
145
|
+
1. SCOUT: Analyze codebase for documentation gaps and staleness
|
|
146
|
+
2. PRIORITIZE: Rank items by severity, LOC, and business impact
|
|
147
|
+
3. GENERATE: Create/update documentation for priority items
|
|
148
|
+
4. UPDATE: Update ProjectIndex with new documentation status
|
|
149
|
+
|
|
150
|
+
Usage:
|
|
151
|
+
orchestrator = DocumentationOrchestrator(
|
|
152
|
+
project_root=".",
|
|
153
|
+
max_items=5, # Process top 5 priority items
|
|
154
|
+
max_cost=2.0, # Stop at $2 total cost
|
|
155
|
+
auto_approve=False, # Require approval before generation
|
|
156
|
+
)
|
|
157
|
+
result = await orchestrator.execute()
|
|
158
|
+
"""
|
|
159
|
+
|
|
160
|
+
name = "documentation-orchestrator"
|
|
161
|
+
description = "End-to-end documentation management: scout gaps, prioritize, generate docs"
|
|
162
|
+
|
|
163
|
+
# Patterns to exclude from SCANNING - things we don't want to analyze for documentation gaps
|
|
164
|
+
# Note: The ALLOWED_OUTPUT_EXTENSIONS whitelist is the primary safety mechanism for writes
|
|
165
|
+
DEFAULT_EXCLUDE_PATTERNS = [
|
|
166
|
+
# Generated/build directories (would bloat results)
|
|
167
|
+
"site/**",
|
|
168
|
+
"dist/**",
|
|
169
|
+
"build/**",
|
|
170
|
+
"out/**",
|
|
171
|
+
"node_modules/**",
|
|
172
|
+
"__pycache__/**",
|
|
173
|
+
".git/**",
|
|
174
|
+
"*.egg-info/**",
|
|
175
|
+
# Framework internal/working directories
|
|
176
|
+
".attune/**",
|
|
177
|
+
".empathy_index/**",
|
|
178
|
+
".claude/**",
|
|
179
|
+
# Book/large doc source folders
|
|
180
|
+
"book/**",
|
|
181
|
+
"docs/book/**",
|
|
182
|
+
"docs/generated/**",
|
|
183
|
+
"docs/word/**",
|
|
184
|
+
"docs/pdf/**",
|
|
185
|
+
# Dependency/config files (not source code - don't need documentation)
|
|
186
|
+
"requirements*.txt",
|
|
187
|
+
"package.json",
|
|
188
|
+
"package-lock.json",
|
|
189
|
+
"yarn.lock",
|
|
190
|
+
"Pipfile",
|
|
191
|
+
"Pipfile.lock",
|
|
192
|
+
"poetry.lock",
|
|
193
|
+
"pyproject.toml",
|
|
194
|
+
"setup.py",
|
|
195
|
+
"setup.cfg",
|
|
196
|
+
"*.toml",
|
|
197
|
+
"*.cfg",
|
|
198
|
+
"*.ini",
|
|
199
|
+
"*.env",
|
|
200
|
+
".env*",
|
|
201
|
+
"Makefile",
|
|
202
|
+
"Dockerfile",
|
|
203
|
+
"docker-compose*.yml",
|
|
204
|
+
"*.yaml",
|
|
205
|
+
"*.yml",
|
|
206
|
+
# Binary files (cannot be documented as code)
|
|
207
|
+
"*.png",
|
|
208
|
+
"*.jpg",
|
|
209
|
+
"*.jpeg",
|
|
210
|
+
"*.gif",
|
|
211
|
+
"*.ico",
|
|
212
|
+
"*.svg",
|
|
213
|
+
"*.pdf",
|
|
214
|
+
"*.woff",
|
|
215
|
+
"*.woff2",
|
|
216
|
+
"*.ttf",
|
|
217
|
+
"*.eot",
|
|
218
|
+
"*.pyc",
|
|
219
|
+
"*.pyo",
|
|
220
|
+
"*.so",
|
|
221
|
+
"*.dll",
|
|
222
|
+
"*.exe",
|
|
223
|
+
"*.zip",
|
|
224
|
+
"*.tar",
|
|
225
|
+
"*.gz",
|
|
226
|
+
"*.vsix",
|
|
227
|
+
"*.docx",
|
|
228
|
+
"*.doc",
|
|
229
|
+
]
|
|
230
|
+
|
|
231
|
+
# ALLOWED file extensions for OUTPUT - documentation can ONLY create/modify these types
|
|
232
|
+
# This is the PRIMARY safety mechanism - even if scanning includes wrong files,
|
|
233
|
+
# only markdown documentation files can ever be written
|
|
234
|
+
ALLOWED_OUTPUT_EXTENSIONS = [
|
|
235
|
+
".md", # Markdown documentation
|
|
236
|
+
".mdx", # MDX (Markdown with JSX)
|
|
237
|
+
".rst", # reStructuredText
|
|
238
|
+
]
|
|
239
|
+
|
|
240
|
+
def __init__(
|
|
241
|
+
self,
|
|
242
|
+
project_root: str = ".",
|
|
243
|
+
max_items: int = 5,
|
|
244
|
+
max_cost: float = 5.0,
|
|
245
|
+
auto_approve: bool = False,
|
|
246
|
+
export_path: str | Path | None = None,
|
|
247
|
+
include_stale: bool = True,
|
|
248
|
+
include_missing: bool = True,
|
|
249
|
+
min_severity: str = "low", # "high" | "medium" | "low"
|
|
250
|
+
doc_type: str = "api_reference",
|
|
251
|
+
audience: str = "developers",
|
|
252
|
+
dry_run: bool = False,
|
|
253
|
+
exclude_patterns: list[str] | None = None,
|
|
254
|
+
**kwargs: Any,
|
|
255
|
+
):
|
|
256
|
+
"""Initialize the orchestrator.
|
|
257
|
+
|
|
258
|
+
Args:
|
|
259
|
+
project_root: Root directory of the project
|
|
260
|
+
max_items: Maximum number of items to process (default 5)
|
|
261
|
+
max_cost: Maximum total cost in USD (default $5)
|
|
262
|
+
auto_approve: If True, generate docs without confirmation
|
|
263
|
+
export_path: Directory to export generated docs
|
|
264
|
+
include_stale: Include stale docs in processing
|
|
265
|
+
include_missing: Include missing docs in processing
|
|
266
|
+
min_severity: Minimum severity to include ("high", "medium", "low")
|
|
267
|
+
doc_type: Type of documentation to generate
|
|
268
|
+
audience: Target audience for documentation
|
|
269
|
+
dry_run: If True, scout only without generating
|
|
270
|
+
exclude_patterns: Additional patterns to exclude (merged with defaults)
|
|
271
|
+
|
|
272
|
+
"""
|
|
273
|
+
self.project_root = Path(project_root)
|
|
274
|
+
self.max_items = max_items
|
|
275
|
+
self.max_cost = max_cost
|
|
276
|
+
self.auto_approve = auto_approve
|
|
277
|
+
|
|
278
|
+
# Merge default exclusions with any custom patterns
|
|
279
|
+
self.exclude_patterns = list(self.DEFAULT_EXCLUDE_PATTERNS)
|
|
280
|
+
if exclude_patterns:
|
|
281
|
+
self.exclude_patterns.extend(exclude_patterns)
|
|
282
|
+
self.export_path = (
|
|
283
|
+
Path(export_path) if export_path else self.project_root / "docs" / "generated"
|
|
284
|
+
)
|
|
285
|
+
self.include_stale = include_stale
|
|
286
|
+
self.include_missing = include_missing
|
|
287
|
+
self.min_severity = min_severity
|
|
288
|
+
self.doc_type = doc_type
|
|
289
|
+
self.audience = audience
|
|
290
|
+
self.dry_run = dry_run
|
|
291
|
+
self.config = kwargs
|
|
292
|
+
self._quiet = False # Set to True for JSON output mode
|
|
293
|
+
|
|
294
|
+
# Initialize components
|
|
295
|
+
self._scout: Any = None
|
|
296
|
+
self._writer: Any = None
|
|
297
|
+
self._project_index: Any = None
|
|
298
|
+
|
|
299
|
+
self._total_cost = 0.0
|
|
300
|
+
self._items: list[DocumentationItem] = []
|
|
301
|
+
self._excluded_files: list[dict] = [] # Track files excluded by patterns
|
|
302
|
+
|
|
303
|
+
# Initialize scout if available
|
|
304
|
+
if HAS_SCOUT and ManageDocumentationCrew is not None:
|
|
305
|
+
self._scout = ManageDocumentationCrew(project_root=str(self.project_root))
|
|
306
|
+
|
|
307
|
+
# Initialize writer if available
|
|
308
|
+
if HAS_WRITER and DocumentGenerationWorkflow is not None:
|
|
309
|
+
self._writer = DocumentGenerationWorkflow(
|
|
310
|
+
export_path=str(self.export_path),
|
|
311
|
+
max_cost=max_cost / 2, # Reserve half budget for generation
|
|
312
|
+
graceful_degradation=True,
|
|
313
|
+
)
|
|
314
|
+
|
|
315
|
+
# Initialize project index if available
|
|
316
|
+
if HAS_PROJECT_INDEX and ProjectIndex is not None:
|
|
317
|
+
try:
|
|
318
|
+
self._project_index = ProjectIndex(str(self.project_root))
|
|
319
|
+
if not self._project_index.load():
|
|
320
|
+
self._project_index.refresh()
|
|
321
|
+
except Exception as e:
|
|
322
|
+
logger.warning(f"Could not initialize ProjectIndex: {e}")
|
|
323
|
+
|
|
324
|
+
def describe(self) -> str:
|
|
325
|
+
"""Get a human-readable description of the workflow."""
|
|
326
|
+
lines = [
|
|
327
|
+
f"Workflow: {self.name}",
|
|
328
|
+
f"Description: {self.description}",
|
|
329
|
+
"",
|
|
330
|
+
"Phases:",
|
|
331
|
+
" 1. SCOUT - Analyze codebase for documentation gaps and staleness",
|
|
332
|
+
" 2. PRIORITIZE - Rank items by severity, LOC, and business impact",
|
|
333
|
+
" 3. GENERATE - Create/update documentation for priority items",
|
|
334
|
+
" 4. UPDATE - Update ProjectIndex with new documentation status",
|
|
335
|
+
"",
|
|
336
|
+
"Configuration:",
|
|
337
|
+
f" max_items: {self.max_items}",
|
|
338
|
+
f" max_cost: ${self.max_cost:.2f}",
|
|
339
|
+
f" auto_approve: {self.auto_approve}",
|
|
340
|
+
f" dry_run: {self.dry_run}",
|
|
341
|
+
f" include_stale: {self.include_stale}",
|
|
342
|
+
f" include_missing: {self.include_missing}",
|
|
343
|
+
"",
|
|
344
|
+
"Components:",
|
|
345
|
+
f" Scout (ManageDocumentationCrew): {'Available' if self._scout else 'Not available'}",
|
|
346
|
+
f" Writer (DocumentGenerationWorkflow): {'Available' if self._writer else 'Not available'}",
|
|
347
|
+
f" ProjectIndex: {'Available' if self._project_index else 'Not available'}",
|
|
348
|
+
]
|
|
349
|
+
return "\n".join(lines)
|
|
350
|
+
|
|
351
|
+
def _severity_to_priority(self, severity: str) -> int:
|
|
352
|
+
"""Convert severity string to numeric priority (1=highest)."""
|
|
353
|
+
return {"high": 1, "medium": 2, "low": 3}.get(severity.lower(), 3)
|
|
354
|
+
|
|
355
|
+
def _should_include_severity(self, severity: str) -> bool:
|
|
356
|
+
"""Check if severity meets minimum threshold."""
|
|
357
|
+
severity_order = {"high": 1, "medium": 2, "low": 3}
|
|
358
|
+
item_level = severity_order.get(severity.lower(), 3)
|
|
359
|
+
min_level = severity_order.get(self.min_severity.lower(), 3)
|
|
360
|
+
return item_level <= min_level
|
|
361
|
+
|
|
362
|
+
def _should_exclude(self, file_path: str, track: bool = False) -> bool:
|
|
363
|
+
"""Check if a file should be excluded from documentation generation.
|
|
364
|
+
|
|
365
|
+
Uses fnmatch-style pattern matching against exclude_patterns.
|
|
366
|
+
|
|
367
|
+
Args:
|
|
368
|
+
file_path: Path to check (relative or absolute)
|
|
369
|
+
track: If True, add to _excluded_files list when excluded
|
|
370
|
+
|
|
371
|
+
Returns:
|
|
372
|
+
True if file should be excluded
|
|
373
|
+
|
|
374
|
+
"""
|
|
375
|
+
import fnmatch
|
|
376
|
+
|
|
377
|
+
# Normalize path for matching
|
|
378
|
+
path_str = str(file_path)
|
|
379
|
+
# Also check just the filename for simple patterns
|
|
380
|
+
filename = Path(file_path).name
|
|
381
|
+
|
|
382
|
+
for pattern in self.exclude_patterns:
|
|
383
|
+
# Check full path
|
|
384
|
+
if fnmatch.fnmatch(path_str, pattern):
|
|
385
|
+
if track:
|
|
386
|
+
self._excluded_files.append(
|
|
387
|
+
{
|
|
388
|
+
"file_path": path_str,
|
|
389
|
+
"matched_pattern": pattern,
|
|
390
|
+
"reason": self._get_exclusion_reason(pattern),
|
|
391
|
+
},
|
|
392
|
+
)
|
|
393
|
+
return True
|
|
394
|
+
# Check just filename
|
|
395
|
+
if fnmatch.fnmatch(filename, pattern):
|
|
396
|
+
if track:
|
|
397
|
+
self._excluded_files.append(
|
|
398
|
+
{
|
|
399
|
+
"file_path": path_str,
|
|
400
|
+
"matched_pattern": pattern,
|
|
401
|
+
"reason": self._get_exclusion_reason(pattern),
|
|
402
|
+
},
|
|
403
|
+
)
|
|
404
|
+
return True
|
|
405
|
+
# Check if path contains the pattern (for directory patterns)
|
|
406
|
+
if "**" in pattern:
|
|
407
|
+
# Convert ** pattern to a simpler check
|
|
408
|
+
base_pattern = pattern.replace("/**", "").replace("**", "")
|
|
409
|
+
if base_pattern in path_str:
|
|
410
|
+
if track:
|
|
411
|
+
self._excluded_files.append(
|
|
412
|
+
{
|
|
413
|
+
"file_path": path_str,
|
|
414
|
+
"matched_pattern": pattern,
|
|
415
|
+
"reason": self._get_exclusion_reason(pattern),
|
|
416
|
+
},
|
|
417
|
+
)
|
|
418
|
+
return True
|
|
419
|
+
|
|
420
|
+
return False
|
|
421
|
+
|
|
422
|
+
def _get_exclusion_reason(self, pattern: str) -> str:
|
|
423
|
+
"""Get a human-readable reason for why a pattern excludes a file."""
|
|
424
|
+
# Generated directories
|
|
425
|
+
if any(
|
|
426
|
+
p in pattern
|
|
427
|
+
for p in [
|
|
428
|
+
"site/**",
|
|
429
|
+
"dist/**",
|
|
430
|
+
"build/**",
|
|
431
|
+
"out/**",
|
|
432
|
+
"node_modules/**",
|
|
433
|
+
"__pycache__/**",
|
|
434
|
+
".git/**",
|
|
435
|
+
"egg-info",
|
|
436
|
+
]
|
|
437
|
+
):
|
|
438
|
+
return "Generated/build directory"
|
|
439
|
+
# Binary files
|
|
440
|
+
if any(
|
|
441
|
+
p in pattern
|
|
442
|
+
for p in [
|
|
443
|
+
".png",
|
|
444
|
+
".jpg",
|
|
445
|
+
".jpeg",
|
|
446
|
+
".gif",
|
|
447
|
+
".ico",
|
|
448
|
+
".svg",
|
|
449
|
+
".pdf",
|
|
450
|
+
".woff",
|
|
451
|
+
".ttf",
|
|
452
|
+
".pyc",
|
|
453
|
+
".so",
|
|
454
|
+
".dll",
|
|
455
|
+
".exe",
|
|
456
|
+
".zip",
|
|
457
|
+
".tar",
|
|
458
|
+
".gz",
|
|
459
|
+
".vsix",
|
|
460
|
+
]
|
|
461
|
+
):
|
|
462
|
+
return "Binary/asset file"
|
|
463
|
+
# Empathy internal
|
|
464
|
+
if any(p in pattern for p in [".attune/**", ".claude/**", ".empathy_index/**"]):
|
|
465
|
+
return "Framework internal file"
|
|
466
|
+
# Book/docs
|
|
467
|
+
if any(
|
|
468
|
+
p in pattern
|
|
469
|
+
for p in [
|
|
470
|
+
"book/**",
|
|
471
|
+
"docs/generated/**",
|
|
472
|
+
"docs/word/**",
|
|
473
|
+
"docs/pdf/**",
|
|
474
|
+
".docx",
|
|
475
|
+
".doc",
|
|
476
|
+
]
|
|
477
|
+
):
|
|
478
|
+
return "Book/document source"
|
|
479
|
+
return "Excluded by pattern"
|
|
480
|
+
|
|
481
|
+
def _is_allowed_output(self, file_path: str) -> bool:
|
|
482
|
+
"""Check if a file is allowed to be created/modified.
|
|
483
|
+
|
|
484
|
+
Uses the ALLOWED_OUTPUT_EXTENSIONS whitelist - this is the PRIMARY
|
|
485
|
+
safety mechanism to ensure only documentation files can be written.
|
|
486
|
+
|
|
487
|
+
Args:
|
|
488
|
+
file_path: Path to check
|
|
489
|
+
|
|
490
|
+
Returns:
|
|
491
|
+
True if the file extension is in the allowed whitelist
|
|
492
|
+
|
|
493
|
+
"""
|
|
494
|
+
ext = Path(file_path).suffix.lower()
|
|
495
|
+
return ext in self.ALLOWED_OUTPUT_EXTENSIONS
|
|
496
|
+
|
|
497
|
+
async def _run_scout_phase(self) -> tuple[list[DocumentationItem], float]:
|
|
498
|
+
"""Run the scout phase to identify documentation gaps.
|
|
499
|
+
|
|
500
|
+
Returns:
|
|
501
|
+
Tuple of (items found, cost)
|
|
502
|
+
|
|
503
|
+
"""
|
|
504
|
+
items: list[DocumentationItem] = []
|
|
505
|
+
cost = 0.0
|
|
506
|
+
|
|
507
|
+
if self._scout is None:
|
|
508
|
+
logger.warning("Scout (ManageDocumentationCrew) not available")
|
|
509
|
+
# Fall back to ProjectIndex if available
|
|
510
|
+
if self._project_index is not None:
|
|
511
|
+
items = self._items_from_index()
|
|
512
|
+
return items, cost
|
|
513
|
+
|
|
514
|
+
logger.info("Starting scout phase...")
|
|
515
|
+
print("\n[SCOUT PHASE] Analyzing codebase for documentation gaps...")
|
|
516
|
+
|
|
517
|
+
result = await self._scout.execute(path=str(self.project_root))
|
|
518
|
+
cost = result.cost
|
|
519
|
+
|
|
520
|
+
if not result.success:
|
|
521
|
+
logger.error("Scout phase failed")
|
|
522
|
+
return items, cost
|
|
523
|
+
|
|
524
|
+
# Parse scout findings into DocumentationItems
|
|
525
|
+
items = self._parse_scout_findings(result)
|
|
526
|
+
|
|
527
|
+
# Supplement with ProjectIndex data if available
|
|
528
|
+
if self._project_index is not None:
|
|
529
|
+
index_items = self._items_from_index()
|
|
530
|
+
# Merge, preferring scout items but adding unique index items
|
|
531
|
+
existing_paths = {item.file_path for item in items}
|
|
532
|
+
for idx_item in index_items:
|
|
533
|
+
if idx_item.file_path not in existing_paths:
|
|
534
|
+
items.append(idx_item)
|
|
535
|
+
|
|
536
|
+
logger.info(f"Scout phase found {len(items)} items (cost: ${cost:.4f})")
|
|
537
|
+
return items, cost
|
|
538
|
+
|
|
539
|
+
def _items_from_index(self) -> list[DocumentationItem]:
|
|
540
|
+
"""Extract documentation items from ProjectIndex."""
|
|
541
|
+
items: list[DocumentationItem] = []
|
|
542
|
+
|
|
543
|
+
if self._project_index is None:
|
|
544
|
+
return items
|
|
545
|
+
|
|
546
|
+
try:
|
|
547
|
+
context = self._project_index.get_context_for_workflow("documentation")
|
|
548
|
+
|
|
549
|
+
# Get files without docstrings
|
|
550
|
+
if self.include_missing:
|
|
551
|
+
files_without_docs = context.get("files_without_docstrings", [])
|
|
552
|
+
for f in files_without_docs[:20]: # Limit
|
|
553
|
+
file_path = f.get("path", "")
|
|
554
|
+
if self._should_exclude(file_path, track=True):
|
|
555
|
+
continue
|
|
556
|
+
items.append(
|
|
557
|
+
DocumentationItem(
|
|
558
|
+
file_path=file_path,
|
|
559
|
+
issue_type="missing_docstring",
|
|
560
|
+
severity="medium",
|
|
561
|
+
priority=2,
|
|
562
|
+
details=f"Missing docstring - {f.get('loc', 0)} LOC",
|
|
563
|
+
loc=f.get("loc", 0),
|
|
564
|
+
),
|
|
565
|
+
)
|
|
566
|
+
|
|
567
|
+
# Get stale docs
|
|
568
|
+
if self.include_stale:
|
|
569
|
+
docs_needing_review = context.get("docs_needing_review", [])
|
|
570
|
+
for d in docs_needing_review[:10]:
|
|
571
|
+
if d.get("source_modified_after_doc"):
|
|
572
|
+
file_path = d.get("doc_file", "")
|
|
573
|
+
if self._should_exclude(file_path, track=True):
|
|
574
|
+
continue
|
|
575
|
+
items.append(
|
|
576
|
+
DocumentationItem(
|
|
577
|
+
file_path=file_path,
|
|
578
|
+
issue_type="stale_doc",
|
|
579
|
+
severity="high",
|
|
580
|
+
priority=1,
|
|
581
|
+
details="Source modified after doc update",
|
|
582
|
+
related_source=d.get("related_source_files", [])[:3],
|
|
583
|
+
days_stale=d.get("days_since_doc_update", 0),
|
|
584
|
+
),
|
|
585
|
+
)
|
|
586
|
+
except Exception as e:
|
|
587
|
+
logger.warning(f"Error extracting items from index: {e}")
|
|
588
|
+
|
|
589
|
+
return items
|
|
590
|
+
|
|
591
|
+
def _parse_scout_findings(self, result: Any) -> list[DocumentationItem]:
|
|
592
|
+
"""Parse scout result into DocumentationItems."""
|
|
593
|
+
items: list[DocumentationItem] = []
|
|
594
|
+
|
|
595
|
+
# Scout returns findings as list of dicts with agent responses
|
|
596
|
+
for finding in result.findings:
|
|
597
|
+
response = finding.get("response", "")
|
|
598
|
+
agent = finding.get("agent", "")
|
|
599
|
+
|
|
600
|
+
# Try to extract structured data from analyst response
|
|
601
|
+
if "Analyst" in agent:
|
|
602
|
+
# Parse mock or real findings
|
|
603
|
+
# Look for JSON-like structures in the response
|
|
604
|
+
import re
|
|
605
|
+
|
|
606
|
+
# Find file paths mentioned
|
|
607
|
+
file_pattern = r'"file_path":\s*"([^"]+)"'
|
|
608
|
+
issue_pattern = r'"issue_type":\s*"([^"]+)"'
|
|
609
|
+
severity_pattern = r'"severity":\s*"([^"]+)"'
|
|
610
|
+
|
|
611
|
+
file_matches = re.findall(file_pattern, response)
|
|
612
|
+
issue_matches = re.findall(issue_pattern, response)
|
|
613
|
+
severity_matches = re.findall(severity_pattern, response)
|
|
614
|
+
|
|
615
|
+
for i, file_path in enumerate(file_matches):
|
|
616
|
+
issue_type = issue_matches[i] if i < len(issue_matches) else "unknown"
|
|
617
|
+
severity = severity_matches[i] if i < len(severity_matches) else "medium"
|
|
618
|
+
|
|
619
|
+
# Filter by settings
|
|
620
|
+
if issue_type == "stale_doc" and not self.include_stale:
|
|
621
|
+
continue
|
|
622
|
+
if (
|
|
623
|
+
issue_type in ("missing_docstring", "no_documentation")
|
|
624
|
+
and not self.include_missing
|
|
625
|
+
):
|
|
626
|
+
continue
|
|
627
|
+
if not self._should_include_severity(severity):
|
|
628
|
+
continue
|
|
629
|
+
# Skip excluded files (requirements.txt, package.json, etc.)
|
|
630
|
+
if self._should_exclude(file_path):
|
|
631
|
+
continue
|
|
632
|
+
|
|
633
|
+
items.append(
|
|
634
|
+
DocumentationItem(
|
|
635
|
+
file_path=file_path,
|
|
636
|
+
issue_type=issue_type,
|
|
637
|
+
severity=severity,
|
|
638
|
+
priority=self._severity_to_priority(severity),
|
|
639
|
+
details=f"Found by {agent}",
|
|
640
|
+
),
|
|
641
|
+
)
|
|
642
|
+
|
|
643
|
+
return items
|
|
644
|
+
|
|
645
|
+
def _prioritize_items(self, items: list[DocumentationItem]) -> list[DocumentationItem]:
|
|
646
|
+
"""Prioritize items for generation.
|
|
647
|
+
|
|
648
|
+
Priority order:
|
|
649
|
+
1. Stale docs (source changed) - highest urgency
|
|
650
|
+
2. High-severity missing docs
|
|
651
|
+
3. Files with most LOC
|
|
652
|
+
4. Medium/low severity
|
|
653
|
+
"""
|
|
654
|
+
# Sort by: priority (asc), days_stale (desc), loc (desc)
|
|
655
|
+
sorted_items = sorted(
|
|
656
|
+
items,
|
|
657
|
+
key=lambda x: (
|
|
658
|
+
x.priority,
|
|
659
|
+
-x.days_stale,
|
|
660
|
+
-x.loc,
|
|
661
|
+
),
|
|
662
|
+
)
|
|
663
|
+
|
|
664
|
+
return sorted_items[: self.max_items]
|
|
665
|
+
|
|
666
|
+
async def _run_generate_phase(
|
|
667
|
+
self,
|
|
668
|
+
items: list[DocumentationItem],
|
|
669
|
+
) -> tuple[list[str], list[str], list[str], float]:
|
|
670
|
+
"""Run the generation phase for prioritized items.
|
|
671
|
+
|
|
672
|
+
Returns:
|
|
673
|
+
Tuple of (generated, updated, skipped, cost)
|
|
674
|
+
|
|
675
|
+
"""
|
|
676
|
+
generated: list[str] = []
|
|
677
|
+
updated: list[str] = []
|
|
678
|
+
skipped: list[str] = []
|
|
679
|
+
cost = 0.0
|
|
680
|
+
|
|
681
|
+
if self._writer is None:
|
|
682
|
+
logger.warning("Writer (DocumentGenerationWorkflow) not available")
|
|
683
|
+
return generated, updated, [item.file_path for item in items], cost
|
|
684
|
+
|
|
685
|
+
logger.info(f"Starting generation phase for {len(items)} items...")
|
|
686
|
+
print(f"\n[GENERATE PHASE] Processing {len(items)} documentation items...")
|
|
687
|
+
|
|
688
|
+
for i, item in enumerate(items):
|
|
689
|
+
# Check cost limit
|
|
690
|
+
if self._total_cost + cost >= self.max_cost:
|
|
691
|
+
remaining = items[i:]
|
|
692
|
+
skipped.extend([r.file_path for r in remaining])
|
|
693
|
+
logger.warning(f"Cost limit reached. Skipping {len(remaining)} items.")
|
|
694
|
+
print(f" [!] Cost limit ${self.max_cost:.2f} reached. Skipping remaining items.")
|
|
695
|
+
break
|
|
696
|
+
|
|
697
|
+
print(f" [{i + 1}/{len(items)}] {item.issue_type}: {item.file_path}")
|
|
698
|
+
|
|
699
|
+
try:
|
|
700
|
+
# Read source file content
|
|
701
|
+
source_path = self.project_root / item.file_path
|
|
702
|
+
source_content = ""
|
|
703
|
+
|
|
704
|
+
if source_path.exists():
|
|
705
|
+
try:
|
|
706
|
+
source_content = source_path.read_text(encoding="utf-8")
|
|
707
|
+
except Exception as e:
|
|
708
|
+
logger.warning(f"Could not read {source_path}: {e}")
|
|
709
|
+
|
|
710
|
+
# Run documentation generation
|
|
711
|
+
result = await self._writer.execute(
|
|
712
|
+
source_code=source_content,
|
|
713
|
+
target=item.file_path,
|
|
714
|
+
doc_type=self.doc_type,
|
|
715
|
+
audience=self.audience,
|
|
716
|
+
)
|
|
717
|
+
|
|
718
|
+
# Track cost from result
|
|
719
|
+
if isinstance(result, dict):
|
|
720
|
+
step_cost = result.get("accumulated_cost", 0.0)
|
|
721
|
+
cost += step_cost
|
|
722
|
+
|
|
723
|
+
# Categorize result
|
|
724
|
+
if item.issue_type == "stale_doc":
|
|
725
|
+
updated.append(item.file_path)
|
|
726
|
+
else:
|
|
727
|
+
generated.append(item.file_path)
|
|
728
|
+
|
|
729
|
+
export_path = result.get("export_path")
|
|
730
|
+
if export_path:
|
|
731
|
+
print(f" -> Saved to: {export_path}")
|
|
732
|
+
else:
|
|
733
|
+
skipped.append(item.file_path)
|
|
734
|
+
|
|
735
|
+
except Exception as e:
|
|
736
|
+
logger.error(f"Error generating docs for {item.file_path}: {e}")
|
|
737
|
+
skipped.append(item.file_path)
|
|
738
|
+
|
|
739
|
+
logger.info(
|
|
740
|
+
f"Generation phase: {len(generated)} generated, {len(updated)} updated, {len(skipped)} skipped",
|
|
741
|
+
)
|
|
742
|
+
return generated, updated, skipped, cost
|
|
743
|
+
|
|
744
|
+
def _update_project_index(self, generated: list[str], updated: list[str]) -> None:
|
|
745
|
+
"""Update ProjectIndex with newly documented files."""
|
|
746
|
+
if self._project_index is None:
|
|
747
|
+
return
|
|
748
|
+
|
|
749
|
+
try:
|
|
750
|
+
# Mark files as documented
|
|
751
|
+
for file_path in generated + updated:
|
|
752
|
+
# Update record if it exists
|
|
753
|
+
record = self._project_index.get_record(file_path)
|
|
754
|
+
if record:
|
|
755
|
+
record.has_docstring = True
|
|
756
|
+
record.last_modified = datetime.now()
|
|
757
|
+
|
|
758
|
+
# Save index
|
|
759
|
+
self._project_index.save()
|
|
760
|
+
logger.info(
|
|
761
|
+
f"ProjectIndex updated with {len(generated) + len(updated)} documented files",
|
|
762
|
+
)
|
|
763
|
+
except Exception as e:
|
|
764
|
+
logger.warning(f"Could not update ProjectIndex: {e}")
|
|
765
|
+
|
|
766
|
+
def _generate_summary(
|
|
767
|
+
self,
|
|
768
|
+
result: OrchestratorResult,
|
|
769
|
+
items: list[DocumentationItem],
|
|
770
|
+
) -> str:
|
|
771
|
+
"""Generate human-readable summary."""
|
|
772
|
+
lines = [
|
|
773
|
+
"=" * 60,
|
|
774
|
+
"DOCUMENTATION ORCHESTRATOR REPORT",
|
|
775
|
+
"=" * 60,
|
|
776
|
+
"",
|
|
777
|
+
f"Project: {self.project_root}",
|
|
778
|
+
f"Status: {'SUCCESS' if result.success else 'PARTIAL'}",
|
|
779
|
+
"",
|
|
780
|
+
"-" * 60,
|
|
781
|
+
"SCOUT PHASE",
|
|
782
|
+
"-" * 60,
|
|
783
|
+
f" Items found: {result.items_found}",
|
|
784
|
+
f" Stale docs: {result.stale_docs}",
|
|
785
|
+
f" Missing docs: {result.missing_docs}",
|
|
786
|
+
f" Cost: ${result.scout_cost:.4f}",
|
|
787
|
+
"",
|
|
788
|
+
]
|
|
789
|
+
|
|
790
|
+
if items:
|
|
791
|
+
lines.extend(
|
|
792
|
+
[
|
|
793
|
+
"Priority Items:",
|
|
794
|
+
],
|
|
795
|
+
)
|
|
796
|
+
for i, item in enumerate(items[:10]):
|
|
797
|
+
lines.append(f" {i + 1}. [{item.severity.upper()}] {item.file_path}")
|
|
798
|
+
lines.append(f" Type: {item.issue_type}")
|
|
799
|
+
if item.days_stale:
|
|
800
|
+
lines.append(f" Days stale: {item.days_stale}")
|
|
801
|
+
lines.append("")
|
|
802
|
+
|
|
803
|
+
if not self.dry_run:
|
|
804
|
+
lines.extend(
|
|
805
|
+
[
|
|
806
|
+
"-" * 60,
|
|
807
|
+
"GENERATION PHASE",
|
|
808
|
+
"-" * 60,
|
|
809
|
+
f" Items processed: {result.items_processed}",
|
|
810
|
+
f" Docs generated: {len(result.docs_generated)}",
|
|
811
|
+
f" Docs updated: {len(result.docs_updated)}",
|
|
812
|
+
f" Skipped: {len(result.docs_skipped)}",
|
|
813
|
+
f" Cost: ${result.generation_cost:.4f}",
|
|
814
|
+
"",
|
|
815
|
+
],
|
|
816
|
+
)
|
|
817
|
+
|
|
818
|
+
if result.docs_generated:
|
|
819
|
+
lines.append("Generated:")
|
|
820
|
+
for doc in result.docs_generated[:5]:
|
|
821
|
+
lines.append(f" + {doc}")
|
|
822
|
+
if len(result.docs_generated) > 5:
|
|
823
|
+
lines.append(f" ... and {len(result.docs_generated) - 5} more")
|
|
824
|
+
lines.append("")
|
|
825
|
+
|
|
826
|
+
if result.docs_updated:
|
|
827
|
+
lines.append("Updated:")
|
|
828
|
+
for doc in result.docs_updated[:5]:
|
|
829
|
+
lines.append(f" ~ {doc}")
|
|
830
|
+
lines.append("")
|
|
831
|
+
|
|
832
|
+
if result.errors:
|
|
833
|
+
lines.extend(
|
|
834
|
+
[
|
|
835
|
+
"-" * 60,
|
|
836
|
+
"ERRORS",
|
|
837
|
+
"-" * 60,
|
|
838
|
+
],
|
|
839
|
+
)
|
|
840
|
+
for error in result.errors:
|
|
841
|
+
lines.append(f" ! {error}")
|
|
842
|
+
lines.append("")
|
|
843
|
+
|
|
844
|
+
if result.warnings:
|
|
845
|
+
lines.extend(
|
|
846
|
+
[
|
|
847
|
+
"-" * 60,
|
|
848
|
+
"WARNINGS",
|
|
849
|
+
"-" * 60,
|
|
850
|
+
],
|
|
851
|
+
)
|
|
852
|
+
for warning in result.warnings:
|
|
853
|
+
lines.append(f" * {warning}")
|
|
854
|
+
lines.append("")
|
|
855
|
+
|
|
856
|
+
lines.extend(
|
|
857
|
+
[
|
|
858
|
+
"-" * 60,
|
|
859
|
+
"TOTALS",
|
|
860
|
+
"-" * 60,
|
|
861
|
+
f" Total cost: ${result.total_cost:.4f}",
|
|
862
|
+
f" Duration: {result.duration_ms}ms",
|
|
863
|
+
f" Export path: {self.export_path}",
|
|
864
|
+
"",
|
|
865
|
+
"=" * 60,
|
|
866
|
+
],
|
|
867
|
+
)
|
|
868
|
+
|
|
869
|
+
return "\n".join(lines)
|
|
870
|
+
|
|
871
|
+
async def execute(
|
|
872
|
+
self,
|
|
873
|
+
context: dict | None = None,
|
|
874
|
+
**kwargs: Any,
|
|
875
|
+
) -> OrchestratorResult:
|
|
876
|
+
"""Execute the full documentation orchestration pipeline.
|
|
877
|
+
|
|
878
|
+
Args:
|
|
879
|
+
context: Additional context for the workflows
|
|
880
|
+
**kwargs: Additional arguments
|
|
881
|
+
|
|
882
|
+
Returns:
|
|
883
|
+
OrchestratorResult with full details
|
|
884
|
+
|
|
885
|
+
"""
|
|
886
|
+
started_at = datetime.now()
|
|
887
|
+
result = OrchestratorResult(success=False, phase="scout")
|
+        errors: list[str] = []
+        warnings: list[str] = []
+
+        # Validate dependencies
+        if not HAS_SCOUT:
+            warnings.append("ManageDocumentationCrew not available - using ProjectIndex fallback")
+        if not HAS_WRITER:
+            errors.append("DocumentGenerationWorkflow not available - cannot generate docs")
+            if not self.dry_run:
+                result.errors = errors
+                result.warnings = warnings
+                return result
+        if not HAS_PROJECT_INDEX:
+            warnings.append("ProjectIndex not available - limited file tracking")
+
+        # Phase 1: Scout
+        print("\n" + "=" * 60)
+        print("DOCUMENTATION ORCHESTRATOR")
+        print("=" * 60)
+
+        items, scout_cost = await self._run_scout_phase()
+        self._total_cost += scout_cost
+
+        result.items_found = len(items)
+        result.stale_docs = sum(1 for i in items if i.issue_type == "stale_doc")
+        result.missing_docs = sum(1 for i in items if i.issue_type != "stale_doc")
+        result.scout_cost = scout_cost
+        result.phase = "prioritize"
+
+        if not items:
+            print("\n[✓] No documentation gaps found!")
+            result.success = True
+            result.phase = "complete"
+            result.duration_ms = int((datetime.now() - started_at).total_seconds() * 1000)
+            result.total_cost = self._total_cost
+            result.summary = self._generate_summary(result, items)
+            return result
+
+        # Phase 2: Prioritize
+        print(f"\n[PRIORITIZE] Found {len(items)} items, selecting top {self.max_items}...")
+        priority_items = self._prioritize_items(items)
+        self._items = priority_items
+
+        print("\nTop priority items:")
+        for i, item in enumerate(priority_items):
+            status = "STALE" if item.issue_type == "stale_doc" else "MISSING"
+            print(f"  {i + 1}. [{status}] {item.file_path}")
+
+        # Check for dry run
+        if self.dry_run:
+            print("\n[DRY RUN] Skipping generation phase")
+            result.success = True
+            result.phase = "complete"
+            result.docs_skipped = [i.file_path for i in priority_items]
+            result.duration_ms = int((datetime.now() - started_at).total_seconds() * 1000)
+            result.total_cost = self._total_cost
+            result.summary = self._generate_summary(result, priority_items)
+            return result
+
+        # Check for approval if not auto_approve
+        if not self.auto_approve:
+            print(f"\n[!] Ready to generate documentation for {len(priority_items)} items")
+            print(f"    Estimated max cost: ${self.max_cost:.2f}")
+            print("\n    Set auto_approve=True to proceed automatically")
+            result.success = True
+            result.phase = "awaiting_approval"
+            result.docs_skipped = [i.file_path for i in priority_items]
+            result.warnings = warnings
+            result.duration_ms = int((datetime.now() - started_at).total_seconds() * 1000)
+            result.total_cost = self._total_cost
+            result.summary = self._generate_summary(result, priority_items)
+            return result
+
+        # Phase 3: Generate
+        result.phase = "generate"
+        generated, updated, skipped, gen_cost = await self._run_generate_phase(priority_items)
+        self._total_cost += gen_cost
+
+        result.docs_generated = generated
+        result.docs_updated = updated
+        result.docs_skipped = skipped
+        result.generation_cost = gen_cost
+        result.items_processed = len(generated) + len(updated)
+
+        # Phase 4: Update index
+        result.phase = "update"
+        self._update_project_index(generated, updated)
+
+        # Finalize
+        result.success = True
+        result.phase = "complete"
+        result.total_cost = self._total_cost
+        result.errors = errors
+        result.warnings = warnings
+        result.duration_ms = int((datetime.now() - started_at).total_seconds() * 1000)
+        result.summary = self._generate_summary(result, priority_items)
+
+        print(result.summary)
+
+        return result
+
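Note the three early exits above (no items found, dry run, awaiting approval): all three report `success=True`, so callers should branch on `result.phase` rather than `result.success` alone. A minimal caller sketch; the import path and the approve-then-re-run pattern are assumptions for illustration, not something this module prescribes:

```python
# Hypothetical driver for the two-step approval flow; the import path is
# assumed, and the OrchestratorResult fields follow the assignments above.
import asyncio

from attune.workflows.documentation_orchestrator import DocumentationOrchestrator


async def run_docs() -> None:
    orch = DocumentationOrchestrator(project_root=".", max_items=10, max_cost=5.0)
    result = await orch.execute()
    if result.phase == "awaiting_approval":
        # First pass stopped before generation; approve and re-run.
        orch.auto_approve = True
        result = await orch.execute()
    print(f"{result.phase}: ${result.total_cost:.4f}")


asyncio.run(run_docs())
```

Re-running `execute()` repeats the scout phase (and its cost); a caller that wants a single scout pass would need to approve up front via `auto_approve=True`.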
+    async def scout_only(self) -> OrchestratorResult:
+        """Run only the scout phase (equivalent to dry_run=True)."""
+        self.dry_run = True
+        return await self.execute()
+
+    async def scout_as_json(self) -> dict:
+        """Run scout phase and return JSON-serializable results.
+
+        Used by VSCode extension to display results in Documentation Analysis panel.
+
+        Returns:
+            Dict with stats and items list ready for JSON serialization
+
+        """
+        import io
+        import sys
+
+        self.dry_run = True
+        # Suppress console output during scout
+        old_stdout = sys.stdout
+        sys.stdout = io.StringIO()
+        try:
+            result = await self.execute()
+        finally:
+            sys.stdout = old_stdout
+
+        return {
+            "success": result.success,
+            "stats": {
+                "items_found": result.items_found,
+                "stale_docs": result.stale_docs,
+                "missing_docs": result.missing_docs,
+                "scout_cost": result.scout_cost,
+                "duration_ms": result.duration_ms,
+                "excluded_count": len(self._excluded_files),
+            },
+            "items": [
+                {
+                    "id": f"{item.file_path}:{item.issue_type}",
+                    "file_path": item.file_path,
+                    "issue_type": item.issue_type,
+                    "severity": item.severity,
+                    "priority": item.priority,
+                    "details": item.details,
+                    "days_stale": item.days_stale,
+                    "loc": item.loc,
+                    "related_source": item.related_source[:3] if item.related_source else [],
+                }
+                for item in self._items
+            ],
+            "excluded": self._excluded_files,  # Files excluded from scanning
+        }
+
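`scout_as_json` silences console output by swapping `sys.stdout` by hand inside a try/finally. The standard library's `contextlib.redirect_stdout` expresses the same pattern more compactly; a sketch of the equivalent idiom (not how the module is written, just the alternative):

```python
# Same suppression idiom via contextlib; behavior matches the manual
# save/restore in scout_as_json above.
import contextlib
import io


async def scout_quietly(orchestrator) -> dict:
    orchestrator.dry_run = True
    with contextlib.redirect_stdout(io.StringIO()):  # discard scout prints
        result = await orchestrator.execute()
    return {"success": result.success, "items_found": result.items_found}
```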
+    async def generate_for_files(
+        self,
+        file_paths: list[str],
+        **kwargs: Any,
+    ) -> dict:
+        """Generate documentation for a list of specific files.
+
+        Bypasses scout phase and generates directly for each file.
+
+        Args:
+            file_paths: List of file paths to document
+            **kwargs: Additional arguments for DocumentGenerationWorkflow
+
+        Returns:
+            Dict with results for each file
+
+        """
+        generated: list[dict[str, str | float | None]] = []
+        failed: list[dict[str, str]] = []
+        skipped: list[dict[str, str]] = []
+        total_cost = 0.0
+        success = True
+
+        for file_path in file_paths:
+            # Skip excluded files (requirements.txt, package.json, etc.)
+            if self._should_exclude(file_path):
+                skipped.append(
+                    {
+                        "file": file_path,
+                        "reason": "Excluded by pattern (dependency/config/binary file)",
+                    },
+                )
+                continue
+
+            try:
+                result = await self.generate_for_file(file_path, **kwargs)
+                if isinstance(result, dict) and result.get("error"):
+                    failed.append({"file": file_path, "error": result["error"]})
+                else:
+                    export_path = result.get("export_path") if isinstance(result, dict) else None
+                    cost = result.get("accumulated_cost", 0) if isinstance(result, dict) else 0
+                    generated.append(
+                        {
+                            "file": file_path,
+                            "export_path": export_path,
+                            "cost": cost,
+                        },
+                    )
+                    total_cost += cost
+            except Exception as e:
+                failed.append({"file": file_path, "error": str(e)})
+                success = False
+
+        if failed:
+            success = len(generated) > 0  # Partial success
+
+        return {
+            "success": success,
+            "generated": generated,
+            "failed": failed,
+            "skipped": skipped,
+            "total_cost": total_cost,
+        }
+
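Note the `success` semantics: a batch with at least one generated file reports `success=True` even when other files fail, so callers should inspect `failed` as well. A hypothetical usage sketch; the file paths and the `doc_type` value are illustrative (`doc_type` is forwarded through `**kwargs` to `generate_for_file`):

```python
# Hypothetical batch run against an already-constructed orchestrator.
import asyncio


async def batch_docs(orchestrator) -> None:
    summary = await orchestrator.generate_for_files(
        ["attune/core.py", "attune/config.py"],  # illustrative paths
        doc_type="api",  # picked up by generate_for_file via kwargs.get("doc_type", ...)
    )
    if summary["success"] and summary["failed"]:
        print(f"Partial success: {len(summary['failed'])} file(s) failed")
    for entry in summary["failed"]:
        print(f"  {entry['file']}: {entry['error']}")
    print(f"Total cost: ${summary['total_cost']:.4f}")
```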
+    async def generate_for_file(
+        self,
+        file_path: str,
+        **kwargs: Any,
+    ) -> dict:
+        """Generate documentation for a specific file.
+
+        Bypasses scout phase and generates directly.
+
+        Args:
+            file_path: Path to the file to document
+            **kwargs: Additional arguments for DocumentGenerationWorkflow
+
+        Returns:
+            Generation result dict
+
+        """
+        if self._writer is None:
+            return {"error": "DocumentGenerationWorkflow not available"}
+
+        source_path = self.project_root / file_path
+        source_content = ""
+
+        if source_path.exists():
+            try:
+                source_content = source_path.read_text(encoding="utf-8")
+            except Exception as e:
+                return {"error": f"Could not read file: {e}"}
+
+        result: dict = await self._writer.execute(
+            source_code=source_content,
+            target=file_path,
+            doc_type=kwargs.get("doc_type", self.doc_type),
+            audience=kwargs.get("audience", self.audience),
+        )
+
+        # Update index
+        if isinstance(result, dict) and result.get("document"):
+            self._update_project_index([file_path], [])
+
+        return result
+
+
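One behavior worth flagging in `generate_for_file`: a missing file is not treated as an error; `source_content` stays empty and the writer is still invoked. Callers that want a missing path to fail should check first. A minimal sketch under that assumption (the `export_path` key mirrors its use in `generate_for_files` above):

```python
# Hypothetical single-file call with an explicit existence check.
from pathlib import Path


async def doc_one(orchestrator, target: str) -> None:
    if not (Path(orchestrator.project_root) / target).exists():
        print(f"skipping {target}: not found")
        return
    res = await orchestrator.generate_for_file(target, audience="developer")
    if res.get("error"):
        print(f"generation failed: {res['error']}")
    else:
        print(f"doc written to {res.get('export_path')}")
```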
+# CLI entry point
+if __name__ == "__main__":
+    import json
+    import sys
+
+    async def main():
+        path = sys.argv[1] if len(sys.argv) > 1 and not sys.argv[1].startswith("-") else "."
+        dry_run = "--dry-run" in sys.argv
+        auto_approve = "--auto" in sys.argv
+        scout_json = "--scout-json" in sys.argv
+
+        # Parse --generate-files argument
+        generate_files: list[str] | None = None
+        for i, arg in enumerate(sys.argv):
+            if arg == "--generate-files" and i + 1 < len(sys.argv):
+                try:
+                    generate_files = json.loads(sys.argv[i + 1])
+                except json.JSONDecodeError:
+                    print("Error: --generate-files must be valid JSON array", file=sys.stderr)
+                    sys.exit(1)
+
+        orchestrator = DocumentationOrchestrator(
+            project_root=path,
+            max_items=10,
+            max_cost=5.0,
+            dry_run=dry_run,
+            auto_approve=auto_approve,
+        )
+
+        # JSON scout output for VSCode extension
+        if scout_json:
+            result = await orchestrator.scout_as_json()
+            print(json.dumps(result))
+            return
+
+        # Generate specific files
+        if generate_files:
+            result = await orchestrator.generate_for_files(generate_files)
+            print(json.dumps(result))
+            return
+
+        # Normal execution
+        print("\nDocumentationOrchestrator")
+        print(f"Project: {path}")
+        print(f"Mode: {'DRY RUN' if dry_run else 'FULL' if auto_approve else 'SCOUT + AWAIT'}")
+
+        print("\nComponents:")
+        print(f"  Scout (ManageDocumentationCrew): {'✓' if orchestrator._scout else '✗'}")
+        print(f"  Writer (DocumentGenerationWorkflow): {'✓' if orchestrator._writer else '✗'}")
+        print(f"  ProjectIndex: {'✓' if orchestrator._project_index else '✗'}")
+
+        result = await orchestrator.execute()
+
+        if not result.summary:
+            print(f"\nResult: {result.to_dict()}")
+
+    asyncio.run(main())
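Usage note: the entry point accepts an optional positional project path (defaulting to `.`) plus the flags `--dry-run`, `--auto`, `--scout-json`, and `--generate-files` followed by a JSON array of paths. `--scout-json` takes precedence over `--generate-files`, which in turn takes precedence over a normal run, so a machine-readable scout pass would look something like `python <module>.py . --scout-json` (the script name depends on how the module is installed and is not specified here).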