attune_ai-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
@@ -0,0 +1,780 @@
+"""One-Command Workflows for Empathy Framework
+
+Power-user commands that automate common developer workflows:
+- morning: Start-of-day briefing with patterns, debt, and focus areas
+- ship: Pre-commit validation pipeline
+- fix-all: Auto-fix all fixable issues
+- learn: Watch for bug fixes and extract patterns
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+import json
+import subprocess
+from datetime import datetime, timedelta
+from pathlib import Path
+from typing import Any
+
+from attune.config import _validate_file_path
+from attune.logging_config import get_logger
+
+logger = get_logger(__name__)
+
+
+def _load_patterns(patterns_dir: str = "./patterns") -> dict[str, list]:
+    """Load patterns from the patterns directory."""
+    patterns: dict[str, list] = {"debugging": [], "security": [], "tech_debt": [], "inspection": []}
+
+    patterns_path = Path(patterns_dir)
+    if not patterns_path.exists():
+        return patterns
+
+    for pattern_type in patterns:
+        file_path = patterns_path / f"{pattern_type}.json"
+        if file_path.exists():
+            try:
+                validated_path = _validate_file_path(str(file_path))
+                with open(validated_path) as f:
+                    data = json.load(f)
+                patterns[pattern_type] = data.get("patterns", data.get("items", []))
+            except (OSError, json.JSONDecodeError, ValueError):
+                pass
+
+    return patterns
+
+
+def _load_stats(empathy_dir: str = ".empathy") -> dict[str, Any]:
+    """Load usage statistics."""
+    stats_file = Path(empathy_dir) / "stats.json"
+    if stats_file.exists():
+        try:
+            validated_path = _validate_file_path(str(stats_file))
+            with open(validated_path) as f:
+                result: dict[str, Any] = json.load(f)
+                return result
+        except (OSError, json.JSONDecodeError, ValueError):
+            pass
+    return {"commands": {}, "last_session": None, "patterns_learned": 0}
+
+
+def _save_stats(stats: dict, empathy_dir: str = ".empathy") -> None:
+    """Save usage statistics."""
+    stats_dir = Path(empathy_dir)
+    stats_dir.mkdir(parents=True, exist_ok=True)
+
+    validated_path = _validate_file_path(str(stats_dir / "stats.json"))
+    with open(validated_path, "w") as f:
+        json.dump(stats, f, indent=2, default=str)
+
+
+def _run_command(cmd: list, capture: bool = True) -> tuple:
+    """Run a shell command and return (success, output)."""
+    try:
+        result = subprocess.run(cmd, check=False, capture_output=capture, text=True, timeout=300)
+        return result.returncode == 0, result.stdout + result.stderr
+    except subprocess.TimeoutExpired:
+        return False, "Command timed out"
+    except FileNotFoundError:
+        return False, f"Command not found: {cmd[0]}"
+    except Exception as e:
+        return False, str(e)
+
+
+def _get_tech_debt_trend(patterns_dir: str = "./patterns") -> str:
+    """Analyze tech debt trajectory."""
+    tech_debt_file = Path(patterns_dir) / "tech_debt.json"
+    if not tech_debt_file.exists():
+        return "unknown"
+
+    try:
+        validated_path = _validate_file_path(str(tech_debt_file))
+        with open(validated_path) as f:
+            data = json.load(f)
+
+        snapshots = data.get("snapshots", [])
+        if len(snapshots) < 2:
+            return "insufficient_data"
+
+        recent = snapshots[-1].get("total_items", 0)
+        previous = snapshots[-2].get("total_items", 0)
+
+        if recent > previous:
+            return "increasing"
+        if recent < previous:
+            return "decreasing"
+        return "stable"
+    except (OSError, json.JSONDecodeError, KeyError):
+        return "unknown"
+
+
+def morning_workflow(
+    patterns_dir: str = "./patterns",
+    project_root: str = ".",
+    verbose: bool = False,
+) -> int:
+    """Start-of-day developer briefing.
+
+    Shows:
+    - Health check summary
+    - Patterns learned since last session
+    - Tech debt trajectory
+    - Suggested focus areas
+
+    Returns exit code (0 = success).
+    """
+    print("\n" + "=" * 60)
+    print(" MORNING BRIEFING")
+    print(" " + datetime.now().strftime("%A, %B %d, %Y"))
+    print("=" * 60 + "\n")
+
+    # Load stats and patterns
+    stats = _load_stats()
+    patterns = _load_patterns(patterns_dir)
+
+    # 1. Patterns summary
+    print("PATTERNS LEARNED")
+    print("-" * 40)
+
+    total_bugs = len(patterns.get("debugging", []))
+    resolved_bugs = sum(1 for p in patterns.get("debugging", []) if p.get("status") == "resolved")
+    security_decisions = len(patterns.get("security", []))
+
+    print(f" Bug patterns: {total_bugs} ({resolved_bugs} resolved)")
+    print(f" Security decisions: {security_decisions}")
+    print(f" Inspection patterns: {len(patterns.get('inspection', []))}")
+
+    # Recent patterns (last 7 days)
+    week_ago = datetime.now() - timedelta(days=7)
+    recent_bugs = []
+    for bug in patterns.get("debugging", []):
+        try:
+            timestamp = bug.get("timestamp", bug.get("resolved_at", ""))
+            if timestamp:
+                bug_date = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
+                if bug_date.replace(tzinfo=None) > week_ago:
+                    recent_bugs.append(bug)
+        except (ValueError, TypeError):
+            pass
+
+    if recent_bugs:
+        print(f"\n New this week: {len(recent_bugs)} patterns")
+        for bug in recent_bugs[:3]:
+            print(f" - {bug.get('bug_type', '?')}: {bug.get('root_cause', '?')[:40]}")
+
+    # 2. Tech debt trajectory
+    print("\n" + "TECH DEBT TRAJECTORY")
+    print("-" * 40)
+
+    trend = _get_tech_debt_trend(patterns_dir)
+    trend_icons = {
+        "increasing": " Trending UP - Consider allocating time for cleanup",
+        "decreasing": " Trending DOWN - Great progress!",
+        "stable": " Stable - Holding steady",
+        "unknown": " Run 'empathy inspect' to start tracking",
+        "insufficient_data": " Not enough data yet - keep coding!",
+    }
+    print(trend_icons.get(trend, " Unknown"))
+
+    # Show hotspots if available
+    tech_debt_file = Path(patterns_dir) / "tech_debt.json"
+    if tech_debt_file.exists():
+        try:
+            with open(tech_debt_file) as f:
+                data = json.load(f)
+            snapshots = data.get("snapshots", [])
+            if snapshots:
+                latest = snapshots[-1]
+                hotspots = latest.get("hotspots", [])[:3]
+                if hotspots:
+                    print("\n Top hotspots:")
+                    for hotspot in hotspots:
+                        print(f" - {hotspot}")
+        except (OSError, json.JSONDecodeError):
+            pass
+
+    # 3. Quick health check
+    print("\n" + "QUICK HEALTH CHECK")
+    print("-" * 40)
+
+    checks_passed = 0
+    checks_total = 0
+
+    # Check for ruff
+    checks_total += 1
+    success, output = _run_command(["ruff", "check", project_root, "--statistics", "-q"])
+    if success:
+        checks_passed += 1
+        print(" Lint: OK")
+    else:
+        issues = sum(1 for line in output.split("\n") if line.strip())
+        print(f" Lint: {issues} issues")
+
+    # Check for uncommitted changes
+    checks_total += 1
+    success, output = _run_command(["git", "status", "--porcelain"])
+    if success:
+        changes = sum(1 for line in output.split("\n") if line.strip())
+        if changes == 0:
+            checks_passed += 1
+            print(" Git: Clean")
+        else:
+            print(f" Git: {changes} uncommitted files")
+
+    print(f"\n Overall: {checks_passed}/{checks_total} checks passed")
+
+    # 4. Suggested focus
+    print("\n" + "SUGGESTED FOCUS TODAY")
+    print("-" * 40)
+
+    suggestions = []
+
+    # Based on patterns
+    investigating_bugs = [
+        p for p in patterns.get("debugging", []) if p.get("status") == "investigating"
+    ]
+    if investigating_bugs:
+        suggestions.append(
+            f"Resolve {len(investigating_bugs)} investigating bug(s) via 'empathy patterns resolve'",
+        )
+
+    if trend == "increasing":
+        suggestions.append("Address tech debt - run 'empathy status' for priorities")
+
+    if total_bugs == 0:
+        suggestions.append("Start learning patterns - run 'empathy learn' or 'empathy inspect'")
+
+    if not suggestions:
+        suggestions.append("Ship something great! Run 'empathy ship' before committing")
+
+    for i, suggestion in enumerate(suggestions[:3], 1):
+        print(f" {i}. {suggestion}")
+
+    # Update stats
+    stats["last_session"] = datetime.now().isoformat()
+    stats["commands"]["morning"] = stats["commands"].get("morning", 0) + 1
+    _save_stats(stats)
+
+    print("\n" + "=" * 60)
+    print(" Have a productive day!")
+    print("=" * 60 + "\n")
+
+    return 0
+
+
+def _run_tests_only(project_root: str = ".", verbose: bool = False) -> int:
+    """Run tests only (used by ship --tests-only)."""
+    print("\n" + "=" * 60)
+    print(" TEST RESULTS")
+    print("=" * 60 + "\n")
+
+    # Try pytest first
+    success, output = _run_command(["python", "-m", "pytest", project_root, "-v", "--tb=short"])
+
+    if success:
+        print("All tests passed!")
+        print("\n" + "=" * 60 + "\n")
+        return 0
+    print("Test Results:")
+    print("-" * 40)
+    print(output)
+    print("\n" + "=" * 60 + "\n")
+    return 1
+
+
+def _run_security_only(project_root: str = ".", verbose: bool = False) -> int:
+    """Run security checks only (used by ship --security-only)."""
+    print("\n" + "=" * 60)
+    print(" SECURITY SCAN")
+    print("=" * 60 + "\n")
+
+    issues = []
+
+    # Try bandit (Python security scanner)
+    print("1. Running Bandit security scan...")
+    success, output = _run_command(["bandit", "-r", project_root, "-ll", "-q"])
+    if success:
+        print(" PASS - No high/medium security issues")
+    elif "bandit" in output.lower() and "not found" in output.lower():
+        print(" SKIP - Bandit not installed (pip install bandit)")
+    else:
+        issue_count = output.count(">> Issue:")
+        issues.append(f"Bandit: {issue_count} security issues")
+        print(f" WARN - {issue_count} issues found")
+        if verbose:
+            print(output)
+
+    # Check for secrets in code
+    print("2. Checking for hardcoded secrets...")
+    success, output = _run_command(
+        ["grep", "-rn", "--include=*.py", "password.*=.*['\"]", project_root],
+    )
+    if not success or not output.strip():
+        print(" PASS - No obvious hardcoded secrets")
+    else:
+        lines = sum(1 for line in output.split("\n") if line.strip())
+        issues.append(f"Secrets: {lines} potential hardcoded secrets")
+        print(f" WARN - {lines} potential hardcoded values found")
+
+    # Check for .env files that might be committed
+    print("3. Checking for sensitive files...")
+    success, output = _run_command(["git", "ls-files", ".env", "*.pem", "*.key"])
+    if not output.strip():
+        print(" PASS - No sensitive files tracked")
+    else:
+        files = sum(1 for line in output.split("\n") if line.strip())
+        issues.append(f"Files: {files} sensitive files in git")
+        print(f" WARN - {files} sensitive files tracked in git")
+
+    # Summary
+    print("\n" + "-" * 60)
+    if issues:
+        print("\nSECURITY ISSUES FOUND:")
+        for issue in issues:
+            print(f" - {issue}")
+        print("\n" + "=" * 60 + "\n")
+        return 1
+
+    print("\nNo security issues found!")
+    print("\n" + "=" * 60 + "\n")
+    return 0
+
+
+def ship_workflow(
+    patterns_dir: str = "./patterns",
+    project_root: str = ".",
+    skip_sync: bool = False,
+    tests_only: bool = False,
+    security_only: bool = False,
+    verbose: bool = False,
+) -> int:
+    """Pre-commit validation pipeline.
+
+    Runs:
+    1. empathy inspect (code analysis)
+    2. empathy health (quick checks)
+    3. empathy sync-claude (pattern sync)
+    4. Summary
+
+    Args:
+        patterns_dir: Path to patterns directory
+        project_root: Project root directory
+        skip_sync: Skip syncing patterns to Claude
+        tests_only: Run tests only (skip lint/format checks)
+        security_only: Run security checks only
+        verbose: Show detailed output
+
+    Returns exit code (0 = ready to ship, non-zero = issues found).
+
+    """
+    if tests_only:
+        return _run_tests_only(project_root, verbose)
+
+    if security_only:
+        return _run_security_only(project_root, verbose)
+
+    print("\n" + "=" * 60)
+    print(" PRE-SHIP CHECKLIST")
+    print("=" * 60 + "\n")
+
+    issues = []
+    warnings = []
+
+    # 1. Lint check
+    print("1. Running lint check...")
+    success, output = _run_command(["ruff", "check", project_root])
+    if success:
+        print(" PASS - No lint issues")
+    else:
+        issue_count = len(
+            [line for line in output.split("\n") if line.strip() and not line.startswith("Found")],
+        )
+        issues.append(f"Lint: {issue_count} issues")
+        print(f" FAIL - {issue_count} issues found")
+        if verbose:
+            print(output)
+
+    # 2. Format check
+    print("2. Checking formatting...")
+    success, output = _run_command(["ruff", "format", "--check", project_root])
+    if success:
+        print(" PASS - Code is formatted")
+    else:
+        files = len(
+            [
+                line
+                for line in output.split("\n")
+                if "would be reformatted" in line.lower() or line.strip().endswith(".py")
+            ],
+        )
+        warnings.append(f"Format: {files} files need formatting")
+        print(f" WARN - {files} files need formatting (run 'empathy fix-all')")
+
+    # 3. Type check (if mypy available)
+    print("3. Checking types...")
+    success, output = _run_command(
+        ["python", "-m", "mypy", project_root, "--ignore-missing-imports", "--no-error-summary"],
+        capture=True,
+    )
+    if success or "error:" not in output.lower():
+        print(" PASS - No type errors")
+    else:
+        error_count = output.lower().count("error:")
+        warnings.append(f"Types: {error_count} type issues")
+        print(f" WARN - {error_count} type issues")
+
+    # 4. Git status
+    print("4. Checking git status...")
+    success, output = _run_command(["git", "status", "--porcelain"])
+    if success:
+        staged = sum(
+            1 for line in output.split("\n") if line.startswith(("A ", "M ", "D ", "R "))
+        )
+        unstaged = sum(1 for line in output.split("\n") if line.startswith((" M", " D", "??")))
+        if staged > 0:
+            print(f" INFO - {staged} staged, {unstaged} unstaged")
+        elif unstaged > 0:
+            warnings.append(f"Git: {unstaged} unstaged files")
+            print(f" WARN - No staged changes ({unstaged} unstaged files)")
+        else:
+            print(" INFO - Working tree clean")
+
+    # 5. Sync to Claude (optional)
+    if not skip_sync:
+        print("5. Syncing patterns to Claude Code...")
+        # Import here to avoid circular imports
+        try:
+            from pathlib import Path
+
+            from attune_llm.cli.sync_claude import sync_patterns
+
+            result = sync_patterns(project_root=Path(), verbose=False)
+            synced_count = len(result.get("synced", []))
+            if synced_count > 0:
+                print(f" PASS - {synced_count} patterns synced")
+            else:
+                print(" SKIP - No patterns to sync")
+        except ImportError:
+            print(" SKIP - sync-claude not available")
+        except Exception as e:
+            print(f" SKIP - {e}")
+    else:
+        print("5. Skipping Claude sync (--skip-sync)")
+
+    # Summary
+    print("\n" + "-" * 60)
+
+    if issues:
+        print("\nBLOCKERS (must fix before shipping):")
+        for issue in issues:
+            print(f" - {issue}")
+        print("\n Run 'empathy fix-all' to auto-fix what's possible")
+        print("\n" + "=" * 60)
+        print(" NOT READY TO SHIP")
+        print("=" * 60 + "\n")
+        return 1
+
+    if warnings:
+        print("\nWARNINGS (recommended to fix):")
+        for warning in warnings:
+            print(f" - {warning}")
+
+    print("\n" + "=" * 60)
+    print(" READY TO SHIP!")
+    print("=" * 60 + "\n")
+
+    # Update stats
+    stats = _load_stats()
+    stats["commands"]["ship"] = stats["commands"].get("ship", 0) + 1
+    _save_stats(stats)
+
+    return 0
+
+
+def fix_all_workflow(project_root: str = ".", dry_run: bool = False, verbose: bool = False) -> int:
+    """Auto-fix all fixable issues.
+
+    Runs:
+    1. ruff --fix (lint fixes)
+    2. ruff format (formatting)
+    3. isort (import sorting)
+    4. Report what changed
+
+    Returns exit code (0 = success).
+    """
+    print("\n" + "=" * 60)
+    print(" AUTO-FIX ALL")
+    if dry_run:
+        print(" (DRY RUN - no changes will be made)")
+    print("=" * 60 + "\n")
+
+    fixed_count = 0
+
+    # 1. Ruff lint fixes
+    print("1. Fixing lint issues...")
+    if dry_run:
+        success, output = _run_command(["ruff", "check", project_root, "--fix", "--diff"])
+    else:
+        success, output = _run_command(["ruff", "check", project_root, "--fix"])
+
+    if success:
+        fixed = output.count("Fixed")
+        fixed_count += fixed
+        print(f" Fixed {fixed} issues")
+    else:
+        # Some issues couldn't be auto-fixed
+        unfixable = sum(1 for line in output.split("\n") if "error" in line.lower())
+        print(f" {unfixable} issues require manual fix")
+        if verbose:
+            print(output)
+
+    # 2. Ruff formatting
+    print("2. Formatting code...")
+    if dry_run:
+        success, output = _run_command(["ruff", "format", project_root, "--diff"])
+        formatted = output.count("@@ ")
+    else:
+        success, output = _run_command(["ruff", "format", project_root])
+        formatted = len(
+            [
+                line
+                for line in output.split("\n")
+                if line.strip().endswith(".py") and "reformatted" in output.lower()
+            ],
+        )
+
+    print(f" Formatted {formatted} files")
+
+    # 3. isort (if available)
+    print("3. Sorting imports...")
+    if dry_run:
+        success, output = _run_command(["isort", project_root, "--check-only", "--diff"])
+    else:
+        success, output = _run_command(["isort", project_root])
+
+    if "Skipped" in output or "isort" in output:
+        sorted_count = output.count("Fixing") if not dry_run else output.count("---")
+        print(f" Sorted imports in {sorted_count} files")
+    else:
+        print(" No import changes needed")
+
+    # Summary
+    print("\n" + "-" * 60)
+
+    if dry_run:
+        print("\nDRY RUN complete - no files were modified")
+        print("Run without --dry-run to apply changes")
+    else:
+        print(f"\nTotal fixes applied: {fixed_count}+")
+        print("Run 'empathy ship' to verify everything is ready")
+
+    print("\n" + "=" * 60 + "\n")
+
+    # Update stats
+    stats = _load_stats()
+    stats["commands"]["fix-all"] = stats["commands"].get("fix-all", 0) + 1
+    _save_stats(stats)
+
+    return 0
+
+
+def learn_workflow(
+    patterns_dir: str = "./patterns",
+    analyze_commits: int | None = None,
+    watch: bool = False,
+    verbose: bool = False,
+) -> int:
+    """Watch for bug fixes and extract patterns.
+
+    Modes:
+    - analyze: Analyze recent commits for bug fix patterns
+    - watch: Watch for new commits and learn in real-time
+
+    Returns exit code (0 = success).
+    """
+    print("\n" + "=" * 60)
+    print(" PATTERN LEARNING")
+    print("=" * 60 + "\n")
+
+    patterns_path = Path(patterns_dir)
+    patterns_path.mkdir(parents=True, exist_ok=True)
+
+    if watch:
+        print("Watch mode not yet implemented.")
+        print("Use 'empathy learn --analyze N' to analyze recent commits.\n")
+        return 1
+
+    # Default to analyzing last 10 commits
+    commit_count = analyze_commits or 10
+
+    print(f"Analyzing last {commit_count} commits for bug fix patterns...\n")
+
+    # Get recent commits
+    success, output = _run_command(
+        ["git", "log", f"-{commit_count}", "--oneline", "--format=%H|%s|%an|%ai"],
+    )
+
+    if not success:
+        print("Failed to read git log. Are you in a git repository?")
+        return 1
+
+    commits = output.strip().split("\n")
+    bug_fix_keywords = [
+        "fix",
+        "bug",
+        "issue",
+        "error",
+        "crash",
+        "broken",
+        "repair",
+        "patch",
+        "resolve",
+    ]
+
+    learned = []
+
+    for commit_line in commits:
+        if not commit_line.strip():
+            continue
+
+        parts = commit_line.split("|")
+        if len(parts) < 2:
+            continue
+
+        commit_hash = parts[0][:8]
+        message = parts[1].lower()
+        author = parts[2] if len(parts) > 2 else "unknown"
+        date = parts[3][:10] if len(parts) > 3 else ""
+
+        # Check if this looks like a bug fix
+        is_bug_fix = any(kw in message for kw in bug_fix_keywords)
+
+        if is_bug_fix:
+            # Get the diff for this commit
+            success, diff_output = _run_command(["git", "show", commit_hash, "--stat", "--oneline"])
+
+            files_changed = []
+            if success:
+                for line in diff_output.split("\n"):
+                    if "|" in line and ("+" in line or "-" in line):
+                        file_name = line.split("|")[0].strip()
+                        files_changed.append(file_name)
+
+            # Classify bug type from message
+            bug_type = "unknown"
+            if any(kw in message for kw in ["null", "none", "undefined", "empty"]):
+                bug_type = "null_reference"
+            elif any(kw in message for kw in ["async", "await", "promise", "timeout"]):
+                bug_type = "async_timing"
+            elif any(kw in message for kw in ["type", "cast", "convert"]):
+                bug_type = "type_mismatch"
+            elif any(kw in message for kw in ["import", "module", "package"]):
+                bug_type = "import_error"
+
+            pattern = {
+                "pattern_id": f"bug_{date.replace('-', '')}_{commit_hash}",
+                "bug_type": bug_type,
+                "status": "resolved",
+                "root_cause": parts[1],  # Original message
+                "fix": f"See commit {commit_hash}",
+                "resolved_by": f"@{author.split()[0].lower()}",
+                "resolved_at": date,
+                "files_affected": files_changed[:3],
+                "source": "git_history",
+            }
+
+            learned.append(pattern)
+
+            if verbose:
+                print(f" Found: {bug_type} in {commit_hash}")
+                print(f" {parts[1][:60]}")
+
+    # Load existing patterns and merge
+    debugging_file = patterns_path / "debugging.json"
+    existing: dict[str, Any] = {"patterns": []}
+
+    if debugging_file.exists():
+        try:
+            with open(debugging_file) as f:
+                existing = json.load(f)
+        except (OSError, json.JSONDecodeError):
+            pass
+
+    # Add new patterns (avoid duplicates)
+    existing_ids = {p.get("pattern_id") for p in existing.get("patterns", [])}
+    new_patterns = [p for p in learned if p["pattern_id"] not in existing_ids]
+
+    if new_patterns:
+        existing["patterns"].extend(new_patterns)
+        existing["last_updated"] = datetime.now().isoformat()
+
+        with open(debugging_file, "w") as f:
+            json.dump(existing, f, indent=2)
+
+    # Summary
+    print("-" * 40)
+    print(f"\nAnalyzed: {len(commits)} commits")
+    print(f"Bug fixes found: {len(learned)}")
+    print(f"New patterns learned: {len(new_patterns)}")
+
+    if learned:
+        print("\nBug types discovered:")
+        types: dict[str, int] = {}
+        for p in learned:
+            t = p["bug_type"]
+            types[t] = types.get(t, 0) + 1
+        for bug_type, count in sorted(types.items(), key=lambda x: -x[1]):
+            print(f" {bug_type}: {count}")
+
+    print("\n" + "=" * 60)
+    print(" Run 'empathy sync-claude' to use these patterns with Claude Code")
+    print("=" * 60 + "\n")
+
+    # Update stats
+    stats = _load_stats()
+    stats["commands"]["learn"] = stats["commands"].get("learn", 0) + 1
+    stats["patterns_learned"] = stats.get("patterns_learned", 0) + len(new_patterns)
+    _save_stats(stats)
+
+    return 0
+
+
+# CLI command handlers
+def cmd_morning(args):
+    """Morning briefing command handler."""
+    return morning_workflow(
+        patterns_dir=getattr(args, "patterns_dir", "./patterns"),
+        project_root=getattr(args, "project_root", "."),
+        verbose=getattr(args, "verbose", False),
+    )
+
+
+def cmd_ship(args):
+    """Ship command handler."""
+    return ship_workflow(
+        patterns_dir=getattr(args, "patterns_dir", "./patterns"),
+        project_root=getattr(args, "project_root", "."),
+        skip_sync=getattr(args, "skip_sync", False),
+        tests_only=getattr(args, "tests_only", False),
+        security_only=getattr(args, "security_only", False),
+        verbose=getattr(args, "verbose", False),
+    )
+
+
+def cmd_fix_all(args):
+    """Fix-all command handler."""
+    return fix_all_workflow(
+        project_root=getattr(args, "project_root", "."),
+        dry_run=getattr(args, "dry_run", False),
+        verbose=getattr(args, "verbose", False),
+    )
+
+
+def cmd_learn(args):
+    """Learn command handler."""
+    return learn_workflow(
+        patterns_dir=getattr(args, "patterns_dir", "./patterns"),
+        analyze_commits=getattr(args, "analyze", None),
+        watch=getattr(args, "watch", False),
+        verbose=getattr(args, "verbose", False),
+    )
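
The four cmd_* handlers at the end of the hunk each accept an argparse-style namespace and read every option defensively with getattr, so any attribute may be absent. The package's actual CLI registration lives elsewhere (for example attune/cli/parsers/workflow.py and attune/cli/commands/workflow.py in the listing above) and is not shown in this diff. The sketch below is only a minimal, hypothetical wiring of these handlers; it assumes the 780-line file shown here is attune/workflow_commands.py, as the listing suggests, and the subcommand and flag names it defines are illustrative rather than the published interface.

# Hypothetical wiring sketch; NOT the package's actual CLI registration.
# It only demonstrates that cmd_morning/cmd_ship/cmd_fix_all/cmd_learn take
# an argparse Namespace whose attributes are all optional (read via getattr).
import argparse

from attune.workflow_commands import cmd_fix_all, cmd_learn, cmd_morning, cmd_ship


def main() -> int:
    parser = argparse.ArgumentParser(prog="empathy")
    sub = parser.add_subparsers(dest="command", required=True)

    p = sub.add_parser("morning", help="start-of-day briefing")
    p.add_argument("--patterns-dir", dest="patterns_dir", default="./patterns")
    p.set_defaults(func=cmd_morning)

    p = sub.add_parser("ship", help="pre-commit validation pipeline")
    p.add_argument("--skip-sync", dest="skip_sync", action="store_true")
    p.add_argument("--tests-only", dest="tests_only", action="store_true")
    p.add_argument("--security-only", dest="security_only", action="store_true")
    p.set_defaults(func=cmd_ship)

    p = sub.add_parser("fix-all", help="auto-fix lint, format, and import issues")
    p.add_argument("--dry-run", dest="dry_run", action="store_true")
    p.set_defaults(func=cmd_fix_all)

    p = sub.add_parser("learn", help="extract bug-fix patterns from git history")
    p.add_argument("--analyze", type=int, default=None, help="number of commits to scan")
    p.add_argument("--watch", action="store_true")
    p.set_defaults(func=cmd_learn)

    args = parser.parse_args()
    # Each handler returns an exit code, which maps directly onto the process status.
    return args.func(args)


if __name__ == "__main__":
    raise SystemExit(main())

The set_defaults(func=...) dispatch used above is the standard argparse pattern for routing subcommands to handlers; because every workflow returns an integer exit code, the wrapper can hand that value straight to SystemExit.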