solokit 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- solokit/__init__.py +10 -0
- solokit/__version__.py +3 -0
- solokit/cli.py +374 -0
- solokit/core/__init__.py +1 -0
- solokit/core/cache.py +102 -0
- solokit/core/command_runner.py +278 -0
- solokit/core/config.py +453 -0
- solokit/core/config_validator.py +204 -0
- solokit/core/constants.py +291 -0
- solokit/core/error_formatter.py +279 -0
- solokit/core/error_handlers.py +346 -0
- solokit/core/exceptions.py +1567 -0
- solokit/core/file_ops.py +309 -0
- solokit/core/logging_config.py +166 -0
- solokit/core/output.py +99 -0
- solokit/core/performance.py +57 -0
- solokit/core/protocols.py +141 -0
- solokit/core/types.py +312 -0
- solokit/deployment/__init__.py +1 -0
- solokit/deployment/executor.py +411 -0
- solokit/git/__init__.py +1 -0
- solokit/git/integration.py +619 -0
- solokit/init/__init__.py +41 -0
- solokit/init/claude_commands_installer.py +87 -0
- solokit/init/dependency_installer.py +313 -0
- solokit/init/docs_structure.py +90 -0
- solokit/init/env_generator.py +160 -0
- solokit/init/environment_validator.py +334 -0
- solokit/init/git_hooks_installer.py +71 -0
- solokit/init/git_setup.py +188 -0
- solokit/init/gitignore_updater.py +195 -0
- solokit/init/initial_commit.py +145 -0
- solokit/init/initial_scans.py +109 -0
- solokit/init/orchestrator.py +246 -0
- solokit/init/readme_generator.py +207 -0
- solokit/init/session_structure.py +239 -0
- solokit/init/template_installer.py +424 -0
- solokit/learning/__init__.py +1 -0
- solokit/learning/archiver.py +115 -0
- solokit/learning/categorizer.py +126 -0
- solokit/learning/curator.py +428 -0
- solokit/learning/extractor.py +352 -0
- solokit/learning/reporter.py +351 -0
- solokit/learning/repository.py +254 -0
- solokit/learning/similarity.py +342 -0
- solokit/learning/validator.py +144 -0
- solokit/project/__init__.py +1 -0
- solokit/project/init.py +1162 -0
- solokit/project/stack.py +436 -0
- solokit/project/sync_plugin.py +438 -0
- solokit/project/tree.py +375 -0
- solokit/quality/__init__.py +1 -0
- solokit/quality/api_validator.py +424 -0
- solokit/quality/checkers/__init__.py +25 -0
- solokit/quality/checkers/base.py +114 -0
- solokit/quality/checkers/context7.py +221 -0
- solokit/quality/checkers/custom.py +162 -0
- solokit/quality/checkers/deployment.py +323 -0
- solokit/quality/checkers/documentation.py +179 -0
- solokit/quality/checkers/formatting.py +161 -0
- solokit/quality/checkers/integration.py +394 -0
- solokit/quality/checkers/linting.py +159 -0
- solokit/quality/checkers/security.py +261 -0
- solokit/quality/checkers/spec_completeness.py +127 -0
- solokit/quality/checkers/tests.py +184 -0
- solokit/quality/env_validator.py +306 -0
- solokit/quality/gates.py +655 -0
- solokit/quality/reporters/__init__.py +10 -0
- solokit/quality/reporters/base.py +25 -0
- solokit/quality/reporters/console.py +98 -0
- solokit/quality/reporters/json_reporter.py +34 -0
- solokit/quality/results.py +98 -0
- solokit/session/__init__.py +1 -0
- solokit/session/briefing/__init__.py +245 -0
- solokit/session/briefing/documentation_loader.py +53 -0
- solokit/session/briefing/formatter.py +476 -0
- solokit/session/briefing/git_context.py +282 -0
- solokit/session/briefing/learning_loader.py +212 -0
- solokit/session/briefing/milestone_builder.py +78 -0
- solokit/session/briefing/orchestrator.py +137 -0
- solokit/session/briefing/stack_detector.py +51 -0
- solokit/session/briefing/tree_generator.py +52 -0
- solokit/session/briefing/work_item_loader.py +209 -0
- solokit/session/briefing.py +353 -0
- solokit/session/complete.py +1188 -0
- solokit/session/status.py +246 -0
- solokit/session/validate.py +452 -0
- solokit/templates/.claude/commands/end.md +109 -0
- solokit/templates/.claude/commands/init.md +159 -0
- solokit/templates/.claude/commands/learn-curate.md +88 -0
- solokit/templates/.claude/commands/learn-search.md +62 -0
- solokit/templates/.claude/commands/learn-show.md +69 -0
- solokit/templates/.claude/commands/learn.md +136 -0
- solokit/templates/.claude/commands/start.md +114 -0
- solokit/templates/.claude/commands/status.md +22 -0
- solokit/templates/.claude/commands/validate.md +27 -0
- solokit/templates/.claude/commands/work-delete.md +119 -0
- solokit/templates/.claude/commands/work-graph.md +139 -0
- solokit/templates/.claude/commands/work-list.md +26 -0
- solokit/templates/.claude/commands/work-new.md +114 -0
- solokit/templates/.claude/commands/work-next.md +25 -0
- solokit/templates/.claude/commands/work-show.md +24 -0
- solokit/templates/.claude/commands/work-update.md +141 -0
- solokit/templates/CHANGELOG.md +17 -0
- solokit/templates/WORK_ITEM_TYPES.md +141 -0
- solokit/templates/__init__.py +1 -0
- solokit/templates/bug_spec.md +217 -0
- solokit/templates/config.schema.json +150 -0
- solokit/templates/dashboard_refine/base/.gitignore +36 -0
- solokit/templates/dashboard_refine/base/app/(dashboard)/layout.tsx +22 -0
- solokit/templates/dashboard_refine/base/app/(dashboard)/page.tsx +68 -0
- solokit/templates/dashboard_refine/base/app/(dashboard)/users/page.tsx +77 -0
- solokit/templates/dashboard_refine/base/app/globals.css +60 -0
- solokit/templates/dashboard_refine/base/app/layout.tsx +23 -0
- solokit/templates/dashboard_refine/base/app/page.tsx +9 -0
- solokit/templates/dashboard_refine/base/components/client-refine-wrapper.tsx +21 -0
- solokit/templates/dashboard_refine/base/components/layout/header.tsx +44 -0
- solokit/templates/dashboard_refine/base/components/layout/sidebar.tsx +82 -0
- solokit/templates/dashboard_refine/base/components/ui/button.tsx +53 -0
- solokit/templates/dashboard_refine/base/components/ui/card.tsx +78 -0
- solokit/templates/dashboard_refine/base/components/ui/table.tsx +116 -0
- solokit/templates/dashboard_refine/base/components.json +16 -0
- solokit/templates/dashboard_refine/base/lib/refine.tsx +65 -0
- solokit/templates/dashboard_refine/base/lib/utils.ts +13 -0
- solokit/templates/dashboard_refine/base/next.config.ts +10 -0
- solokit/templates/dashboard_refine/base/package.json.template +40 -0
- solokit/templates/dashboard_refine/base/postcss.config.mjs +8 -0
- solokit/templates/dashboard_refine/base/providers/refine-provider.tsx +26 -0
- solokit/templates/dashboard_refine/base/tailwind.config.ts +57 -0
- solokit/templates/dashboard_refine/base/tsconfig.json +27 -0
- solokit/templates/dashboard_refine/docker/Dockerfile +57 -0
- solokit/templates/dashboard_refine/docker/docker-compose.prod.yml +31 -0
- solokit/templates/dashboard_refine/docker/docker-compose.yml +21 -0
- solokit/templates/dashboard_refine/tier-1-essential/.eslintrc.json +7 -0
- solokit/templates/dashboard_refine/tier-1-essential/jest.config.ts +17 -0
- solokit/templates/dashboard_refine/tier-1-essential/jest.setup.ts +1 -0
- solokit/templates/dashboard_refine/tier-1-essential/package.json.tier1.template +57 -0
- solokit/templates/dashboard_refine/tier-1-essential/tests/setup.ts +26 -0
- solokit/templates/dashboard_refine/tier-1-essential/tests/unit/example.test.tsx +73 -0
- solokit/templates/dashboard_refine/tier-2-standard/package.json.tier2.template +62 -0
- solokit/templates/dashboard_refine/tier-3-comprehensive/eslint.config.mjs +22 -0
- solokit/templates/dashboard_refine/tier-3-comprehensive/package.json.tier3.template +79 -0
- solokit/templates/dashboard_refine/tier-3-comprehensive/playwright.config.ts +66 -0
- solokit/templates/dashboard_refine/tier-3-comprehensive/stryker.conf.json +38 -0
- solokit/templates/dashboard_refine/tier-3-comprehensive/tests/e2e/dashboard.spec.ts +88 -0
- solokit/templates/dashboard_refine/tier-3-comprehensive/tests/e2e/user-management.spec.ts +102 -0
- solokit/templates/dashboard_refine/tier-3-comprehensive/tests/integration/dashboard.test.tsx +90 -0
- solokit/templates/dashboard_refine/tier-3-comprehensive/type-coverage.json +16 -0
- solokit/templates/dashboard_refine/tier-4-production/instrumentation.ts +9 -0
- solokit/templates/dashboard_refine/tier-4-production/k6/dashboard-load-test.js +70 -0
- solokit/templates/dashboard_refine/tier-4-production/next.config.ts +46 -0
- solokit/templates/dashboard_refine/tier-4-production/package.json.tier4.template +89 -0
- solokit/templates/dashboard_refine/tier-4-production/sentry.client.config.ts +26 -0
- solokit/templates/dashboard_refine/tier-4-production/sentry.edge.config.ts +11 -0
- solokit/templates/dashboard_refine/tier-4-production/sentry.server.config.ts +11 -0
- solokit/templates/deployment_spec.md +500 -0
- solokit/templates/feature_spec.md +248 -0
- solokit/templates/fullstack_nextjs/base/.gitignore +36 -0
- solokit/templates/fullstack_nextjs/base/app/api/example/route.ts +65 -0
- solokit/templates/fullstack_nextjs/base/app/globals.css +27 -0
- solokit/templates/fullstack_nextjs/base/app/layout.tsx +20 -0
- solokit/templates/fullstack_nextjs/base/app/page.tsx +32 -0
- solokit/templates/fullstack_nextjs/base/components/example-component.tsx +20 -0
- solokit/templates/fullstack_nextjs/base/lib/prisma.ts +17 -0
- solokit/templates/fullstack_nextjs/base/lib/utils.ts +13 -0
- solokit/templates/fullstack_nextjs/base/lib/validations.ts +20 -0
- solokit/templates/fullstack_nextjs/base/next.config.ts +7 -0
- solokit/templates/fullstack_nextjs/base/package.json.template +32 -0
- solokit/templates/fullstack_nextjs/base/postcss.config.mjs +8 -0
- solokit/templates/fullstack_nextjs/base/prisma/schema.prisma +21 -0
- solokit/templates/fullstack_nextjs/base/tailwind.config.ts +19 -0
- solokit/templates/fullstack_nextjs/base/tsconfig.json +27 -0
- solokit/templates/fullstack_nextjs/docker/Dockerfile +60 -0
- solokit/templates/fullstack_nextjs/docker/docker-compose.prod.yml +57 -0
- solokit/templates/fullstack_nextjs/docker/docker-compose.yml +47 -0
- solokit/templates/fullstack_nextjs/tier-1-essential/.eslintrc.json +7 -0
- solokit/templates/fullstack_nextjs/tier-1-essential/jest.config.ts +17 -0
- solokit/templates/fullstack_nextjs/tier-1-essential/jest.setup.ts +1 -0
- solokit/templates/fullstack_nextjs/tier-1-essential/package.json.tier1.template +48 -0
- solokit/templates/fullstack_nextjs/tier-1-essential/tests/api/example.test.ts +88 -0
- solokit/templates/fullstack_nextjs/tier-1-essential/tests/setup.ts +22 -0
- solokit/templates/fullstack_nextjs/tier-1-essential/tests/unit/example.test.tsx +22 -0
- solokit/templates/fullstack_nextjs/tier-2-standard/package.json.tier2.template +52 -0
- solokit/templates/fullstack_nextjs/tier-3-comprehensive/eslint.config.mjs +39 -0
- solokit/templates/fullstack_nextjs/tier-3-comprehensive/package.json.tier3.template +68 -0
- solokit/templates/fullstack_nextjs/tier-3-comprehensive/playwright.config.ts +66 -0
- solokit/templates/fullstack_nextjs/tier-3-comprehensive/stryker.conf.json +33 -0
- solokit/templates/fullstack_nextjs/tier-3-comprehensive/tests/e2e/flow.spec.ts +59 -0
- solokit/templates/fullstack_nextjs/tier-3-comprehensive/tests/integration/api.test.ts +165 -0
- solokit/templates/fullstack_nextjs/tier-3-comprehensive/type-coverage.json +12 -0
- solokit/templates/fullstack_nextjs/tier-4-production/instrumentation.ts +9 -0
- solokit/templates/fullstack_nextjs/tier-4-production/k6/load-test.js +45 -0
- solokit/templates/fullstack_nextjs/tier-4-production/next.config.ts +46 -0
- solokit/templates/fullstack_nextjs/tier-4-production/package.json.tier4.template +77 -0
- solokit/templates/fullstack_nextjs/tier-4-production/sentry.client.config.ts +26 -0
- solokit/templates/fullstack_nextjs/tier-4-production/sentry.edge.config.ts +11 -0
- solokit/templates/fullstack_nextjs/tier-4-production/sentry.server.config.ts +11 -0
- solokit/templates/git-hooks/prepare-commit-msg +24 -0
- solokit/templates/integration_test_spec.md +363 -0
- solokit/templates/learnings.json +15 -0
- solokit/templates/ml_ai_fastapi/base/.gitignore +104 -0
- solokit/templates/ml_ai_fastapi/base/alembic/env.py +96 -0
- solokit/templates/ml_ai_fastapi/base/alembic.ini +114 -0
- solokit/templates/ml_ai_fastapi/base/pyproject.toml.template +91 -0
- solokit/templates/ml_ai_fastapi/base/requirements.txt.template +28 -0
- solokit/templates/ml_ai_fastapi/base/src/__init__.py +5 -0
- solokit/templates/ml_ai_fastapi/base/src/api/__init__.py +3 -0
- solokit/templates/ml_ai_fastapi/base/src/api/dependencies.py +20 -0
- solokit/templates/ml_ai_fastapi/base/src/api/routes/__init__.py +3 -0
- solokit/templates/ml_ai_fastapi/base/src/api/routes/example.py +134 -0
- solokit/templates/ml_ai_fastapi/base/src/api/routes/health.py +66 -0
- solokit/templates/ml_ai_fastapi/base/src/core/__init__.py +3 -0
- solokit/templates/ml_ai_fastapi/base/src/core/config.py +64 -0
- solokit/templates/ml_ai_fastapi/base/src/core/database.py +50 -0
- solokit/templates/ml_ai_fastapi/base/src/main.py +64 -0
- solokit/templates/ml_ai_fastapi/base/src/models/__init__.py +7 -0
- solokit/templates/ml_ai_fastapi/base/src/models/example.py +61 -0
- solokit/templates/ml_ai_fastapi/base/src/services/__init__.py +3 -0
- solokit/templates/ml_ai_fastapi/base/src/services/example.py +115 -0
- solokit/templates/ml_ai_fastapi/docker/Dockerfile +59 -0
- solokit/templates/ml_ai_fastapi/docker/docker-compose.prod.yml +112 -0
- solokit/templates/ml_ai_fastapi/docker/docker-compose.yml +77 -0
- solokit/templates/ml_ai_fastapi/tier-1-essential/pyproject.toml.tier1.template +112 -0
- solokit/templates/ml_ai_fastapi/tier-1-essential/pyrightconfig.json +41 -0
- solokit/templates/ml_ai_fastapi/tier-1-essential/pytest.ini +69 -0
- solokit/templates/ml_ai_fastapi/tier-1-essential/requirements-dev.txt +17 -0
- solokit/templates/ml_ai_fastapi/tier-1-essential/ruff.toml +81 -0
- solokit/templates/ml_ai_fastapi/tier-1-essential/tests/__init__.py +3 -0
- solokit/templates/ml_ai_fastapi/tier-1-essential/tests/conftest.py +72 -0
- solokit/templates/ml_ai_fastapi/tier-1-essential/tests/test_main.py +49 -0
- solokit/templates/ml_ai_fastapi/tier-1-essential/tests/unit/__init__.py +3 -0
- solokit/templates/ml_ai_fastapi/tier-1-essential/tests/unit/test_example.py +113 -0
- solokit/templates/ml_ai_fastapi/tier-2-standard/pyproject.toml.tier2.template +130 -0
- solokit/templates/ml_ai_fastapi/tier-3-comprehensive/locustfile.py +99 -0
- solokit/templates/ml_ai_fastapi/tier-3-comprehensive/mutmut_config.py +53 -0
- solokit/templates/ml_ai_fastapi/tier-3-comprehensive/pyproject.toml.tier3.template +150 -0
- solokit/templates/ml_ai_fastapi/tier-3-comprehensive/tests/integration/__init__.py +3 -0
- solokit/templates/ml_ai_fastapi/tier-3-comprehensive/tests/integration/conftest.py +74 -0
- solokit/templates/ml_ai_fastapi/tier-3-comprehensive/tests/integration/test_api.py +131 -0
- solokit/templates/ml_ai_fastapi/tier-4-production/pyproject.toml.tier4.template +162 -0
- solokit/templates/ml_ai_fastapi/tier-4-production/requirements-prod.txt +25 -0
- solokit/templates/ml_ai_fastapi/tier-4-production/src/api/routes/metrics.py +19 -0
- solokit/templates/ml_ai_fastapi/tier-4-production/src/core/logging.py +74 -0
- solokit/templates/ml_ai_fastapi/tier-4-production/src/core/monitoring.py +68 -0
- solokit/templates/ml_ai_fastapi/tier-4-production/src/core/sentry.py +66 -0
- solokit/templates/ml_ai_fastapi/tier-4-production/src/middleware/__init__.py +3 -0
- solokit/templates/ml_ai_fastapi/tier-4-production/src/middleware/logging.py +79 -0
- solokit/templates/ml_ai_fastapi/tier-4-production/src/middleware/tracing.py +60 -0
- solokit/templates/refactor_spec.md +287 -0
- solokit/templates/saas_t3/base/.gitignore +36 -0
- solokit/templates/saas_t3/base/app/api/trpc/[trpc]/route.ts +33 -0
- solokit/templates/saas_t3/base/app/globals.css +27 -0
- solokit/templates/saas_t3/base/app/layout.tsx +23 -0
- solokit/templates/saas_t3/base/app/page.tsx +31 -0
- solokit/templates/saas_t3/base/lib/api.tsx +77 -0
- solokit/templates/saas_t3/base/lib/utils.ts +13 -0
- solokit/templates/saas_t3/base/next.config.ts +7 -0
- solokit/templates/saas_t3/base/package.json.template +38 -0
- solokit/templates/saas_t3/base/postcss.config.mjs +8 -0
- solokit/templates/saas_t3/base/prisma/schema.prisma +20 -0
- solokit/templates/saas_t3/base/server/api/root.ts +19 -0
- solokit/templates/saas_t3/base/server/api/routers/example.ts +28 -0
- solokit/templates/saas_t3/base/server/api/trpc.ts +52 -0
- solokit/templates/saas_t3/base/server/db.ts +17 -0
- solokit/templates/saas_t3/base/tailwind.config.ts +19 -0
- solokit/templates/saas_t3/base/tsconfig.json +27 -0
- solokit/templates/saas_t3/docker/Dockerfile +60 -0
- solokit/templates/saas_t3/docker/docker-compose.prod.yml +59 -0
- solokit/templates/saas_t3/docker/docker-compose.yml +49 -0
- solokit/templates/saas_t3/tier-1-essential/.eslintrc.json +7 -0
- solokit/templates/saas_t3/tier-1-essential/jest.config.ts +17 -0
- solokit/templates/saas_t3/tier-1-essential/jest.setup.ts +1 -0
- solokit/templates/saas_t3/tier-1-essential/package.json.tier1.template +54 -0
- solokit/templates/saas_t3/tier-1-essential/tests/setup.ts +22 -0
- solokit/templates/saas_t3/tier-1-essential/tests/unit/example.test.tsx +24 -0
- solokit/templates/saas_t3/tier-2-standard/package.json.tier2.template +58 -0
- solokit/templates/saas_t3/tier-3-comprehensive/eslint.config.mjs +39 -0
- solokit/templates/saas_t3/tier-3-comprehensive/package.json.tier3.template +74 -0
- solokit/templates/saas_t3/tier-3-comprehensive/playwright.config.ts +66 -0
- solokit/templates/saas_t3/tier-3-comprehensive/stryker.conf.json +34 -0
- solokit/templates/saas_t3/tier-3-comprehensive/tests/e2e/home.spec.ts +41 -0
- solokit/templates/saas_t3/tier-3-comprehensive/tests/integration/api.test.ts +44 -0
- solokit/templates/saas_t3/tier-3-comprehensive/type-coverage.json +12 -0
- solokit/templates/saas_t3/tier-4-production/instrumentation.ts +9 -0
- solokit/templates/saas_t3/tier-4-production/k6/load-test.js +51 -0
- solokit/templates/saas_t3/tier-4-production/next.config.ts +46 -0
- solokit/templates/saas_t3/tier-4-production/package.json.tier4.template +83 -0
- solokit/templates/saas_t3/tier-4-production/sentry.client.config.ts +26 -0
- solokit/templates/saas_t3/tier-4-production/sentry.edge.config.ts +11 -0
- solokit/templates/saas_t3/tier-4-production/sentry.server.config.ts +11 -0
- solokit/templates/saas_t3/tier-4-production/vercel.json +37 -0
- solokit/templates/security_spec.md +287 -0
- solokit/templates/stack-versions.yaml +617 -0
- solokit/templates/status_update.json +6 -0
- solokit/templates/template-registry.json +257 -0
- solokit/templates/work_items.json +11 -0
- solokit/testing/__init__.py +1 -0
- solokit/testing/integration_runner.py +550 -0
- solokit/testing/performance.py +637 -0
- solokit/visualization/__init__.py +1 -0
- solokit/visualization/dependency_graph.py +788 -0
- solokit/work_items/__init__.py +1 -0
- solokit/work_items/creator.py +217 -0
- solokit/work_items/delete.py +264 -0
- solokit/work_items/get_dependencies.py +185 -0
- solokit/work_items/get_dependents.py +113 -0
- solokit/work_items/get_metadata.py +121 -0
- solokit/work_items/get_next_recommendations.py +133 -0
- solokit/work_items/manager.py +235 -0
- solokit/work_items/milestones.py +137 -0
- solokit/work_items/query.py +376 -0
- solokit/work_items/repository.py +267 -0
- solokit/work_items/scheduler.py +184 -0
- solokit/work_items/spec_parser.py +838 -0
- solokit/work_items/spec_validator.py +493 -0
- solokit/work_items/updater.py +157 -0
- solokit/work_items/validator.py +205 -0
- solokit-0.1.1.dist-info/METADATA +640 -0
- solokit-0.1.1.dist-info/RECORD +323 -0
- solokit-0.1.1.dist-info/WHEEL +5 -0
- solokit-0.1.1.dist-info/entry_points.txt +2 -0
- solokit-0.1.1.dist-info/licenses/LICENSE +21 -0
- solokit-0.1.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1188 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Complete current session with quality gates and summary generation.
|
|
4
|
+
Enhanced with full tracking updates and git workflow.
|
|
5
|
+
|
|
6
|
+
Updated in Phase 5.7.3 to use spec_parser for reading work item rationale.
|
|
7
|
+
Migrated to standardized error handling pattern.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import argparse
|
|
13
|
+
import json
|
|
14
|
+
import sys
|
|
15
|
+
from datetime import datetime
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import Any
|
|
18
|
+
|
|
19
|
+
# Add scripts directory to path for imports
|
|
20
|
+
from solokit.core.command_runner import CommandRunner
|
|
21
|
+
from solokit.core.constants import (
|
|
22
|
+
GIT_QUICK_TIMEOUT,
|
|
23
|
+
GIT_STANDARD_TIMEOUT,
|
|
24
|
+
SESSION_COMPLETE_TIMEOUT,
|
|
25
|
+
)
|
|
26
|
+
from solokit.core.error_handlers import log_errors
|
|
27
|
+
from solokit.core.exceptions import (
|
|
28
|
+
FileOperationError,
|
|
29
|
+
)
|
|
30
|
+
from solokit.core.logging_config import get_logger
|
|
31
|
+
from solokit.core.output import get_output
|
|
32
|
+
from solokit.core.types import WorkItemStatus, WorkItemType
|
|
33
|
+
from solokit.quality.gates import QualityGates
|
|
34
|
+
from solokit.work_items.spec_parser import parse_spec_file
|
|
35
|
+
|
|
36
|
+
logger = get_logger(__name__)
|
|
37
|
+
output = get_output()
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@log_errors()
def load_status() -> dict[str, Any] | None:
    """Load current session status.

    Returns:
        dict: Session status data, or None if no session exists

    Raises:
        FileOperationError: If file cannot be read or parsed
    """
    status_file = Path(".session/tracking/status_update.json")
    if not status_file.exists():
        # No active session — callers treat None as "nothing to complete".
        return None

    try:
        raw = status_file.read_text()
    except OSError as exc:
        raise FileOperationError(
            operation="read",
            file_path=str(status_file),
            details=str(exc),
            cause=exc,
        ) from exc

    try:
        return json.loads(raw)  # type: ignore[no-any-return]
    except json.JSONDecodeError as exc:
        raise FileOperationError(
            operation="parse",
            file_path=str(status_file),
            details=f"Invalid JSON: {exc}",
            cause=exc,
        ) from exc
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
@log_errors()
def load_work_items() -> dict[str, Any]:
    """Load work items.

    Returns:
        dict: Work items data

    Raises:
        FileOperationError: If file cannot be read or parsed
    """
    work_items_file = Path(".session/tracking/work_items.json")

    # Read and parse in two stages so each failure mode maps onto a
    # distinct FileOperationError (missing file / unreadable / bad JSON).
    try:
        raw = work_items_file.read_text()
    except FileNotFoundError as exc:
        raise FileOperationError(
            operation="read",
            file_path=str(work_items_file),
            details="File not found",
            cause=exc,
        ) from exc
    except OSError as exc:
        raise FileOperationError(
            operation="read",
            file_path=str(work_items_file),
            details=str(exc),
            cause=exc,
        ) from exc

    try:
        return json.loads(raw)  # type: ignore[no-any-return]
    except json.JSONDecodeError as exc:
        raise FileOperationError(
            operation="parse",
            file_path=str(work_items_file),
            details=f"Invalid JSON: {exc}",
            cause=exc,
        ) from exc
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
@log_errors()
def run_quality_gates(work_item: dict | None = None) -> tuple[dict, bool, list]:
    """Run comprehensive quality gates using QualityGates class.

    Args:
        work_item: Optional work item dict for custom validations

    Returns:
        tuple: (all_results dict, all_passed bool, failed_gates list)

    Raises:
        QualityGateError: If quality gates fail and are required
    """
    gates = QualityGates()
    all_results: dict = {}
    all_passed = True
    failed_gates: list = []

    # The four standard gates follow an identical run/record/fail pattern,
    # so drive them from a table: (result key, runner, config attribute
    # whose .required flag gates whether a failure blocks completion).
    # Order matters — results are reported in execution order, and the
    # config attribute is only consulted after a failure (lazy, like the
    # original hand-unrolled version).
    standard_gates = [
        ("tests", gates.run_tests, "test_execution"),
        ("security", gates.run_security_scan, "security"),
        ("linting", gates.run_linting, "linting"),
        ("formatting", gates.run_formatting, "formatting"),
    ]
    for name, run_gate, config_attr in standard_gates:
        passed, results = run_gate()
        all_results[name] = results
        if not passed and getattr(gates.config, config_attr).required:
            all_passed = False
            failed_gates.append(name)

    # Documentation validation takes the work item for context.
    passed, doc_results = gates.validate_documentation(work_item)
    all_results["documentation"] = doc_results
    if not passed and gates.config.documentation.required:
        all_passed = False
        failed_gates.append("documentation")

    # Verify Context7 libraries.
    # Context7 is optional and not in QualityGatesConfig, always treat as optional:
    # its failures are warnings, never blocking.
    passed, context7_results = gates.verify_context7_libraries()
    all_results["context7"] = context7_results
    if not passed:
        logger.warning("Context7 library verification failed (non-blocking)")

    # Custom validations only run when a work item is provided, and a
    # failure is always blocking (no .required toggle for this gate).
    if work_item:
        passed, custom_results = gates.run_custom_validations(work_item)
        all_results["custom"] = custom_results
        if not passed:
            all_passed = False
            failed_gates.append("custom")

    # Generate and print the consolidated report.
    report = gates.generate_report(all_results)
    output.info("\n" + report)

    # Print remediation guidance if any gates failed.
    if failed_gates:
        guidance = gates.get_remediation_guidance(failed_gates)
        output.info(guidance)

    return all_results, all_passed, failed_gates
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
def _run_tracking_script(
    runner: CommandRunner, script_path: Path, session_num: int, label: str
) -> None:
    """Run one tracking-update script (stack/tree) and report the outcome.

    Failures are logged and surfaced to the user but never raised — tracking
    updates are non-critical and must not abort session completion.

    Args:
        runner: Shared CommandRunner used to execute the script
        script_path: Absolute path to the tracking script to run
        session_num: Current session number, passed via --session
        label: Human-readable name used in success/failure messages
               (e.g. "Stack", "Tree")
    """
    try:
        result = runner.run(
            [
                # Use the interpreter running this process rather than bare
                # "python": under a virtualenv or a python3-only system, the
                # "python" on PATH may be missing or the wrong interpreter.
                sys.executable,
                str(script_path),
                "--session",
                str(session_num),
                "--non-interactive",
            ]
        )
        if result.success:
            output.success(f"{label} updated")
            # Print output if there were changes
            if result.stdout.strip():
                for line in result.stdout.strip().split("\n"):
                    if line.strip():
                        output.info(f"  {line}")
        else:
            logger.warning(f"{label} update failed (exit code {result.returncode})")
            output.warning(f"{label} update failed (exit code {result.returncode})")
            if result.stderr:
                logger.warning(f"{label} update error: {result.stderr}")
                output.info(f"  Error: {result.stderr}")
    except Exception as e:
        logger.warning(f"{label} update failed: {e}", exc_info=True)
        output.warning(f"{label} update failed: {e}")


@log_errors()
def update_all_tracking(session_num: int) -> bool:
    """Update stack, tree, and other tracking files.

    Args:
        session_num: Current session number

    Returns:
        bool: True if tracking updates completed (may have warnings)

    Note:
        This function logs warnings but does not raise exceptions for
        tracking update failures, as they are non-critical.
    """
    logger.info(f"Updating tracking files for session {session_num}")

    # Get Solokit installation directory for absolute path resolution
    script_dir = Path(__file__).parent
    project_dir = script_dir.parent / "project"

    runner = CommandRunner(default_timeout=SESSION_COMPLETE_TIMEOUT)

    # Stack and tree updates share an identical run/report pattern.
    _run_tracking_script(runner, project_dir / "stack.py", session_num, "Stack")
    _run_tracking_script(runner, project_dir / "tree.py", session_num, "Tree")

    return True
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
@log_errors()
def trigger_curation_if_needed(session_num: int) -> None:
    """Check if curation should run and trigger it.

    Args:
        session_num: Current session number

    Note:
        This function logs warnings but does not raise exceptions for
        curation failures, as they are non-critical.
    """
    # Use ConfigManager for centralized config management
    from solokit.core.config import get_config_manager

    manager = get_config_manager()
    manager.load_config(Path(".session/config.json"))
    settings = manager.curation

    # Guard clauses: bail out early when curation is disabled or this
    # session does not land on the configured cadence.
    if not settings.auto_curate:
        logger.debug("Auto-curation disabled in config")
        return

    # Run curation every N sessions
    if session_num % settings.frequency != 0:
        return

    logger.info(f"Triggering automatic curation for session {session_num}")
    banner = "=" * 50
    output.info(f"\n{banner}")
    output.info(f"Running automatic learning curation (session {session_num})...")
    output.info(f"{banner}\n")

    try:
        from solokit.learning.curator import LearningsCurator

        LearningsCurator().curate(dry_run=False)
        output.success("Learning curation completed\n")
        logger.info("Learning curation completed successfully")
    except Exception as e:
        logger.warning(f"Learning curation failed: {e}", exc_info=True)
        output.warning(f"Learning curation failed: {e}\n")
|
|
315
|
+
|
|
316
|
+
|
|
317
|
+
@log_errors()
def auto_extract_learnings(session_num: int) -> int:
    """Auto-extract learnings from session artifacts.

    Gathers candidate learnings from the session summary (if present),
    git commit messages, and inline code comments, then records only the
    ones the curator has not seen before.

    Args:
        session_num: Current session number

    Returns:
        int: Number of new learnings extracted

    Note:
        This function logs warnings but does not raise exceptions for
        extraction failures, as they are non-critical.
    """
    logger.info(f"Auto-extracting learnings from session {session_num} artifacts")

    try:
        # Import learning curator lazily (matches the module's convention
        # of deferring optional subsystem imports).
        from solokit.learning.curator import LearningsCurator

        curator = LearningsCurator()

        # Collect candidates from every available artifact source, in the
        # same order as before: summary, commits, code comments.
        candidates: list = []

        # Extract from session summary (if it exists)
        summary_file = Path(f".session/history/session_{session_num:03d}_summary.md")
        if summary_file.exists():
            candidates.extend(curator.extract_from_session_summary(summary_file))

        # Extract from git commits
        candidates.extend(curator.extract_from_git_commits())

        # Extract from inline code comments
        candidates.extend(curator.extract_from_code_comments())

        # The curator deduplicates; count only genuinely new entries.
        total_extracted = sum(
            1 for learning in candidates if curator.add_learning_if_new(learning)
        )

        if total_extracted > 0:
            logger.info(f"Auto-extracted {total_extracted} new learnings")
            output.info(f"✓ Auto-extracted {total_extracted} new learning(s)\n")
        else:
            logger.info("No new learnings extracted from session artifacts")
            output.info("No new learnings extracted\n")

        return total_extracted

    except Exception as e:
        # Extraction is best-effort; never block session completion.
        logger.warning(f"Auto-extraction failed: {e}", exc_info=True)
        output.warning(f"Auto-extraction failed: {e}\n")
        return 0
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
@log_errors()
def extract_learnings_from_session(learnings_file: Path | None = None) -> list[str]:
    """Extract learnings from work done in session (manual input or file).

    Args:
        learnings_file: Path to file containing learnings (one per line)

    Returns:
        list: List of learning strings

    Raises:
        FileOperationError: If learnings file cannot be read
    """
    # If learnings file provided, read from it
    if learnings_file:
        learnings_path = Path(learnings_file)
        if not learnings_path.exists():
            logger.warning(f"Learnings file not found: {learnings_file}")
            output.warning(f"Learnings file not found: {learnings_file}")
            return []

        try:
            logger.info(f"Reading learnings from {learnings_file}")
            # Explicit encoding: learnings may contain non-ASCII characters.
            with open(learnings_path, encoding="utf-8") as f:
                learnings = [line.strip() for line in f if line.strip()]
        except OSError as e:
            logger.warning(f"Failed to read learnings file: {e}")
            output.warning(f"Failed to read learnings file: {e}")
            return []

        output.info(f"✓ Loaded {len(learnings)} learnings from file")

        # Clean up temp file. A failed unlink must not discard learnings
        # that were already read successfully, so it gets its own handler
        # instead of sharing the read's except block.
        try:
            learnings_path.unlink()
        except OSError as e:
            logger.warning(f"Failed to remove learnings temp file: {e}")

        return learnings

    # Skip manual input in non-interactive mode (e.g., when run by Claude Code)
    if not sys.stdin.isatty():
        logger.info("Skipping manual learning extraction (non-interactive mode)")
        output.info("\nSkipping manual learning extraction (non-interactive mode)")
        return []

    output.info("\nCapture additional learnings manually...")
    output.info("(Type each learning, or 'done' to finish, or 'skip' to skip):")

    learnings = []
    while True:
        try:
            # Keep the try body minimal: only input() can raise EOFError.
            learning = input("> ")
        except EOFError:
            # Handle EOF gracefully in case stdin is closed
            logger.debug("EOF encountered during manual learning input")
            break
        if learning.lower() == "done":
            break
        if learning.lower() == "skip":
            return []
        if learning:
            learnings.append(learning)

    return learnings
|
|
435
|
+
|
|
436
|
+
|
|
437
|
+
@log_errors()
def complete_git_workflow(
    work_item_id: str, commit_message: str, session_num: int
) -> dict[str, Any]:
    """Complete git workflow (commit, push, optionally merge or create PR).

    Args:
        work_item_id: Work item identifier
        commit_message: Git commit message
        session_num: Current session number

    Returns:
        dict: Result dict with 'success' and 'message' keys

    Note:
        This function returns error dicts rather than raising exceptions
        to maintain compatibility with existing error handling.
    """

    def failure(message: str) -> dict[str, Any]:
        # Uniform error-result shape expected by callers.
        return {"success": False, "message": message}

    try:
        # Import git workflow from new location
        from solokit.git.integration import GitWorkflow

        workflow = GitWorkflow()

        # Load work items to determine whether the item is finished.
        tracking_path = Path(".session/tracking/work_items.json")
        try:
            with open(tracking_path) as fh:
                tracking = json.load(fh)
        except (FileNotFoundError, json.JSONDecodeError) as e:
            logger.error(f"Failed to load work items: {e}")
            return failure(f"Failed to load work items: {e}")

        items = tracking["work_items"]
        if work_item_id not in items:
            logger.error(f"Work item not found: {work_item_id}")
            return failure(f"Work item not found: {work_item_id}")

        # Merge only when the work item is already marked completed.
        merge_now = items[work_item_id]["status"] == WorkItemStatus.COMPLETED.value

        # Complete work item in git (session_num enables PR creation).
        return workflow.complete_work_item(
            work_item_id, commit_message, merge=merge_now, session_num=session_num
        )
    except Exception as e:
        logger.error(f"Git workflow error: {e}", exc_info=True)
        return failure(f"Git workflow error: {e}")
|
|
486
|
+
|
|
487
|
+
|
|
488
|
+
@log_errors()
def record_session_commits(work_item_id: str) -> None:
    """Record commits made during session to work item tracking (Bug #15 fix).

    Args:
        work_item_id: Work item identifier

    Note:
        This function logs warnings but does not raise exceptions, as commit
        recording is non-critical tracking functionality.
    """
    try:
        tracking_path = Path(".session/tracking/work_items.json")
        with open(tracking_path) as fh:
            tracking = json.load(fh)

        if work_item_id not in tracking["work_items"]:
            logger.warning(f"Work item not found for commit recording: {work_item_id}")
            return

        git_meta = tracking["work_items"][work_item_id].get("git", {})

        # Branch information recorded when the work item was started.
        branch = git_meta.get("branch")
        base_branch = git_meta.get("parent_branch", "main")

        if not branch:
            # No git branch tracking for this work item
            logger.debug(f"No git branch tracking for work item: {work_item_id}")
            return

        # Commits on the session branch that are not on the parent branch.
        runner = CommandRunner(default_timeout=GIT_STANDARD_TIMEOUT)
        log_result = runner.run(
            ["git", "log", "--pretty=format:%H|%s|%ai", f"{base_branch}..{branch}"]
        )

        if not log_result.success:
            # Branch might not exist or other git error - skip silently
            logger.debug(f"Git log failed for branch {branch}: {log_result.stderr}")
            return

        # Each line is "sha|subject|iso-date"; keep only well-formed rows.
        rows = (
            line.split("|", 2)
            for line in log_result.stdout.strip().split("\n")
            if line
        )
        commits = [
            {"sha": sha, "message": message, "timestamp": timestamp}
            for sha, message, timestamp in (row for row in rows if len(row) == 3)
        ]

        # Persist the commit list back into work_items.json.
        if commits:
            tracking["work_items"][work_item_id]["git"]["commits"] = commits
            with open(tracking_path, "w") as fh:
                json.dump(tracking, fh, indent=2)
            logger.info(f"Recorded {len(commits)} commits for work item {work_item_id}")

    except Exception as e:
        # Silently skip if there's any error - this is non-critical tracking
        logger.debug(f"Failed to record session commits: {e}", exc_info=True)
|
|
549
|
+
|
|
550
|
+
|
|
551
|
+
@log_errors()
def generate_commit_message(status: dict, work_item: dict) -> str:
    """Generate standardized commit message.

    Updated in Phase 5.7.3 to read rationale from spec file instead of
    deprecated JSON field.

    Args:
        status: Session status dict
        work_item: Work item dict

    Returns:
        str: Formatted commit message

    Note:
        Spec file errors are logged but don't prevent message generation.
    """
    header = (
        f"Session {status['current_session']:03d}: "
        f"{work_item['type'].title()} - {work_item['title']}\n\n"
    )
    parts = [header]

    # Pull the rationale paragraph out of the spec file, if available.
    try:
        rationale = parse_spec_file(work_item).get("rationale")
        if rationale and rationale.strip():
            # Keep only the first paragraph, truncated to at most 200 chars.
            paragraph = rationale.split("\n\n")[0]
            if len(paragraph) > 200:
                paragraph = paragraph[:197] + "..."
            parts.append(f"{paragraph}\n\n")
    except Exception as e:
        # If spec file not found or invalid, continue without rationale
        logger.debug(f"Could not read spec file rationale: {e}")

    is_completed = work_item["status"] == WorkItemStatus.COMPLETED.value
    parts.append("✅ Work item completed\n" if is_completed else "🚧 Work in progress\n")

    parts.append("\n🤖 Generated with [Claude Code](https://claude.com/claude-code)\n")
    parts.append("\nCo-Authored-By: Claude <noreply@anthropic.com>")

    return "".join(parts)
|
|
598
|
+
|
|
599
|
+
|
|
600
|
+
@log_errors()
def generate_summary(
    status: dict, work_items_data: dict, gate_results: dict, learnings: list | None = None
) -> str:
    """Generate comprehensive session summary.

    Builds a markdown document with the work item header, per-commit
    details (with ``git diff --stat`` file stats), quality-gate results,
    captured learnings, and optional integration-test / deployment
    sections.

    Args:
        status: Session status dict (reads "current_work_item", "current_session")
        work_items_data: Work items data dict (reads "work_items")
        gate_results: Quality gate results dict
        learnings: Optional list of learnings

    Returns:
        str: Formatted markdown summary

    Note:
        Git diff errors are logged but don't prevent summary generation.
    """
    work_item_id = status["current_work_item"]
    work_item = work_items_data["work_items"][work_item_id]

    summary = f"""# Session {status["current_session"]} Summary

{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

## Work Items
- **{work_item_id}**: {work_item["title"]} ({work_item["status"]})

"""

    # Add commit details with file stats (Enhancement #11 Phase 1)
    commits = work_item.get("git", {}).get("commits", [])
    if commits:
        summary += "## Commits Made\n\n"
        for commit in commits:
            # Show short SHA and first line of commit message
            message_lines = commit["message"].split("\n")
            first_line = message_lines[0] if message_lines else ""
            summary += f"**{commit['sha'][:7]}** - {first_line}\n"

            # Show full message if multi-line
            if len(message_lines) > 1:
                remaining_lines = "\n".join(message_lines[1:]).strip()
                if remaining_lines:
                    summary += "\n```\n"
                    summary += remaining_lines
                    summary += "\n```\n\n"

            # Get file stats using git diff (sha^..sha = just this commit)
            try:
                runner = CommandRunner(default_timeout=GIT_STANDARD_TIMEOUT)
                result = runner.run(["git", "diff", "--stat", f"{commit['sha']}^..{commit['sha']}"])
                if result.success and result.stdout.strip():
                    summary += "\nFiles changed:\n```\n"
                    summary += result.stdout
                    summary += "```\n\n"
            except Exception as e:
                # Silently skip if git diff fails (e.g. root commit has no parent)
                logger.debug(f"Git diff failed for commit {commit['sha']}: {e}")

        summary += "\n"

    summary += "## Quality Gates\n"

    # Add results for each gate; anything not "skipped"/"passed" renders as FAILED
    for gate_name, gate_result in gate_results.items():
        status_text = gate_result.get("status", "unknown")
        if status_text == "skipped":
            summary += f"- {gate_name.title()}: ⊘ SKIPPED\n"
        elif status_text == "passed":
            summary += f"- {gate_name.title()}: ✓ PASSED\n"
        else:
            summary += f"- {gate_name.title()}: ✗ FAILED\n"

        # Add coverage for tests
        if gate_name == "tests" and gate_result.get("coverage"):
            summary += f" - Coverage: {gate_result['coverage']}%\n"

        # Add severity counts for security
        if gate_name == "security" and gate_result.get("by_severity"):
            for severity, count in gate_result["by_severity"].items():
                summary += f" - {severity}: {count}\n"

    if learnings:
        summary += "\n## Learnings Captured\n"
        for learning in learnings:
            summary += f"- {learning}\n"

    summary += "\n## Next Session\nTo be determined\n"

    # Add integration test summary if applicable (empty string when not
    # an integration-test work item)
    integration_summary = generate_integration_test_summary(work_item, gate_results)
    if integration_summary:
        summary += integration_summary

    # Add deployment summary if applicable (empty string when not a
    # deployment work item)
    deployment_summary = generate_deployment_summary(work_item, gate_results)
    if deployment_summary:
        summary += deployment_summary

    return summary
|
|
701
|
+
|
|
702
|
+
|
|
703
|
+
def generate_integration_test_summary(work_item: dict, gate_results: dict) -> str:
    """
    Generate integration test summary for session completion.

    Args:
        work_item: Integration test work item
        gate_results: Results from quality gates

    Returns:
        Integration test summary section
    """
    # Only integration-test work items get this section.
    if work_item.get("type") != WorkItemType.INTEGRATION_TEST.value:
        return ""

    parts = ["\n## Integration Test Results\n\n"]

    # Results produced by the integration-test quality gate.
    gate_data = gate_results.get("integration_tests", {})

    if gate_data:
        # Integration test execution counts
        counts = gate_data.get("integration_tests", {})
        if counts:
            parts.append("**Integration Tests:**\n")
            parts.append(f"- Passed: {counts.get('passed', 0)}\n")
            parts.append(f"- Failed: {counts.get('failed', 0)}\n")
            parts.append(f"- Skipped: {counts.get('skipped', 0)}\n")
            parts.append(f"- Duration: {counts.get('total_duration', 0):.2f}s\n\n")

        # Performance benchmarks
        perf = gate_data.get("performance_benchmarks", {})
        if perf:
            parts.append("**Performance Benchmarks:**\n")

            latency = perf.get("load_test", {}).get("latency", {})
            if latency:
                for percentile in ("p50", "p95", "p99"):
                    parts.append(
                        f"- {percentile} latency: {latency.get(percentile, 'N/A')}ms\n"
                    )

            throughput = perf.get("load_test", {}).get("throughput", {})
            if throughput:
                parts.append(
                    f"- Throughput: {throughput.get('requests_per_sec', 'N/A')} req/s\n"
                )

            if perf.get("regression_detected"):
                parts.append("- ⚠️ Performance regression detected!\n")

            parts.append("\n")

        # API contracts
        contracts = gate_data.get("api_contracts", {})
        if contracts:
            parts.append("**API Contract Validation:**\n")
            parts.append(
                f"- Contracts validated: {contracts.get('contracts_validated', 0)}\n"
            )

            breaking = contracts.get("breaking_changes", [])
            if breaking:
                parts.append(f"- ⚠️ Breaking changes detected: {len(breaking)}\n")
                for change in breaking[:3]:  # Show first 3
                    parts.append(f" - {change.get('message', 'Unknown')}\n")
            else:
                parts.append("- ✓ No breaking changes\n")

            parts.append("\n")

    return "".join(parts)
|
|
769
|
+
|
|
770
|
+
|
|
771
|
+
def generate_deployment_summary(work_item: dict, gate_results: dict) -> str:
    """
    Generate deployment-specific summary section.

    Args:
        work_item: Deployment work item
        gate_results: Results from deployment quality gates

    Returns:
        Deployment summary text
    """
    # Only deployment work items get this section.
    if work_item.get("type") != WorkItemType.DEPLOYMENT.value:
        return ""

    divider = "=" * 60
    lines = ["\n" + divider, "DEPLOYMENT RESULTS", divider]

    # Deployment execution results
    # NOTE: Framework stub - Parse actual results from deployment_executor
    # When implemented, extract from DeploymentExecutor.get_deployment_log()
    lines += [
        "\n**Deployment Execution:**",
        " Status: [Success/Failed]",
        " Steps completed: [X/Y]",
        " Duration: [X minutes]",
    ]

    # Smoke test results
    lines += [
        "\n**Smoke Tests:**",
        " Passed: [X]",
        " Failed: [Y]",
        " Skipped: [Z]",
    ]

    # Environment validation
    lines.append("\n**Environment Validation:**")
    for gate in gate_results.get("gates", []):
        if gate.get("name") == "Environment Validation":
            verdict = "✓ PASSED" if gate.get("passed") else "✗ FAILED"
            lines.append(f" {verdict}")

    # Rollback status (if applicable)
    # NOTE: Framework stub - Check deployment results for rollback trigger
    # When implemented, check DeploymentExecutor results for rollback_triggered flag
    rollback_triggered = False
    if rollback_triggered:
        lines += [
            "\n⚠️ ROLLBACK TRIGGERED",
            " Reason: [smoke test failure / error threshold]",
            " Rollback status: [Success/Failed]",
        ]

    # Post-deployment metrics
    lines += [
        "\n**Post-Deployment Metrics:**",
        " Error rate: [X%]",
        " Response time p99: [X ms]",
        " Active alerts: [X]",
    ]

    lines.append("\n" + divider)

    return "\n".join(lines)
|
|
829
|
+
|
|
830
|
+
|
|
831
|
+
@log_errors()
def check_uncommitted_changes() -> bool:
    """Check for uncommitted changes and guide user to commit first.

    Runs ``git status --porcelain``, ignores Solokit's own tracking and
    briefing files, and if real user changes remain, prints step-by-step
    commit instructions. In interactive mode the user may override and
    continue anyway; in non-interactive mode the check always aborts.

    Returns:
        bool: True if can proceed, False if should abort

    Note:
        This function logs warnings but does not raise exceptions.
        Git errors allow proceeding to avoid blocking workflows.
    """
    try:
        runner = CommandRunner(default_timeout=GIT_QUICK_TIMEOUT, working_dir=Path.cwd())
        result = runner.run(["git", "status", "--porcelain"])

        uncommitted = [line for line in result.stdout.split("\n") if line.strip()]

        # Filter out .session/tracking files (they're updated by sk end)
        user_changes = [
            line
            for line in uncommitted
            if ".session/tracking/" not in line and ".session/briefings/" not in line
        ]

        if not user_changes:
            logger.debug("No uncommitted changes detected")
            return True  # All good

        logger.warning(f"Detected {len(user_changes)} uncommitted changes")

        # Display uncommitted changes
        output.info("\n" + "=" * 60)
        output.warning("UNCOMMITTED CHANGES DETECTED")
        output.info("=" * 60)
        output.info("\nYou have uncommitted changes:")
        output.info("")

        for line in user_changes[:15]:  # Show first 15
            output.info(f" {line}")

        if len(user_changes) > 15:
            output.info(f" ... and {len(user_changes) - 15} more")

        # Walk the user through the required pre-completion steps.
        output.info("\n" + "=" * 60)
        output.info("📋 REQUIRED STEPS BEFORE /sk:end:")
        output.info("=" * 60)
        output.info("")
        output.info("1. Review your changes:")
        output.info(" git status")
        output.info("")
        output.info("2. Update CHANGELOG.md with session changes:")
        output.info(" ## [Unreleased]")
        output.info(" ### Added")
        output.info(" - Your feature or change")
        output.info("")
        output.info("3. Commit everything:")
        output.info(" git add -A")
        # The quote opened here is intentionally closed several lines
        # below — the example shows one multi-line commit message.
        output.info(" git commit -m 'Implement feature X")
        output.info("")
        output.info(" LEARNING: Key insight from implementation")
        output.info("")
        output.info(" 🤖 Generated with [Claude Code](https://claude.com/claude-code)")
        output.info(" Co-Authored-By: Claude <noreply@anthropic.com>'")
        output.info("")
        output.info("4. Then run:")
        output.info(" sk end")
        output.info("")
        output.info("=" * 60)

        # In interactive mode, allow override
        if sys.stdin.isatty():
            output.info("")
            response = input("Continue anyway? (y/n): ")
            user_override = response.lower() == "y"
            logger.info(
                f"User {'overrode' if user_override else 'aborted on'} uncommitted changes check"
            )
            return user_override
        else:
            # Non-interactive callers (e.g. automation) cannot confirm,
            # so always abort rather than risk losing work.
            logger.info("Non-interactive mode: aborting on uncommitted changes")
            output.info("\nNon-interactive mode: exiting")
            output.info("Please commit your changes and run 'sk end' again.")
            return False

    except Exception as e:
        logger.warning(f"Could not check git status: {e}", exc_info=True)
        output.info(f"Warning: Could not check git status: {e}")
        return True  # Don't block on errors
|
|
919
|
+
|
|
920
|
+
|
|
921
|
+
@log_errors()
|
|
922
|
+
def main() -> int:
|
|
923
|
+
"""Enhanced main entry point with full tracking updates.
|
|
924
|
+
|
|
925
|
+
Returns:
|
|
926
|
+
int: Exit code (0 for success, 1 for failure)
|
|
927
|
+
|
|
928
|
+
Raises:
|
|
929
|
+
SessionNotFoundError: If no active session exists
|
|
930
|
+
WorkItemNotFoundError: If work item cannot be found
|
|
931
|
+
QualityGateError: If quality gates fail
|
|
932
|
+
FileOperationError: If file operations fail
|
|
933
|
+
"""
|
|
934
|
+
# Parse command-line arguments
|
|
935
|
+
parser = argparse.ArgumentParser(description="Complete Solokit session")
|
|
936
|
+
parser.add_argument(
|
|
937
|
+
"--learnings-file",
|
|
938
|
+
type=str,
|
|
939
|
+
help="Path to file containing learnings (one per line)",
|
|
940
|
+
)
|
|
941
|
+
parser.add_argument(
|
|
942
|
+
"--complete",
|
|
943
|
+
action="store_true",
|
|
944
|
+
help="Mark work item as complete",
|
|
945
|
+
)
|
|
946
|
+
parser.add_argument(
|
|
947
|
+
"--incomplete",
|
|
948
|
+
action="store_true",
|
|
949
|
+
help="Keep work item as in-progress",
|
|
950
|
+
)
|
|
951
|
+
args = parser.parse_args()
|
|
952
|
+
|
|
953
|
+
# Load current status
|
|
954
|
+
try:
|
|
955
|
+
status = load_status()
|
|
956
|
+
if not status:
|
|
957
|
+
logger.error("No active session found")
|
|
958
|
+
output.info("Error: No active session found")
|
|
959
|
+
return 1
|
|
960
|
+
except FileOperationError as e:
|
|
961
|
+
logger.error(f"Failed to load session status: {e}")
|
|
962
|
+
output.info(f"Error: Failed to load session status: {e}")
|
|
963
|
+
return 1
|
|
964
|
+
|
|
965
|
+
try:
|
|
966
|
+
work_items_data = load_work_items()
|
|
967
|
+
except FileOperationError as e:
|
|
968
|
+
logger.error(f"Failed to load work items: {e}")
|
|
969
|
+
output.info(f"Error: Failed to load work items: {e}")
|
|
970
|
+
return 1
|
|
971
|
+
|
|
972
|
+
work_item_id = status["current_work_item"]
|
|
973
|
+
session_num = status["current_session"]
|
|
974
|
+
|
|
975
|
+
if work_item_id not in work_items_data["work_items"]:
|
|
976
|
+
logger.error(f"Work item not found: {work_item_id}")
|
|
977
|
+
output.info(f"Error: Work item not found: {work_item_id}")
|
|
978
|
+
return 1
|
|
979
|
+
|
|
980
|
+
work_item = work_items_data["work_items"][work_item_id]
|
|
981
|
+
|
|
982
|
+
logger.info(f"Starting session {session_num} completion for work item {work_item_id}")
|
|
983
|
+
|
|
984
|
+
# Pre-flight check - ensure changes are committed
|
|
985
|
+
if not check_uncommitted_changes():
|
|
986
|
+
logger.warning("Session completion aborted due to uncommitted changes")
|
|
987
|
+
output.info("\n❌ Session completion aborted")
|
|
988
|
+
output.info("Commit your changes and try again.\n")
|
|
989
|
+
return 1
|
|
990
|
+
|
|
991
|
+
output.info("Completing session...\n")
|
|
992
|
+
output.info("Running comprehensive quality gates...\n")
|
|
993
|
+
|
|
994
|
+
# Run quality gates with work item context
|
|
995
|
+
gate_results, all_passed, failed_gates = run_quality_gates(work_item)
|
|
996
|
+
|
|
997
|
+
if not all_passed:
|
|
998
|
+
logger.error(f"Quality gates failed: {failed_gates}")
|
|
999
|
+
output.info("\n❌ Required quality gates failed. Fix issues before completing session.")
|
|
1000
|
+
output.info(f"Failed gates: {', '.join(failed_gates)}")
|
|
1001
|
+
return 1
|
|
1002
|
+
|
|
1003
|
+
logger.info("All required quality gates passed")
|
|
1004
|
+
output.info("\n✓ All required quality gates PASSED\n")
|
|
1005
|
+
|
|
1006
|
+
# Update all tracking (stack, tree)
|
|
1007
|
+
update_all_tracking(session_num)
|
|
1008
|
+
|
|
1009
|
+
# Trigger curation if needed (every N sessions)
|
|
1010
|
+
trigger_curation_if_needed(session_num)
|
|
1011
|
+
|
|
1012
|
+
# Extract learnings manually or from file
|
|
1013
|
+
learnings = extract_learnings_from_session(args.learnings_file)
|
|
1014
|
+
|
|
1015
|
+
# Process learnings with learning_curator if available
|
|
1016
|
+
if learnings:
|
|
1017
|
+
logger.info(f"Processing {len(learnings)} learnings")
|
|
1018
|
+
output.info(f"\nProcessing {len(learnings)} learnings...")
|
|
1019
|
+
try:
|
|
1020
|
+
from solokit.learning.curator import LearningsCurator
|
|
1021
|
+
|
|
1022
|
+
curator = LearningsCurator()
|
|
1023
|
+
added_count = 0
|
|
1024
|
+
for learning in learnings:
|
|
1025
|
+
# Use standardized entry creator for consistent metadata structure
|
|
1026
|
+
# This ensures both 'learned_in' and 'context' fields are present
|
|
1027
|
+
source_type = "temp_file" if args.learnings_file else "manual"
|
|
1028
|
+
context = (
|
|
1029
|
+
f"Temp file: {args.learnings_file}" if args.learnings_file else "Manual entry"
|
|
1030
|
+
)
|
|
1031
|
+
|
|
1032
|
+
learning_dict = curator.create_learning_entry(
|
|
1033
|
+
content=learning,
|
|
1034
|
+
source=source_type,
|
|
1035
|
+
session_id=f"session_{session_num:03d}",
|
|
1036
|
+
context=context,
|
|
1037
|
+
)
|
|
1038
|
+
|
|
1039
|
+
if curator.add_learning_if_new(learning_dict):
|
|
1040
|
+
added_count += 1
|
|
1041
|
+
output.info(f" ✓ Added: {learning}")
|
|
1042
|
+
else:
|
|
1043
|
+
output.info(f" ⊘ Duplicate: {learning}")
|
|
1044
|
+
|
|
1045
|
+
if added_count > 0:
|
|
1046
|
+
logger.info(f"Added {added_count} new learnings")
|
|
1047
|
+
output.info(f"\n✓ Added {added_count} new learning(s) to learnings.json")
|
|
1048
|
+
else:
|
|
1049
|
+
logger.info("No new learnings added (all duplicates)")
|
|
1050
|
+
output.info("\n⊘ No new learnings added (all were duplicates)")
|
|
1051
|
+
except Exception as e:
|
|
1052
|
+
logger.warning(f"Failed to process learnings: {e}", exc_info=True)
|
|
1053
|
+
output.warning(f"Failed to process learnings: {e}")
|
|
1054
|
+
|
|
1055
|
+
# Determine work item completion status
|
|
1056
|
+
work_item_title = work_items_data["work_items"][work_item_id]["title"]
|
|
1057
|
+
|
|
1058
|
+
if args.complete:
|
|
1059
|
+
output.info(f"\n✓ Marking work item '{work_item_title}' as complete (--complete flag)")
|
|
1060
|
+
is_complete = True
|
|
1061
|
+
elif args.incomplete:
|
|
1062
|
+
output.info(f"\n✓ Keeping work item '{work_item_title}' as in-progress (--incomplete flag)")
|
|
1063
|
+
is_complete = False
|
|
1064
|
+
else:
|
|
1065
|
+
# Must specify either --complete or --incomplete flag (no interactive fallback)
|
|
1066
|
+
logger.error("Must specify --complete or --incomplete flag")
|
|
1067
|
+
output.info("Error: Must specify either --complete or --incomplete flag")
|
|
1068
|
+
output.info("")
|
|
1069
|
+
output.info("Usage:")
|
|
1070
|
+
output.info(" sk end --complete # Mark work item as completed")
|
|
1071
|
+
output.info(" sk end --incomplete # Keep work item as in-progress")
|
|
1072
|
+
output.info("")
|
|
1073
|
+
output.info("For Claude Code users: Use /end slash command for interactive UI")
|
|
1074
|
+
return 1
|
|
1075
|
+
|
|
1076
|
+
# Track changes for update_history
|
|
1077
|
+
changes = []
|
|
1078
|
+
previous_status = work_items_data["work_items"][work_item_id]["status"]
|
|
1079
|
+
|
|
1080
|
+
# Update work item status
|
|
1081
|
+
if is_complete:
|
|
1082
|
+
new_status = WorkItemStatus.COMPLETED.value
|
|
1083
|
+
work_items_data["work_items"][work_item_id]["status"] = new_status
|
|
1084
|
+
if "metadata" not in work_items_data["work_items"][work_item_id]:
|
|
1085
|
+
work_items_data["work_items"][work_item_id]["metadata"] = {}
|
|
1086
|
+
work_items_data["work_items"][work_item_id]["metadata"]["completed_at"] = (
|
|
1087
|
+
datetime.now().isoformat()
|
|
1088
|
+
)
|
|
1089
|
+
|
|
1090
|
+
# Record changes
|
|
1091
|
+
if previous_status != new_status:
|
|
1092
|
+
changes.append(f" status: {previous_status} → {new_status}")
|
|
1093
|
+
changes.append(f" metadata.completed_at: {datetime.now().isoformat()}")
|
|
1094
|
+
else:
|
|
1095
|
+
new_status = WorkItemStatus.IN_PROGRESS.value
|
|
1096
|
+
work_items_data["work_items"][work_item_id]["status"] = new_status
|
|
1097
|
+
|
|
1098
|
+
# Record changes
|
|
1099
|
+
if previous_status != new_status:
|
|
1100
|
+
changes.append(f" status: {previous_status} → {new_status}")
|
|
1101
|
+
|
|
1102
|
+
# Add update_history entry if changes were made
|
|
1103
|
+
if changes:
|
|
1104
|
+
if "update_history" not in work_items_data["work_items"][work_item_id]:
|
|
1105
|
+
work_items_data["work_items"][work_item_id]["update_history"] = []
|
|
1106
|
+
|
|
1107
|
+
work_items_data["work_items"][work_item_id]["update_history"].append(
|
|
1108
|
+
{"timestamp": datetime.now().isoformat(), "changes": changes}
|
|
1109
|
+
)
|
|
1110
|
+
|
|
1111
|
+
# Update metadata counters
|
|
1112
|
+
work_items = work_items_data.get("work_items", {})
|
|
1113
|
+
work_items_data["metadata"]["total_items"] = len(work_items)
|
|
1114
|
+
work_items_data["metadata"]["completed"] = sum(
|
|
1115
|
+
1 for item in work_items.values() if item["status"] == WorkItemStatus.COMPLETED.value
|
|
1116
|
+
)
|
|
1117
|
+
work_items_data["metadata"]["in_progress"] = sum(
|
|
1118
|
+
1 for item in work_items.values() if item["status"] == WorkItemStatus.IN_PROGRESS.value
|
|
1119
|
+
)
|
|
1120
|
+
work_items_data["metadata"]["blocked"] = sum(
|
|
1121
|
+
1 for item in work_items.values() if item["status"] == WorkItemStatus.BLOCKED.value
|
|
1122
|
+
)
|
|
1123
|
+
work_items_data["metadata"]["last_updated"] = datetime.now().isoformat()
|
|
1124
|
+
|
|
1125
|
+
# Save updated work items
|
|
1126
|
+
with open(".session/tracking/work_items.json", "w") as f:
|
|
1127
|
+
json.dump(work_items_data, f, indent=2)
|
|
1128
|
+
|
|
1129
|
+
# Generate commit message
|
|
1130
|
+
commit_message = generate_commit_message(status, work_item)
|
|
1131
|
+
|
|
1132
|
+
# Complete git workflow (commit, push, optionally merge or create PR)
|
|
1133
|
+
output.info("\nCompleting git workflow...")
|
|
1134
|
+
git_result = complete_git_workflow(work_item_id, commit_message, session_num)
|
|
1135
|
+
|
|
1136
|
+
if git_result.get("success"):
|
|
1137
|
+
output.success(f"Git: {git_result.get('message', 'Success')}")
|
|
1138
|
+
else:
|
|
1139
|
+
output.warning(f"Git: {git_result.get('message', 'Failed')}")
|
|
1140
|
+
|
|
1141
|
+
# Record commits to work item tracking (Bug #15 fix)
|
|
1142
|
+
record_session_commits(work_item_id)
|
|
1143
|
+
|
|
1144
|
+
# Reload work_items_data to include newly recorded commits (Enhancement #11 Phase 1)
|
|
1145
|
+
work_items_data = load_work_items()
|
|
1146
|
+
|
|
1147
|
+
# Generate comprehensive summary
|
|
1148
|
+
summary = generate_summary(status, work_items_data, gate_results, learnings)
|
|
1149
|
+
|
|
1150
|
+
# Save summary
|
|
1151
|
+
history_dir = Path(".session/history")
|
|
1152
|
+
history_dir.mkdir(exist_ok=True)
|
|
1153
|
+
summary_file = history_dir / f"session_{session_num:03d}_summary.md"
|
|
1154
|
+
try:
|
|
1155
|
+
with open(summary_file, "w") as f:
|
|
1156
|
+
f.write(summary)
|
|
1157
|
+
logger.info(f"Saved session summary to {summary_file}")
|
|
1158
|
+
except OSError as e:
|
|
1159
|
+
logger.error(f"Failed to save session summary: {e}")
|
|
1160
|
+
output.warning(f"Failed to save session summary: {e}")
|
|
1161
|
+
|
|
1162
|
+
# Auto-extract learnings from session artifacts (Bug #16 fix)
|
|
1163
|
+
# Now that commit and summary are created, we can extract from them
|
|
1164
|
+
auto_extract_learnings(session_num)
|
|
1165
|
+
|
|
1166
|
+
# Print summary
|
|
1167
|
+
output.info("\n" + "=" * 50)
|
|
1168
|
+
output.info(summary)
|
|
1169
|
+
output.info("=" * 50)
|
|
1170
|
+
|
|
1171
|
+
# Update status
|
|
1172
|
+
status["status"] = WorkItemStatus.COMPLETED.value
|
|
1173
|
+
status["completed_at"] = datetime.now().isoformat()
|
|
1174
|
+
try:
|
|
1175
|
+
with open(".session/tracking/status_update.json", "w") as f:
|
|
1176
|
+
json.dump(status, f, indent=2)
|
|
1177
|
+
logger.info("Updated session status to completed")
|
|
1178
|
+
except OSError as e:
|
|
1179
|
+
logger.error(f"Failed to update session status: {e}")
|
|
1180
|
+
output.warning(f"Failed to update session status: {e}")
|
|
1181
|
+
|
|
1182
|
+
logger.info(f"Session {session_num} completed successfully")
|
|
1183
|
+
output.info("\n✓ Session completed successfully")
|
|
1184
|
+
return 0
|
|
1185
|
+
|
|
1186
|
+
|
|
1187
|
+
if __name__ == "__main__":
    # Script entry point: run main() and propagate its integer return code
    # as the process exit status.
    #
    # Use ``raise SystemExit(...)`` instead of the original ``exit(...)``:
    # ``exit`` is an interactive convenience injected by the ``site`` module
    # and is not guaranteed to be available (e.g. when Python is started
    # with -S, or in embedded/frozen interpreters).  ``SystemExit`` is a
    # builtin and behaves identically here, requiring no extra import.
    raise SystemExit(main())
|