tapps-agents 3.5.41__py3-none-any.whl → 3.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tapps_agents/__init__.py +2 -2
- tapps_agents/agents/__init__.py +22 -22
- tapps_agents/agents/analyst/__init__.py +5 -5
- tapps_agents/agents/architect/__init__.py +5 -5
- tapps_agents/agents/architect/agent.py +1033 -1033
- tapps_agents/agents/architect/pattern_detector.py +75 -75
- tapps_agents/agents/cleanup/__init__.py +7 -7
- tapps_agents/agents/cleanup/agent.py +445 -445
- tapps_agents/agents/debugger/__init__.py +7 -7
- tapps_agents/agents/debugger/agent.py +310 -310
- tapps_agents/agents/debugger/error_analyzer.py +437 -437
- tapps_agents/agents/designer/__init__.py +5 -5
- tapps_agents/agents/designer/agent.py +786 -786
- tapps_agents/agents/designer/visual_designer.py +638 -638
- tapps_agents/agents/documenter/__init__.py +7 -7
- tapps_agents/agents/documenter/agent.py +531 -531
- tapps_agents/agents/documenter/doc_generator.py +472 -472
- tapps_agents/agents/documenter/doc_validator.py +393 -393
- tapps_agents/agents/documenter/framework_doc_updater.py +493 -493
- tapps_agents/agents/enhancer/__init__.py +7 -7
- tapps_agents/agents/evaluator/__init__.py +7 -7
- tapps_agents/agents/evaluator/agent.py +443 -443
- tapps_agents/agents/evaluator/priority_evaluator.py +641 -641
- tapps_agents/agents/evaluator/quality_analyzer.py +147 -147
- tapps_agents/agents/evaluator/report_generator.py +344 -344
- tapps_agents/agents/evaluator/usage_analyzer.py +192 -192
- tapps_agents/agents/evaluator/workflow_analyzer.py +189 -189
- tapps_agents/agents/implementer/__init__.py +7 -7
- tapps_agents/agents/implementer/agent.py +798 -798
- tapps_agents/agents/implementer/auto_fix.py +1119 -1119
- tapps_agents/agents/implementer/code_generator.py +73 -73
- tapps_agents/agents/improver/__init__.py +1 -1
- tapps_agents/agents/improver/agent.py +753 -753
- tapps_agents/agents/ops/__init__.py +1 -1
- tapps_agents/agents/ops/agent.py +619 -619
- tapps_agents/agents/ops/dependency_analyzer.py +600 -600
- tapps_agents/agents/orchestrator/__init__.py +5 -5
- tapps_agents/agents/orchestrator/agent.py +522 -522
- tapps_agents/agents/planner/__init__.py +7 -7
- tapps_agents/agents/planner/agent.py +1127 -1127
- tapps_agents/agents/reviewer/__init__.py +24 -24
- tapps_agents/agents/reviewer/agent.py +3513 -3513
- tapps_agents/agents/reviewer/aggregator.py +213 -213
- tapps_agents/agents/reviewer/batch_review.py +448 -448
- tapps_agents/agents/reviewer/cache.py +443 -443
- tapps_agents/agents/reviewer/context7_enhancer.py +630 -630
- tapps_agents/agents/reviewer/context_detector.py +203 -203
- tapps_agents/agents/reviewer/docker_compose_validator.py +158 -158
- tapps_agents/agents/reviewer/dockerfile_validator.py +176 -176
- tapps_agents/agents/reviewer/error_handling.py +126 -126
- tapps_agents/agents/reviewer/feedback_generator.py +490 -490
- tapps_agents/agents/reviewer/influxdb_validator.py +316 -316
- tapps_agents/agents/reviewer/issue_tracking.py +169 -169
- tapps_agents/agents/reviewer/library_detector.py +295 -295
- tapps_agents/agents/reviewer/library_patterns.py +268 -268
- tapps_agents/agents/reviewer/maintainability_scorer.py +593 -593
- tapps_agents/agents/reviewer/metric_strategies.py +276 -276
- tapps_agents/agents/reviewer/mqtt_validator.py +160 -160
- tapps_agents/agents/reviewer/output_enhancer.py +105 -105
- tapps_agents/agents/reviewer/pattern_detector.py +241 -241
- tapps_agents/agents/reviewer/performance_scorer.py +357 -357
- tapps_agents/agents/reviewer/phased_review.py +516 -516
- tapps_agents/agents/reviewer/progressive_review.py +435 -435
- tapps_agents/agents/reviewer/react_scorer.py +331 -331
- tapps_agents/agents/reviewer/score_constants.py +228 -228
- tapps_agents/agents/reviewer/score_validator.py +507 -507
- tapps_agents/agents/reviewer/scorer_registry.py +373 -373
- tapps_agents/agents/reviewer/scoring.py +1566 -1566
- tapps_agents/agents/reviewer/service_discovery.py +534 -534
- tapps_agents/agents/reviewer/tools/__init__.py +41 -41
- tapps_agents/agents/reviewer/tools/parallel_executor.py +581 -581
- tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -250
- tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -284
- tapps_agents/agents/reviewer/typescript_scorer.py +1142 -1142
- tapps_agents/agents/reviewer/validation.py +208 -208
- tapps_agents/agents/reviewer/websocket_validator.py +132 -132
- tapps_agents/agents/tester/__init__.py +7 -7
- tapps_agents/agents/tester/accessibility_auditor.py +309 -309
- tapps_agents/agents/tester/agent.py +1080 -1080
- tapps_agents/agents/tester/batch_generator.py +54 -54
- tapps_agents/agents/tester/context_learner.py +51 -51
- tapps_agents/agents/tester/coverage_analyzer.py +386 -386
- tapps_agents/agents/tester/coverage_test_generator.py +290 -290
- tapps_agents/agents/tester/debug_enhancer.py +238 -238
- tapps_agents/agents/tester/device_emulator.py +241 -241
- tapps_agents/agents/tester/integration_generator.py +62 -62
- tapps_agents/agents/tester/network_recorder.py +300 -300
- tapps_agents/agents/tester/performance_monitor.py +320 -320
- tapps_agents/agents/tester/test_fixer.py +316 -316
- tapps_agents/agents/tester/test_generator.py +632 -632
- tapps_agents/agents/tester/trace_manager.py +234 -234
- tapps_agents/agents/tester/visual_regression.py +291 -291
- tapps_agents/analysis/pattern_detector.py +36 -36
- tapps_agents/beads/hydration.py +213 -213
- tapps_agents/beads/parse.py +32 -32
- tapps_agents/beads/specs.py +206 -206
- tapps_agents/cli/__init__.py +9 -9
- tapps_agents/cli/__main__.py +8 -8
- tapps_agents/cli/base.py +478 -478
- tapps_agents/cli/command_classifier.py +72 -72
- tapps_agents/cli/commands/__init__.py +2 -2
- tapps_agents/cli/commands/analyst.py +173 -173
- tapps_agents/cli/commands/architect.py +109 -109
- tapps_agents/cli/commands/cleanup_agent.py +92 -92
- tapps_agents/cli/commands/common.py +126 -126
- tapps_agents/cli/commands/debugger.py +90 -90
- tapps_agents/cli/commands/designer.py +112 -112
- tapps_agents/cli/commands/documenter.py +136 -136
- tapps_agents/cli/commands/enhancer.py +110 -110
- tapps_agents/cli/commands/evaluator.py +255 -255
- tapps_agents/cli/commands/health.py +665 -665
- tapps_agents/cli/commands/implementer.py +301 -301
- tapps_agents/cli/commands/improver.py +91 -91
- tapps_agents/cli/commands/knowledge.py +111 -111
- tapps_agents/cli/commands/learning.py +172 -172
- tapps_agents/cli/commands/observability.py +283 -283
- tapps_agents/cli/commands/ops.py +135 -135
- tapps_agents/cli/commands/orchestrator.py +116 -116
- tapps_agents/cli/commands/planner.py +237 -237
- tapps_agents/cli/commands/reviewer.py +1872 -1872
- tapps_agents/cli/commands/status.py +285 -285
- tapps_agents/cli/commands/task.py +227 -227
- tapps_agents/cli/commands/tester.py +191 -191
- tapps_agents/cli/commands/top_level.py +3586 -3586
- tapps_agents/cli/feedback.py +936 -936
- tapps_agents/cli/formatters.py +608 -608
- tapps_agents/cli/help/__init__.py +7 -7
- tapps_agents/cli/help/static_help.py +425 -425
- tapps_agents/cli/network_detection.py +110 -110
- tapps_agents/cli/output_compactor.py +274 -274
- tapps_agents/cli/parsers/__init__.py +2 -2
- tapps_agents/cli/parsers/analyst.py +186 -186
- tapps_agents/cli/parsers/architect.py +167 -167
- tapps_agents/cli/parsers/cleanup_agent.py +228 -228
- tapps_agents/cli/parsers/debugger.py +116 -116
- tapps_agents/cli/parsers/designer.py +182 -182
- tapps_agents/cli/parsers/documenter.py +134 -134
- tapps_agents/cli/parsers/enhancer.py +113 -113
- tapps_agents/cli/parsers/evaluator.py +213 -213
- tapps_agents/cli/parsers/implementer.py +168 -168
- tapps_agents/cli/parsers/improver.py +132 -132
- tapps_agents/cli/parsers/ops.py +159 -159
- tapps_agents/cli/parsers/orchestrator.py +98 -98
- tapps_agents/cli/parsers/planner.py +145 -145
- tapps_agents/cli/parsers/reviewer.py +462 -462
- tapps_agents/cli/parsers/tester.py +124 -124
- tapps_agents/cli/progress_heartbeat.py +254 -254
- tapps_agents/cli/streaming_progress.py +336 -336
- tapps_agents/cli/utils/__init__.py +6 -6
- tapps_agents/cli/utils/agent_lifecycle.py +48 -48
- tapps_agents/cli/utils/error_formatter.py +82 -82
- tapps_agents/cli/utils/error_recovery.py +188 -188
- tapps_agents/cli/utils/output_handler.py +59 -59
- tapps_agents/cli/utils/prompt_enhancer.py +319 -319
- tapps_agents/cli/validators/__init__.py +9 -9
- tapps_agents/cli/validators/command_validator.py +81 -81
- tapps_agents/context7/__init__.py +112 -112
- tapps_agents/context7/agent_integration.py +869 -869
- tapps_agents/context7/analytics.py +382 -382
- tapps_agents/context7/analytics_dashboard.py +299 -299
- tapps_agents/context7/async_cache.py +681 -681
- tapps_agents/context7/backup_client.py +958 -958
- tapps_agents/context7/cache_locking.py +194 -194
- tapps_agents/context7/cache_metadata.py +214 -214
- tapps_agents/context7/cache_prewarm.py +488 -488
- tapps_agents/context7/cache_structure.py +168 -168
- tapps_agents/context7/cache_warming.py +604 -604
- tapps_agents/context7/circuit_breaker.py +376 -376
- tapps_agents/context7/cleanup.py +461 -461
- tapps_agents/context7/commands.py +858 -858
- tapps_agents/context7/credential_validation.py +276 -276
- tapps_agents/context7/cross_reference_resolver.py +168 -168
- tapps_agents/context7/cross_references.py +424 -424
- tapps_agents/context7/doc_manager.py +225 -225
- tapps_agents/context7/fuzzy_matcher.py +369 -369
- tapps_agents/context7/kb_cache.py +404 -404
- tapps_agents/context7/language_detector.py +219 -219
- tapps_agents/context7/library_detector.py +725 -725
- tapps_agents/context7/lookup.py +738 -738
- tapps_agents/context7/metadata.py +258 -258
- tapps_agents/context7/refresh_queue.py +300 -300
- tapps_agents/context7/security.py +373 -373
- tapps_agents/context7/staleness_policies.py +278 -278
- tapps_agents/context7/tiles_integration.py +47 -47
- tapps_agents/continuous_bug_fix/__init__.py +20 -20
- tapps_agents/continuous_bug_fix/bug_finder.py +306 -306
- tapps_agents/continuous_bug_fix/bug_fix_coordinator.py +177 -177
- tapps_agents/continuous_bug_fix/commit_manager.py +178 -178
- tapps_agents/continuous_bug_fix/continuous_bug_fixer.py +322 -322
- tapps_agents/continuous_bug_fix/proactive_bug_finder.py +285 -285
- tapps_agents/core/__init__.py +298 -298
- tapps_agents/core/adaptive_cache_config.py +432 -432
- tapps_agents/core/agent_base.py +647 -647
- tapps_agents/core/agent_cache.py +466 -466
- tapps_agents/core/agent_learning.py +1865 -1865
- tapps_agents/core/analytics_dashboard.py +563 -563
- tapps_agents/core/analytics_enhancements.py +597 -597
- tapps_agents/core/anonymization.py +274 -274
- tapps_agents/core/artifact_context_builder.py +293 -0
- tapps_agents/core/ast_parser.py +228 -228
- tapps_agents/core/async_file_ops.py +402 -402
- tapps_agents/core/best_practice_consultant.py +299 -299
- tapps_agents/core/brownfield_analyzer.py +299 -299
- tapps_agents/core/brownfield_review.py +541 -541
- tapps_agents/core/browser_controller.py +513 -513
- tapps_agents/core/capability_registry.py +418 -418
- tapps_agents/core/change_impact_analyzer.py +190 -190
- tapps_agents/core/checkpoint_manager.py +377 -377
- tapps_agents/core/code_generator.py +329 -329
- tapps_agents/core/code_validator.py +276 -276
- tapps_agents/core/command_registry.py +327 -327
- tapps_agents/core/config.py +33 -0
- tapps_agents/core/context_gathering/__init__.py +2 -2
- tapps_agents/core/context_gathering/repository_explorer.py +28 -28
- tapps_agents/core/context_intelligence/__init__.py +2 -2
- tapps_agents/core/context_intelligence/relevance_scorer.py +24 -24
- tapps_agents/core/context_intelligence/token_budget_manager.py +27 -27
- tapps_agents/core/context_manager.py +240 -240
- tapps_agents/core/cursor_feedback_monitor.py +146 -146
- tapps_agents/core/cursor_verification.py +290 -290
- tapps_agents/core/customization_loader.py +280 -280
- tapps_agents/core/customization_schema.py +260 -260
- tapps_agents/core/customization_template.py +238 -238
- tapps_agents/core/debug_logger.py +124 -124
- tapps_agents/core/design_validator.py +298 -298
- tapps_agents/core/diagram_generator.py +226 -226
- tapps_agents/core/docker_utils.py +232 -232
- tapps_agents/core/document_generator.py +617 -617
- tapps_agents/core/domain_detector.py +30 -30
- tapps_agents/core/error_envelope.py +454 -454
- tapps_agents/core/error_handler.py +270 -270
- tapps_agents/core/estimation_tracker.py +189 -189
- tapps_agents/core/eval_prompt_engine.py +116 -116
- tapps_agents/core/evaluation_base.py +119 -119
- tapps_agents/core/evaluation_models.py +320 -320
- tapps_agents/core/evaluation_orchestrator.py +225 -225
- tapps_agents/core/evaluators/__init__.py +7 -7
- tapps_agents/core/evaluators/architectural_evaluator.py +205 -205
- tapps_agents/core/evaluators/behavioral_evaluator.py +160 -160
- tapps_agents/core/evaluators/performance_profile_evaluator.py +160 -160
- tapps_agents/core/evaluators/security_posture_evaluator.py +148 -148
- tapps_agents/core/evaluators/spec_compliance_evaluator.py +181 -181
- tapps_agents/core/exceptions.py +107 -107
- tapps_agents/core/expert_config_generator.py +293 -293
- tapps_agents/core/export_schema.py +202 -202
- tapps_agents/core/external_feedback_models.py +102 -102
- tapps_agents/core/external_feedback_storage.py +213 -213
- tapps_agents/core/fallback_strategy.py +314 -314
- tapps_agents/core/feedback_analyzer.py +162 -162
- tapps_agents/core/feedback_collector.py +178 -178
- tapps_agents/core/git_operations.py +445 -445
- tapps_agents/core/hardware_profiler.py +151 -151
- tapps_agents/core/instructions.py +324 -324
- tapps_agents/core/io_guardrails.py +69 -69
- tapps_agents/core/issue_manifest.py +249 -249
- tapps_agents/core/issue_schema.py +139 -139
- tapps_agents/core/json_utils.py +128 -128
- tapps_agents/core/knowledge_graph.py +446 -446
- tapps_agents/core/language_detector.py +296 -296
- tapps_agents/core/learning_confidence.py +242 -242
- tapps_agents/core/learning_dashboard.py +246 -246
- tapps_agents/core/learning_decision.py +384 -384
- tapps_agents/core/learning_explainability.py +578 -578
- tapps_agents/core/learning_export.py +287 -287
- tapps_agents/core/learning_integration.py +228 -228
- tapps_agents/core/llm_behavior.py +232 -232
- tapps_agents/core/long_duration_support.py +786 -786
- tapps_agents/core/mcp_setup.py +106 -106
- tapps_agents/core/memory_integration.py +396 -396
- tapps_agents/core/meta_learning.py +666 -666
- tapps_agents/core/module_path_sanitizer.py +199 -199
- tapps_agents/core/multi_agent_orchestrator.py +382 -382
- tapps_agents/core/network_errors.py +125 -125
- tapps_agents/core/nfr_validator.py +336 -336
- tapps_agents/core/offline_mode.py +158 -158
- tapps_agents/core/output_contracts.py +300 -300
- tapps_agents/core/output_formatter.py +300 -300
- tapps_agents/core/path_normalizer.py +174 -174
- tapps_agents/core/path_validator.py +322 -322
- tapps_agents/core/pattern_library.py +250 -250
- tapps_agents/core/performance_benchmark.py +301 -301
- tapps_agents/core/performance_monitor.py +184 -184
- tapps_agents/core/playwright_mcp_controller.py +771 -771
- tapps_agents/core/policy_loader.py +135 -135
- tapps_agents/core/progress.py +166 -166
- tapps_agents/core/project_profile.py +354 -354
- tapps_agents/core/project_type_detector.py +454 -454
- tapps_agents/core/prompt_base.py +223 -223
- tapps_agents/core/prompt_learning/__init__.py +2 -2
- tapps_agents/core/prompt_learning/learning_loop.py +24 -24
- tapps_agents/core/prompt_learning/project_prompt_store.py +25 -25
- tapps_agents/core/prompt_learning/skills_prompt_analyzer.py +35 -35
- tapps_agents/core/prompt_optimization/__init__.py +6 -6
- tapps_agents/core/prompt_optimization/ab_tester.py +114 -114
- tapps_agents/core/prompt_optimization/correlation_analyzer.py +160 -160
- tapps_agents/core/prompt_optimization/progressive_refiner.py +129 -129
- tapps_agents/core/prompt_optimization/prompt_library.py +37 -37
- tapps_agents/core/requirements_evaluator.py +431 -431
- tapps_agents/core/resource_aware_executor.py +449 -449
- tapps_agents/core/resource_monitor.py +343 -343
- tapps_agents/core/resume_handler.py +298 -298
- tapps_agents/core/retry_handler.py +197 -197
- tapps_agents/core/review_checklists.py +479 -479
- tapps_agents/core/role_loader.py +201 -201
- tapps_agents/core/role_template_loader.py +201 -201
- tapps_agents/core/runtime_mode.py +60 -60
- tapps_agents/core/security_scanner.py +342 -342
- tapps_agents/core/skill_agent_registry.py +194 -194
- tapps_agents/core/skill_integration.py +208 -208
- tapps_agents/core/skill_loader.py +492 -492
- tapps_agents/core/skill_template.py +341 -341
- tapps_agents/core/skill_validator.py +478 -478
- tapps_agents/core/stack_analyzer.py +35 -35
- tapps_agents/core/startup.py +174 -174
- tapps_agents/core/storage_manager.py +397 -397
- tapps_agents/core/storage_models.py +166 -166
- tapps_agents/core/story_evaluator.py +410 -410
- tapps_agents/core/subprocess_utils.py +170 -170
- tapps_agents/core/task_duration.py +296 -296
- tapps_agents/core/task_memory.py +582 -582
- tapps_agents/core/task_state.py +226 -226
- tapps_agents/core/tech_stack_priorities.py +208 -208
- tapps_agents/core/temp_directory.py +194 -194
- tapps_agents/core/template_merger.py +600 -600
- tapps_agents/core/template_selector.py +280 -280
- tapps_agents/core/test_generator.py +286 -286
- tapps_agents/core/tiered_context.py +253 -253
- tapps_agents/core/token_monitor.py +345 -345
- tapps_agents/core/traceability.py +254 -254
- tapps_agents/core/trajectory_tracker.py +50 -50
- tapps_agents/core/unicode_safe.py +143 -143
- tapps_agents/core/unified_cache_config.py +170 -170
- tapps_agents/core/unified_state.py +324 -324
- tapps_agents/core/validate_cursor_setup.py +237 -237
- tapps_agents/core/validation_registry.py +136 -136
- tapps_agents/core/validators/__init__.py +4 -4
- tapps_agents/core/validators/python_validator.py +87 -87
- tapps_agents/core/verification_agent.py +90 -90
- tapps_agents/core/visual_feedback.py +644 -644
- tapps_agents/core/workflow_validator.py +197 -197
- tapps_agents/core/worktree.py +367 -367
- tapps_agents/docker/__init__.py +10 -10
- tapps_agents/docker/analyzer.py +186 -186
- tapps_agents/docker/debugger.py +229 -229
- tapps_agents/docker/error_patterns.py +216 -216
- tapps_agents/epic/__init__.py +22 -22
- tapps_agents/epic/beads_sync.py +115 -115
- tapps_agents/epic/markdown_sync.py +105 -105
- tapps_agents/epic/models.py +96 -96
- tapps_agents/experts/__init__.py +163 -163
- tapps_agents/experts/agent_integration.py +243 -243
- tapps_agents/experts/auto_generator.py +331 -331
- tapps_agents/experts/base_expert.py +536 -536
- tapps_agents/experts/builtin_registry.py +261 -261
- tapps_agents/experts/business_metrics.py +565 -565
- tapps_agents/experts/cache.py +266 -266
- tapps_agents/experts/confidence_breakdown.py +306 -306
- tapps_agents/experts/confidence_calculator.py +336 -336
- tapps_agents/experts/confidence_metrics.py +236 -236
- tapps_agents/experts/domain_config.py +311 -311
- tapps_agents/experts/domain_detector.py +550 -550
- tapps_agents/experts/domain_utils.py +84 -84
- tapps_agents/experts/expert_config.py +113 -113
- tapps_agents/experts/expert_engine.py +465 -465
- tapps_agents/experts/expert_registry.py +744 -744
- tapps_agents/experts/expert_synthesizer.py +70 -70
- tapps_agents/experts/governance.py +197 -197
- tapps_agents/experts/history_logger.py +312 -312
- tapps_agents/experts/knowledge/README.md +180 -180
- tapps_agents/experts/knowledge/accessibility/accessible-forms.md +331 -331
- tapps_agents/experts/knowledge/accessibility/aria-patterns.md +344 -344
- tapps_agents/experts/knowledge/accessibility/color-contrast.md +285 -285
- tapps_agents/experts/knowledge/accessibility/keyboard-navigation.md +332 -332
- tapps_agents/experts/knowledge/accessibility/screen-readers.md +282 -282
- tapps_agents/experts/knowledge/accessibility/semantic-html.md +355 -355
- tapps_agents/experts/knowledge/accessibility/testing-accessibility.md +369 -369
- tapps_agents/experts/knowledge/accessibility/wcag-2.1.md +296 -296
- tapps_agents/experts/knowledge/accessibility/wcag-2.2.md +211 -211
- tapps_agents/experts/knowledge/agent-learning/best-practices.md +715 -715
- tapps_agents/experts/knowledge/agent-learning/pattern-extraction.md +282 -282
- tapps_agents/experts/knowledge/agent-learning/prompt-optimization.md +320 -320
- tapps_agents/experts/knowledge/ai-frameworks/model-optimization.md +90 -90
- tapps_agents/experts/knowledge/ai-frameworks/openvino-patterns.md +260 -260
- tapps_agents/experts/knowledge/api-design-integration/api-gateway-patterns.md +309 -309
- tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +521 -521
- tapps_agents/experts/knowledge/api-design-integration/api-versioning.md +421 -421
- tapps_agents/experts/knowledge/api-design-integration/async-protocol-patterns.md +61 -61
- tapps_agents/experts/knowledge/api-design-integration/contract-testing.md +221 -221
- tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +489 -489
- tapps_agents/experts/knowledge/api-design-integration/fastapi-patterns.md +360 -360
- tapps_agents/experts/knowledge/api-design-integration/fastapi-testing.md +262 -262
- tapps_agents/experts/knowledge/api-design-integration/graphql-patterns.md +582 -582
- tapps_agents/experts/knowledge/api-design-integration/grpc-best-practices.md +499 -499
- tapps_agents/experts/knowledge/api-design-integration/mqtt-patterns.md +455 -455
- tapps_agents/experts/knowledge/api-design-integration/rate-limiting.md +507 -507
- tapps_agents/experts/knowledge/api-design-integration/restful-api-design.md +618 -618
- tapps_agents/experts/knowledge/api-design-integration/websocket-patterns.md +480 -480
- tapps_agents/experts/knowledge/cloud-infrastructure/cloud-native-patterns.md +175 -175
- tapps_agents/experts/knowledge/cloud-infrastructure/container-health-checks.md +261 -261
- tapps_agents/experts/knowledge/cloud-infrastructure/containerization.md +222 -222
- tapps_agents/experts/knowledge/cloud-infrastructure/cost-optimization.md +122 -122
- tapps_agents/experts/knowledge/cloud-infrastructure/disaster-recovery.md +153 -153
- tapps_agents/experts/knowledge/cloud-infrastructure/dockerfile-patterns.md +285 -285
- tapps_agents/experts/knowledge/cloud-infrastructure/infrastructure-as-code.md +187 -187
- tapps_agents/experts/knowledge/cloud-infrastructure/kubernetes-patterns.md +253 -253
- tapps_agents/experts/knowledge/cloud-infrastructure/multi-cloud-strategies.md +155 -155
- tapps_agents/experts/knowledge/cloud-infrastructure/serverless-architecture.md +200 -200
- tapps_agents/experts/knowledge/code-quality-analysis/README.md +16 -16
- tapps_agents/experts/knowledge/code-quality-analysis/code-metrics.md +137 -137
- tapps_agents/experts/knowledge/code-quality-analysis/complexity-analysis.md +181 -181
- tapps_agents/experts/knowledge/code-quality-analysis/technical-debt-patterns.md +191 -191
- tapps_agents/experts/knowledge/data-privacy-compliance/anonymization.md +313 -313
- tapps_agents/experts/knowledge/data-privacy-compliance/ccpa.md +255 -255
- tapps_agents/experts/knowledge/data-privacy-compliance/consent-management.md +282 -282
- tapps_agents/experts/knowledge/data-privacy-compliance/data-minimization.md +275 -275
- tapps_agents/experts/knowledge/data-privacy-compliance/data-retention.md +297 -297
- tapps_agents/experts/knowledge/data-privacy-compliance/data-subject-rights.md +383 -383
- tapps_agents/experts/knowledge/data-privacy-compliance/encryption-privacy.md +285 -285
- tapps_agents/experts/knowledge/data-privacy-compliance/gdpr.md +344 -344
- tapps_agents/experts/knowledge/data-privacy-compliance/hipaa.md +385 -385
- tapps_agents/experts/knowledge/data-privacy-compliance/privacy-by-design.md +280 -280
- tapps_agents/experts/knowledge/database-data-management/acid-vs-cap.md +164 -164
- tapps_agents/experts/knowledge/database-data-management/backup-and-recovery.md +182 -182
- tapps_agents/experts/knowledge/database-data-management/data-modeling.md +172 -172
- tapps_agents/experts/knowledge/database-data-management/database-design.md +187 -187
- tapps_agents/experts/knowledge/database-data-management/flux-query-optimization.md +342 -342
- tapps_agents/experts/knowledge/database-data-management/influxdb-connection-patterns.md +432 -432
- tapps_agents/experts/knowledge/database-data-management/influxdb-patterns.md +442 -442
- tapps_agents/experts/knowledge/database-data-management/migration-strategies.md +216 -216
- tapps_agents/experts/knowledge/database-data-management/nosql-patterns.md +259 -259
- tapps_agents/experts/knowledge/database-data-management/scalability-patterns.md +184 -184
- tapps_agents/experts/knowledge/database-data-management/sql-optimization.md +175 -175
- tapps_agents/experts/knowledge/database-data-management/time-series-modeling.md +444 -444
- tapps_agents/experts/knowledge/development-workflow/README.md +16 -16
- tapps_agents/experts/knowledge/development-workflow/automation-best-practices.md +216 -216
- tapps_agents/experts/knowledge/development-workflow/build-strategies.md +198 -198
- tapps_agents/experts/knowledge/development-workflow/deployment-patterns.md +205 -205
- tapps_agents/experts/knowledge/development-workflow/git-workflows.md +205 -205
- tapps_agents/experts/knowledge/documentation-knowledge-management/README.md +16 -16
- tapps_agents/experts/knowledge/documentation-knowledge-management/api-documentation-patterns.md +231 -231
- tapps_agents/experts/knowledge/documentation-knowledge-management/documentation-standards.md +191 -191
- tapps_agents/experts/knowledge/documentation-knowledge-management/knowledge-management.md +171 -171
- tapps_agents/experts/knowledge/documentation-knowledge-management/technical-writing-guide.md +192 -192
- tapps_agents/experts/knowledge/observability-monitoring/alerting-patterns.md +461 -461
- tapps_agents/experts/knowledge/observability-monitoring/apm-tools.md +459 -459
- tapps_agents/experts/knowledge/observability-monitoring/distributed-tracing.md +367 -367
- tapps_agents/experts/knowledge/observability-monitoring/logging-strategies.md +478 -478
- tapps_agents/experts/knowledge/observability-monitoring/metrics-and-monitoring.md +510 -510
- tapps_agents/experts/knowledge/observability-monitoring/observability-best-practices.md +492 -492
- tapps_agents/experts/knowledge/observability-monitoring/open-telemetry.md +573 -573
- tapps_agents/experts/knowledge/observability-monitoring/slo-sli-sla.md +419 -419
- tapps_agents/experts/knowledge/performance/anti-patterns.md +284 -284
- tapps_agents/experts/knowledge/performance/api-performance.md +256 -256
- tapps_agents/experts/knowledge/performance/caching.md +327 -327
- tapps_agents/experts/knowledge/performance/database-performance.md +252 -252
- tapps_agents/experts/knowledge/performance/optimization-patterns.md +327 -327
- tapps_agents/experts/knowledge/performance/profiling.md +297 -297
- tapps_agents/experts/knowledge/performance/resource-management.md +293 -293
- tapps_agents/experts/knowledge/performance/scalability.md +306 -306
- tapps_agents/experts/knowledge/security/owasp-top10.md +209 -209
- tapps_agents/experts/knowledge/security/secure-coding-practices.md +207 -207
- tapps_agents/experts/knowledge/security/threat-modeling.md +220 -220
- tapps_agents/experts/knowledge/security/vulnerability-patterns.md +342 -342
- tapps_agents/experts/knowledge/software-architecture/docker-compose-patterns.md +314 -314
- tapps_agents/experts/knowledge/software-architecture/microservices-patterns.md +379 -379
- tapps_agents/experts/knowledge/software-architecture/service-communication.md +316 -316
- tapps_agents/experts/knowledge/testing/best-practices.md +310 -310
- tapps_agents/experts/knowledge/testing/coverage-analysis.md +293 -293
- tapps_agents/experts/knowledge/testing/mocking.md +256 -256
- tapps_agents/experts/knowledge/testing/test-automation.md +276 -276
- tapps_agents/experts/knowledge/testing/test-data.md +271 -271
- tapps_agents/experts/knowledge/testing/test-design-patterns.md +280 -280
- tapps_agents/experts/knowledge/testing/test-maintenance.md +236 -236
- tapps_agents/experts/knowledge/testing/test-strategies.md +311 -311
- tapps_agents/experts/knowledge/user-experience/information-architecture.md +325 -325
- tapps_agents/experts/knowledge/user-experience/interaction-design.md +363 -363
- tapps_agents/experts/knowledge/user-experience/prototyping.md +293 -293
- tapps_agents/experts/knowledge/user-experience/usability-heuristics.md +337 -337
- tapps_agents/experts/knowledge/user-experience/usability-testing.md +311 -311
- tapps_agents/experts/knowledge/user-experience/user-journeys.md +296 -296
- tapps_agents/experts/knowledge/user-experience/user-research.md +373 -373
- tapps_agents/experts/knowledge/user-experience/ux-principles.md +340 -340
- tapps_agents/experts/knowledge_freshness.py +321 -321
- tapps_agents/experts/knowledge_ingestion.py +438 -438
- tapps_agents/experts/knowledge_need_detector.py +93 -93
- tapps_agents/experts/knowledge_validator.py +382 -382
- tapps_agents/experts/observability.py +440 -440
- tapps_agents/experts/passive_notifier.py +238 -238
- tapps_agents/experts/proactive_orchestrator.py +32 -32
- tapps_agents/experts/rag_chunker.py +205 -205
- tapps_agents/experts/rag_embedder.py +152 -152
- tapps_agents/experts/rag_evaluation.py +299 -299
- tapps_agents/experts/rag_index.py +303 -303
- tapps_agents/experts/rag_metrics.py +293 -293
- tapps_agents/experts/rag_safety.py +263 -263
- tapps_agents/experts/report_generator.py +296 -296
- tapps_agents/experts/setup_wizard.py +441 -441
- tapps_agents/experts/simple_rag.py +431 -431
- tapps_agents/experts/vector_rag.py +354 -354
- tapps_agents/experts/weight_distributor.py +304 -304
- tapps_agents/health/__init__.py +24 -24
- tapps_agents/health/base.py +75 -75
- tapps_agents/health/checks/__init__.py +22 -22
- tapps_agents/health/checks/automation.py +127 -127
- tapps_agents/health/checks/context7_cache.py +210 -210
- tapps_agents/health/checks/environment.py +116 -116
- tapps_agents/health/checks/execution.py +170 -170
- tapps_agents/health/checks/knowledge_base.py +187 -187
- tapps_agents/health/checks/outcomes.py +324 -324
- tapps_agents/health/collector.py +280 -280
- tapps_agents/health/dashboard.py +137 -137
- tapps_agents/health/metrics.py +151 -151
- tapps_agents/health/orchestrator.py +271 -271
- tapps_agents/health/registry.py +166 -166
- tapps_agents/hooks/__init__.py +33 -33
- tapps_agents/hooks/config.py +140 -140
- tapps_agents/hooks/events.py +135 -135
- tapps_agents/hooks/executor.py +128 -128
- tapps_agents/hooks/manager.py +143 -143
- tapps_agents/integration/__init__.py +8 -8
- tapps_agents/integration/service_integrator.py +121 -121
- tapps_agents/integrations/__init__.py +10 -10
- tapps_agents/integrations/clawdbot.py +525 -525
- tapps_agents/integrations/memory_bridge.py +356 -356
- tapps_agents/mcp/__init__.py +18 -18
- tapps_agents/mcp/gateway.py +112 -112
- tapps_agents/mcp/servers/__init__.py +13 -13
- tapps_agents/mcp/servers/analysis.py +204 -204
- tapps_agents/mcp/servers/context7.py +198 -198
- tapps_agents/mcp/servers/filesystem.py +218 -218
- tapps_agents/mcp/servers/git.py +201 -201
- tapps_agents/mcp/tool_registry.py +115 -115
- tapps_agents/quality/__init__.py +54 -54
- tapps_agents/quality/coverage_analyzer.py +379 -379
- tapps_agents/quality/enforcement.py +82 -82
- tapps_agents/quality/gates/__init__.py +37 -37
- tapps_agents/quality/gates/approval_gate.py +255 -255
- tapps_agents/quality/gates/base.py +84 -84
- tapps_agents/quality/gates/exceptions.py +43 -43
- tapps_agents/quality/gates/policy_gate.py +195 -195
- tapps_agents/quality/gates/registry.py +239 -239
- tapps_agents/quality/gates/security_gate.py +156 -156
- tapps_agents/quality/quality_gates.py +369 -369
- tapps_agents/quality/secret_scanner.py +335 -335
- tapps_agents/session/__init__.py +19 -19
- tapps_agents/session/manager.py +256 -256
- tapps_agents/simple_mode/__init__.py +66 -66
- tapps_agents/simple_mode/agent_contracts.py +357 -357
- tapps_agents/simple_mode/beads_hooks.py +151 -151
- tapps_agents/simple_mode/code_snippet_handler.py +382 -382
- tapps_agents/simple_mode/documentation_manager.py +395 -395
- tapps_agents/simple_mode/documentation_reader.py +187 -187
- tapps_agents/simple_mode/file_inference.py +292 -292
- tapps_agents/simple_mode/framework_change_detector.py +268 -268
- tapps_agents/simple_mode/intent_parser.py +510 -510
- tapps_agents/simple_mode/learning_progression.py +358 -358
- tapps_agents/simple_mode/nl_handler.py +700 -700
- tapps_agents/simple_mode/onboarding.py +253 -253
- tapps_agents/simple_mode/orchestrators/__init__.py +38 -38
- tapps_agents/simple_mode/orchestrators/base.py +185 -185
- tapps_agents/simple_mode/orchestrators/breakdown_orchestrator.py +49 -49
- tapps_agents/simple_mode/orchestrators/brownfield_orchestrator.py +135 -135
- tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2700 -2667
- tapps_agents/simple_mode/orchestrators/deliverable_checklist.py +349 -349
- tapps_agents/simple_mode/orchestrators/enhance_orchestrator.py +53 -53
- tapps_agents/simple_mode/orchestrators/epic_orchestrator.py +122 -122
- tapps_agents/simple_mode/orchestrators/explore_orchestrator.py +184 -184
- tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +723 -723
- tapps_agents/simple_mode/orchestrators/plan_analysis_orchestrator.py +206 -206
- tapps_agents/simple_mode/orchestrators/pr_orchestrator.py +237 -237
- tapps_agents/simple_mode/orchestrators/refactor_orchestrator.py +222 -222
- tapps_agents/simple_mode/orchestrators/requirements_tracer.py +262 -262
- tapps_agents/simple_mode/orchestrators/resume_orchestrator.py +210 -210
- tapps_agents/simple_mode/orchestrators/review_orchestrator.py +161 -161
- tapps_agents/simple_mode/orchestrators/test_orchestrator.py +82 -82
- tapps_agents/simple_mode/output_aggregator.py +340 -340
- tapps_agents/simple_mode/result_formatters.py +598 -598
- tapps_agents/simple_mode/step_dependencies.py +382 -382
- tapps_agents/simple_mode/step_results.py +276 -276
- tapps_agents/simple_mode/streaming.py +388 -388
- tapps_agents/simple_mode/variations.py +129 -129
- tapps_agents/simple_mode/visual_feedback.py +238 -238
- tapps_agents/simple_mode/zero_config.py +274 -274
- tapps_agents/suggestions/__init__.py +8 -8
- tapps_agents/suggestions/inline_suggester.py +52 -52
- tapps_agents/templates/__init__.py +8 -8
- tapps_agents/templates/microservice_generator.py +274 -274
- tapps_agents/utils/env_validator.py +291 -291
- tapps_agents/workflow/__init__.py +171 -171
- tapps_agents/workflow/acceptance_verifier.py +132 -132
- tapps_agents/workflow/agent_handlers/__init__.py +41 -41
- tapps_agents/workflow/agent_handlers/analyst_handler.py +75 -75
- tapps_agents/workflow/agent_handlers/architect_handler.py +107 -107
- tapps_agents/workflow/agent_handlers/base.py +84 -84
- tapps_agents/workflow/agent_handlers/debugger_handler.py +100 -100
- tapps_agents/workflow/agent_handlers/designer_handler.py +110 -110
- tapps_agents/workflow/agent_handlers/documenter_handler.py +94 -94
- tapps_agents/workflow/agent_handlers/implementer_handler.py +235 -235
- tapps_agents/workflow/agent_handlers/ops_handler.py +62 -62
- tapps_agents/workflow/agent_handlers/orchestrator_handler.py +43 -43
- tapps_agents/workflow/agent_handlers/planner_handler.py +98 -98
- tapps_agents/workflow/agent_handlers/registry.py +119 -119
- tapps_agents/workflow/agent_handlers/reviewer_handler.py +119 -119
- tapps_agents/workflow/agent_handlers/tester_handler.py +69 -69
- tapps_agents/workflow/analytics_accessor.py +337 -337
- tapps_agents/workflow/analytics_alerts.py +416 -416
- tapps_agents/workflow/analytics_dashboard_cursor.py +281 -281
- tapps_agents/workflow/analytics_dual_write.py +103 -103
- tapps_agents/workflow/analytics_integration.py +119 -119
- tapps_agents/workflow/analytics_query_parser.py +278 -278
- tapps_agents/workflow/analytics_visualizer.py +259 -259
- tapps_agents/workflow/artifact_helper.py +204 -204
- tapps_agents/workflow/audit_logger.py +263 -263
- tapps_agents/workflow/auto_execution_config.py +340 -340
- tapps_agents/workflow/auto_progression.py +586 -586
- tapps_agents/workflow/branch_cleanup.py +349 -349
- tapps_agents/workflow/checkpoint.py +256 -256
- tapps_agents/workflow/checkpoint_manager.py +178 -178
- tapps_agents/workflow/code_artifact.py +179 -179
- tapps_agents/workflow/common_enums.py +96 -96
- tapps_agents/workflow/confirmation_handler.py +130 -130
- tapps_agents/workflow/context_analyzer.py +222 -222
- tapps_agents/workflow/context_artifact.py +230 -230
- tapps_agents/workflow/cursor_chat.py +94 -94
- tapps_agents/workflow/cursor_executor.py +2337 -2337
- tapps_agents/workflow/cursor_skill_helper.py +516 -516
- tapps_agents/workflow/dependency_resolver.py +244 -244
- tapps_agents/workflow/design_artifact.py +156 -156
- tapps_agents/workflow/detector.py +751 -751
- tapps_agents/workflow/direct_execution_fallback.py +301 -301
- tapps_agents/workflow/docs_artifact.py +168 -168
- tapps_agents/workflow/enforcer.py +389 -389
- tapps_agents/workflow/enhancement_artifact.py +142 -142
- tapps_agents/workflow/error_recovery.py +806 -806
- tapps_agents/workflow/event_bus.py +183 -183
- tapps_agents/workflow/event_log.py +612 -612
- tapps_agents/workflow/events.py +63 -63
- tapps_agents/workflow/exceptions.py +43 -43
- tapps_agents/workflow/execution_graph.py +498 -498
- tapps_agents/workflow/execution_plan.py +126 -126
- tapps_agents/workflow/file_utils.py +186 -186
- tapps_agents/workflow/gate_evaluator.py +182 -182
- tapps_agents/workflow/gate_integration.py +200 -200
- tapps_agents/workflow/graph_visualizer.py +130 -130
- tapps_agents/workflow/health_checker.py +206 -206
- tapps_agents/workflow/logging_helper.py +243 -243
- tapps_agents/workflow/manifest.py +582 -582
- tapps_agents/workflow/marker_writer.py +250 -250
- tapps_agents/workflow/message_formatter.py +188 -188
- tapps_agents/workflow/messaging.py +325 -325
- tapps_agents/workflow/metadata_models.py +91 -91
- tapps_agents/workflow/metrics_integration.py +226 -226
- tapps_agents/workflow/migration_utils.py +116 -116
- tapps_agents/workflow/models.py +148 -148
- tapps_agents/workflow/nlp_config.py +198 -198
- tapps_agents/workflow/nlp_error_handler.py +207 -207
- tapps_agents/workflow/nlp_executor.py +163 -163
- tapps_agents/workflow/nlp_parser.py +528 -528
- tapps_agents/workflow/observability_dashboard.py +451 -451
- tapps_agents/workflow/observer.py +170 -170
- tapps_agents/workflow/ops_artifact.py +257 -257
- tapps_agents/workflow/output_passing.py +214 -214
- tapps_agents/workflow/parallel_executor.py +463 -463
- tapps_agents/workflow/planning_artifact.py +179 -179
- tapps_agents/workflow/preset_loader.py +285 -285
- tapps_agents/workflow/preset_recommender.py +270 -270
- tapps_agents/workflow/progress_logger.py +145 -145
- tapps_agents/workflow/progress_manager.py +303 -303
- tapps_agents/workflow/progress_monitor.py +186 -186
- tapps_agents/workflow/progress_updates.py +423 -423
- tapps_agents/workflow/quality_artifact.py +158 -158
- tapps_agents/workflow/quality_loopback.py +101 -101
- tapps_agents/workflow/recommender.py +387 -387
- tapps_agents/workflow/remediation_loop.py +166 -166
- tapps_agents/workflow/result_aggregator.py +300 -300
- tapps_agents/workflow/review_artifact.py +185 -185
- tapps_agents/workflow/schema_validator.py +522 -522
- tapps_agents/workflow/session_handoff.py +178 -178
- tapps_agents/workflow/skill_invoker.py +648 -648
- tapps_agents/workflow/state_manager.py +756 -756
- tapps_agents/workflow/state_persistence_config.py +331 -331
- tapps_agents/workflow/status_monitor.py +449 -449
- tapps_agents/workflow/step_checkpoint.py +314 -314
- tapps_agents/workflow/step_details.py +201 -201
- tapps_agents/workflow/story_models.py +147 -147
- tapps_agents/workflow/streaming.py +416 -416
- tapps_agents/workflow/suggestion_engine.py +552 -552
- tapps_agents/workflow/testing_artifact.py +186 -186
- tapps_agents/workflow/timeline.py +158 -158
- tapps_agents/workflow/token_integration.py +209 -209
- tapps_agents/workflow/validation.py +217 -217
- tapps_agents/workflow/visual_feedback.py +391 -391
- tapps_agents/workflow/workflow_chain.py +95 -95
- tapps_agents/workflow/workflow_summary.py +219 -219
- tapps_agents/workflow/worktree_manager.py +724 -724
- {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.0.dist-info}/METADATA +672 -672
- tapps_agents-3.6.0.dist-info/RECORD +758 -0
- {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.0.dist-info}/licenses/LICENSE +22 -22
- tapps_agents/health/checks/outcomes.backup_20260204_064058.py +0 -324
- tapps_agents/health/checks/outcomes.backup_20260204_064256.py +0 -324
- tapps_agents/health/checks/outcomes.backup_20260204_064600.py +0 -324
- tapps_agents-3.5.41.dist-info/RECORD +0 -760
- {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.0.dist-info}/WHEEL +0 -0
- {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.0.dist-info}/entry_points.txt +0 -0
- {tapps_agents-3.5.41.dist-info → tapps_agents-3.6.0.dist-info}/top_level.txt +0 -0
tapps_agents/workflow/state_manager.py

```diff
@@ -1,756 +1,756 @@
-"""
-Advanced Workflow State Management
-
-Provides enhanced state persistence with validation, migration, versioning, and recovery.
-"""
-
-from __future__ import annotations
-
-import gzip
-import hashlib
-import json
-import logging
-from dataclasses import asdict, dataclass
-from datetime import datetime
-from pathlib import Path
-from typing import Any
-
-from .models import WorkflowState
-
-logger = logging.getLogger(__name__)
-
-# State format version for migration
-CURRENT_STATE_VERSION = "2.0"
-
-
-@dataclass
-class StateMetadata:
-    """Metadata for persisted workflow state."""
-
-    version: str
-    saved_at: datetime
-    checksum: str
-    workflow_id: str
-    state_file: str
-    workflow_path: str | None = None
-    compression: bool = False
-    # Epic 12: Enhanced checkpoint metadata
-    current_step: str | None = None
-    completed_steps_count: int = 0
-    progress_percentage: float = 0.0
-    trigger_step_id: str | None = None
-
-    def to_dict(self) -> dict[str, Any]:
-        """Convert to dictionary."""
-        data = asdict(self)
-        data["saved_at"] = self.saved_at.isoformat()
-        return data
-
-    @classmethod
-    def from_dict(cls, data: dict[str, Any]) -> StateMetadata:
-        """Create from dictionary."""
-        data = data.copy()
-        data["saved_at"] = datetime.fromisoformat(data["saved_at"])
-        return cls(**data)
-
-
-class StateValidator:
-    """Validates workflow state integrity."""
-
-    @staticmethod
-    def calculate_checksum(state_data: dict[str, Any]) -> str:
-        """Calculate SHA256 checksum for state data."""
-        def _make_json_serializable(obj: Any) -> Any:
-            """Recursively convert objects to JSON-serializable format."""
-            # Handle ProjectProfile objects
-            if hasattr(obj, "to_dict") and hasattr(obj, "compliance_requirements"):
-                try:
-                    from ..core.project_profile import ProjectProfile
-                    if isinstance(obj, ProjectProfile):
-                        return obj.to_dict()
-                except (ImportError, AttributeError):
-                    pass
-
-            # Handle ComplianceRequirement objects
-            if hasattr(obj, "name") and hasattr(obj, "confidence") and hasattr(obj, "indicators"):
-                try:
-                    from ..core.project_profile import ComplianceRequirement
-                    if isinstance(obj, ComplianceRequirement):
-                        return asdict(obj)
-                except (ImportError, AttributeError):
-                    pass
-
-            # Handle dictionaries recursively
-            if isinstance(obj, dict):
-                return {k: _make_json_serializable(v) for k, v in obj.items()}
-
-            # Handle lists recursively
-            if isinstance(obj, list):
-                return [_make_json_serializable(item) for item in obj]
-
-            return obj
-
-        # Ensure variables are JSON-serializable
-        variables = state_data.get("variables", {})
-        serializable_variables = _make_json_serializable(variables)
-
-        # Create stable representation (sorted keys, no metadata)
-        stable_data = {
-            "workflow_id": state_data.get("workflow_id"),
-            "current_step": state_data.get("current_step"),
-            "completed_steps": sorted(state_data.get("completed_steps", [])),
-            "skipped_steps": sorted(state_data.get("skipped_steps", [])),
-            "status": state_data.get("status"),
-            "variables": serializable_variables,
-        }
-        stable_str = json.dumps(stable_data, sort_keys=True)
-        return hashlib.sha256(stable_str.encode()).hexdigest()
-
-    @staticmethod
-    def validate_state(
-        state_data: dict[str, Any], expected_checksum: str | None = None
-    ) -> tuple[bool, str | None]:
-        """
-        Validate state integrity.
-
-        Returns:
-            (is_valid, error_message)
-        """
-        # Check required fields
-        required_fields = ["workflow_id", "started_at", "status"]
-        for field in required_fields:
-            if field not in state_data:
-                return False, f"Missing required field: {field}"
-
-        # Validate status
-        valid_statuses = ["running", "paused", "completed", "failed"]
-        if state_data.get("status") not in valid_statuses:
-            return False, f"Invalid status: {state_data.get('status')}"
-
-        # Validate checksum if provided
-        if expected_checksum:
-            calculated = StateValidator.calculate_checksum(state_data)
-            if calculated != expected_checksum:
-                return (
-                    False,
-                    f"Checksum mismatch: expected {expected_checksum[:8]}..., got {calculated[:8]}...",
-                )
-
-        return True, None
-
-
-class StateMigrator:
-    """Handles migration of state between versions."""
-
-    @staticmethod
-    def migrate_state(
-        state_data: dict[str, Any], from_version: str, to_version: str
-    ) -> dict[str, Any]:
-        """
-        Migrate state from one version to another.
-
-        Args:
-            state_data: State data to migrate
-            from_version: Source version
-            to_version: Target version
-
-        Returns:
-            Migrated state data
-        """
-        if from_version == to_version:
-            return state_data
-
-        # Migration path: 1.0 -> 2.0
-        if from_version == "1.0" and to_version == "2.0":
-            # Add default fields if missing
-            if "skipped_steps" not in state_data:
-                state_data["skipped_steps"] = []
-            if "artifacts" not in state_data:
-                state_data["artifacts"] = {}
-            if "variables" not in state_data:
-                state_data["variables"] = {}
-
-            # Ensure status field exists
-            if "status" not in state_data:
-                state_data["status"] = "running"
-
-        return state_data
-
-
-class AdvancedStateManager:
-    """Advanced workflow state manager with validation, migration, and recovery."""
-
-    def __init__(self, state_dir: Path, compression: bool = False):
-        """
-        Initialize advanced state manager.
-
-        Args:
-            state_dir: Directory for state storage
-            compression: Enable compression for state files
-        """
-        self.state_dir = Path(state_dir)
-        self.state_dir.mkdir(parents=True, exist_ok=True)
-        self.compression = compression
-        self.history_dir = self.state_dir / "history"
-        self.history_dir.mkdir(parents=True, exist_ok=True)
-
-    def save_state(
-        self, state: WorkflowState, workflow_path: Path | None = None
-    ) -> Path:
-        """
-        Save workflow state with validation and metadata.
-
-        Args:
-            state: Workflow state to save
-            workflow_path: Optional path to workflow YAML
-
-        Returns:
-            Path to saved state file
-        """
-        # Convert state to dict
-        state_data = self._state_to_dict(state)
-
-        # Calculate checksum
-        checksum = StateValidator.calculate_checksum(state_data)
-        state_data["_checksum"] = checksum
-
-        # Create metadata
-        state_file = (
-            f"{state.workflow_id}-{datetime.now().strftime('%Y%m%d-%H%M%S')}.json"
-        )
-        if self.compression:
-            state_file += ".gz"
-
-        # Epic 12: Extract checkpoint metadata from state if available
-        checkpoint_metadata = state.variables.get("_checkpoint_metadata", {})
-
-        metadata = StateMetadata(
-            version=CURRENT_STATE_VERSION,
-            saved_at=datetime.now(),
-            checksum=checksum,
-            workflow_id=state.workflow_id,
-            state_file=state_file,
-            workflow_path=str(workflow_path) if workflow_path else None,
-            compression=self.compression,
-            current_step=checkpoint_metadata.get("current_step") or state.current_step,
-            completed_steps_count=checkpoint_metadata.get("completed_steps", len(state.completed_steps)),
-            progress_percentage=checkpoint_metadata.get("progress_percentage", 0.0),
-            trigger_step_id=checkpoint_metadata.get("trigger_step_id"),
-        )
-
-        # Save state file
-        state_path = self.state_dir / state_file
-        self._write_state_file(state_path, state_data, self.compression)
-
-        # Save metadata
-        metadata_path = self.state_dir / f"{state.workflow_id}.meta.json"
-        from .file_utils import atomic_write_json
-        atomic_write_json(metadata_path, metadata.to_dict(), indent=2)
-
-        # Save to history
-        history_path = self.history_dir / state_file
-        self._write_state_file(history_path, state_data, self.compression)
-
-        # Update last pointer
-        last_data = {
-            "workflow_id": state.workflow_id,
-            "state_file": str(state_path),
-            "metadata_file": str(metadata_path),
-            "saved_at": datetime.now().isoformat(),
-            "version": CURRENT_STATE_VERSION,
-            "workflow_path": str(workflow_path) if workflow_path else None,
-        }
-        last_path = self.state_dir / "last.json"
-        from .file_utils import atomic_write_json
-        atomic_write_json(last_path, last_data, indent=2)
-
-        # Session handoff when workflow ends (plan 2.1)
-        if state.status in ("completed", "failed", "paused"):
-            try:
-                from .session_handoff import SessionHandoff, write_handoff
-
-                project_root = self.state_dir.parent.parent
-                done = list(state.completed_steps or [])
-                artifact_paths = [
-                    a.path for a in (state.artifacts or {}).values()
-                    if getattr(a, "path", None)
-                ]
-                next_steps = [
-                    "Resume with: tapps-agents workflow resume",
-                    "Run `bd ready` to see unblocked tasks (if using Beads).",
-                ]
-                handoff = SessionHandoff(
-                    workflow_id=state.workflow_id,
-                    session_ended_at=datetime.now(timezone.utc).isoformat(),
-                    summary=f"Workflow {state.status}. Completed steps: {len(done)}.",
-                    done=done,
-                    decisions=[],
-                    next_steps=next_steps,
-                    artifact_paths=artifact_paths,
-                    bd_ready_hint="Run `bd ready`" if done else None,
-                )
-                write_handoff(project_root, handoff)
-            except Exception as e:  # pylint: disable=broad-except
-                logger.debug("Could not write session handoff: %s", e)
-
-        logger.info(f"Saved workflow state: {state.workflow_id} to {state_path}")
-        return state_path
-
-    def load_state(
-        self,
-        workflow_id: str | None = None,
-        state_file: Path | None = None,
-        validate: bool = True,
-    ) -> tuple[WorkflowState, StateMetadata]:
-        """
-        Load workflow state with validation and migration.
-
-        Args:
-            workflow_id: Workflow ID to load (uses last if not specified)
-            state_file: Specific state file to load
-            validate: Whether to validate state integrity
-
-        Returns:
-            (WorkflowState, StateMetadata)
-        """
-        # Determine which state file to load
-        if state_file:
-            state_path = Path(state_file)
-        elif workflow_id:
-            # Find latest state for workflow
-            metadata_path = self.state_dir / f"{workflow_id}.meta.json"
-            if metadata_path.exists():
-                with open(metadata_path, encoding="utf-8") as f:
-                    metadata_data = json.load(f)
-                state_path = self.state_dir / metadata_data["state_file"]
-            else:
-                raise FileNotFoundError(f"No state found for workflow: {workflow_id}")
-        else:
-            # Load last state
-            last_path = self.state_dir / "last.json"
-            if not last_path.exists():
-                raise FileNotFoundError("No persisted workflow state found")
-
-            with open(last_path, encoding="utf-8") as f:
-                last_data = json.load(f)
-
-            state_path = Path(last_data["state_file"])
-            if not state_path.is_absolute():
-                state_path = self.state_dir / state_path
-
-        # Load state data
-        state_data = self._read_state_file(state_path)
-
-        # Extract checksum if present
-        expected_checksum = state_data.pop("_checksum", None)
-
-        # Validate if requested
-        if validate:
-            is_valid, error = StateValidator.validate_state(
-                state_data, expected_checksum
-            )
-            if not is_valid:
-                logger.warning(f"State validation failed: {error}")
-                # Try to recover from history
-                recovered_workflow_id = (
-                    workflow_id
-                    or state_data.get("workflow_id")
-                    or state_data.get("id")
-                    or ""
-                )
-                if (
-                    not isinstance(recovered_workflow_id, str)
-                    or not recovered_workflow_id
-                ):
-                    raise ValueError(
-                        "Cannot recover: missing workflow_id in state data"
-                    )
-                return self._recover_from_history(state_path, recovered_workflow_id)
-
-        # Check version and migrate if needed
-        metadata_path = self.state_dir / f"{state_data['workflow_id']}.meta.json"
-        if metadata_path.exists():
-            with open(metadata_path, encoding="utf-8") as f:
-                metadata_data = json.load(f)
-            metadata = StateMetadata.from_dict(metadata_data)
-
-            if metadata.version != CURRENT_STATE_VERSION:
-                logger.info(
-                    f"Migrating state from {metadata.version} to {CURRENT_STATE_VERSION}"
-                )
-                state_data = StateMigrator.migrate_state(
-                    state_data, metadata.version, CURRENT_STATE_VERSION
-                )
-                # Update metadata version after migration
-                metadata.version = CURRENT_STATE_VERSION
-        else:
-            # Legacy state without metadata
-            metadata = StateMetadata(
-                version=CURRENT_STATE_VERSION,  # Use current version for legacy states
-                saved_at=datetime.fromisoformat(
-                    state_data.get("started_at", datetime.now().isoformat())
-                ),
-                checksum=expected_checksum or "",
-                workflow_id=state_data["workflow_id"],
-                state_file=str(state_path.name),
-                compression=self.compression,
-            )
-
-        # Convert to WorkflowState
-        state = self._state_from_dict(state_data)
-
-        return state, metadata
-
-    def list_states(self, workflow_id: str | None = None) -> list[dict[str, Any]]:
-        """
-        List available workflow states.
-
-        Args:
-            workflow_id: Optional workflow ID to filter by
-
-        Returns:
-            List of state metadata dictionaries
-        """
-        states = []
-
-        if workflow_id:
-            # List history for specific workflow
-            pattern = f"{workflow_id}-*.json*"
-            for state_file in self.history_dir.glob(pattern):
-                try:
-                    state_data = self._read_state_file(state_file)
-                    states.append(
-                        {
-                            "workflow_id": state_data.get("workflow_id"),
-                            "state_file": str(state_file),
-                            "status": state_data.get("status"),
-                            "current_step": state_data.get("current_step"),
-                            "saved_at": state_file.stat().st_mtime,
-                        }
-                    )
-                except Exception as e:
-                    logger.warning(f"Failed to read state file {state_file}: {e}")
-        else:
-            # List all workflows
-            for metadata_file in self.state_dir.glob("*.meta.json"):
-                try:
-                    with open(metadata_file, encoding="utf-8") as f:
-                        metadata_data = json.load(f)
-                    states.append(metadata_data)
-                except Exception as e:
-                    logger.warning(f"Failed to read metadata {metadata_file}: {e}")
-
-        return sorted(states, key=lambda x: x.get("saved_at", 0), reverse=True)
-
-    def cleanup_old_states(
-        self,
-        retention_days: int = 30,
-        max_states_per_workflow: int = 10,
-        remove_completed: bool = True,
-        dry_run: bool = False,
-    ) -> dict[str, Any]:
-        """
-        Clean up old workflow states.
-
-        Epic 12: State cleanup functionality
-
-        Args:
-            retention_days: Keep states newer than this many days
-            max_states_per_workflow: Maximum states to keep per workflow
-            remove_completed: Whether to remove states for completed workflows
-            dry_run: If True, only report what would be removed (no files deleted)
-
-        Returns:
-            Dictionary with cleanup statistics; if dry_run, includes would_remove list
-        """
-        from datetime import timedelta
-
-        cutoff_date = datetime.now() - timedelta(days=retention_days)
-        removed_count = 0
-        removed_size = 0
-        workflows_cleaned = set()
-        would_remove: list[dict[str, Any]] = []
-
-        # Group states by workflow_id
-        workflow_states: dict[str, list[dict[str, Any]]] = {}
-        for state_info in self.list_states():
-            workflow_id = state_info.get("workflow_id", "unknown")
-            if workflow_id not in workflow_states:
-                workflow_states[workflow_id] = []
-            workflow_states[workflow_id].append(state_info)
-
-        # Clean up each workflow
```
|
|
483
|
-
for workflow_id, states in workflow_states.items():
|
|
484
|
-
# Sort by saved_at (newest first)
|
|
485
|
-
states_sorted = sorted(
|
|
486
|
-
states,
|
|
487
|
-
key=lambda x: datetime.fromisoformat(x.get("saved_at", "1970-01-01"))
|
|
488
|
-
if isinstance(x.get("saved_at"), str)
|
|
489
|
-
else datetime.fromtimestamp(0),
|
|
490
|
-
reverse=True,
|
|
491
|
-
)
|
|
492
|
-
|
|
493
|
-
# Check if workflow is completed
|
|
494
|
-
is_completed = False
|
|
495
|
-
if remove_completed:
|
|
496
|
-
# Try to load latest state to check status
|
|
497
|
-
try:
|
|
498
|
-
state, _ = self.load_state(workflow_id=workflow_id, validate=False)
|
|
499
|
-
is_completed = state.status in ("completed", "failed", "cancelled")
|
|
500
|
-
except Exception:
|
|
501
|
-
pass
|
|
502
|
-
|
|
503
|
-
# Remove old states
|
|
504
|
-
for state_info in states_sorted:
|
|
505
|
-
state_file = state_info.get("state_file", "")
|
|
506
|
-
if not state_file:
|
|
507
|
-
continue
|
|
508
|
-
|
|
509
|
-
state_path = self.state_dir / state_file
|
|
510
|
-
if not state_path.exists():
|
|
511
|
-
continue
|
|
512
|
-
|
|
513
|
-
# Check retention period
|
|
514
|
-
saved_at_str = state_info.get("saved_at", "")
|
|
515
|
-
try:
|
|
516
|
-
if isinstance(saved_at_str, str):
|
|
517
|
-
saved_at = datetime.fromisoformat(saved_at_str)
|
|
518
|
-
else:
|
|
519
|
-
saved_at = datetime.fromtimestamp(saved_at_str)
|
|
520
|
-
except (ValueError, TypeError):
|
|
521
|
-
saved_at = datetime.fromtimestamp(0)
|
|
522
|
-
|
|
523
|
-
should_remove = False
|
|
524
|
-
reason = ""
|
|
525
|
-
|
|
526
|
-
# Remove if older than retention period
|
|
527
|
-
if saved_at < cutoff_date:
|
|
528
|
-
should_remove = True
|
|
529
|
-
reason = "retention_period"
|
|
530
|
-
# Remove if completed workflow and remove_completed is True
|
|
531
|
-
elif is_completed and remove_completed:
|
|
532
|
-
# Keep only the most recent completed state
|
|
533
|
-
if states_sorted.index(state_info) > 0:
|
|
534
|
-
should_remove = True
|
|
535
|
-
reason = "completed_workflow"
|
|
536
|
-
# Remove if exceeds max_states_per_workflow
|
|
537
|
-
elif len([s for s in states_sorted if not should_remove]) > max_states_per_workflow:
|
|
538
|
-
if states_sorted.index(state_info) >= max_states_per_workflow:
|
|
539
|
-
should_remove = True
|
|
540
|
-
reason = "max_states_limit"
|
|
541
|
-
|
|
542
|
-
if should_remove:
|
|
543
|
-
try:
|
|
544
|
-
# Get file size before removal
|
|
545
|
-
file_size = state_path.stat().st_size
|
|
546
|
-
if dry_run:
|
|
547
|
-
would_remove.append({
|
|
548
|
-
"state_file": state_file,
|
|
549
|
-
"workflow_id": workflow_id,
|
|
550
|
-
"size_bytes": file_size,
|
|
551
|
-
"reason": reason,
|
|
552
|
-
})
|
|
553
|
-
removed_size += file_size
|
|
554
|
-
removed_count += 1
|
|
555
|
-
workflows_cleaned.add(workflow_id)
|
|
556
|
-
else:
|
|
557
|
-
state_path.unlink()
|
|
558
|
-
removed_size += file_size
|
|
559
|
-
removed_count += 1
|
|
560
|
-
workflows_cleaned.add(workflow_id)
|
|
561
|
-
# Also remove from history if exists
|
|
562
|
-
history_path = self.history_dir / state_file
|
|
563
|
-
if history_path.exists():
|
|
564
|
-
history_path.unlink()
|
|
565
|
-
logger.debug(
|
|
566
|
-
f"Removed state {state_file} (reason: {reason})"
|
|
567
|
-
)
|
|
568
|
-
except Exception as e:
|
|
569
|
-
logger.warning(f"Failed to remove state {state_file}: {e}")
|
|
570
|
-
|
|
571
|
-
result = {
|
|
572
|
-
"removed_count": removed_count,
|
|
573
|
-
"removed_size_mb": round(removed_size / (1024 * 1024), 2),
|
|
574
|
-
"workflows_cleaned": len(workflows_cleaned),
|
|
575
|
-
}
|
|
576
|
-
if dry_run:
|
|
577
|
-
result["dry_run"] = True
|
|
578
|
-
result["would_remove"] = would_remove
|
|
579
|
-
|
|
580
|
-
logger.info(
|
|
581
|
-
f"State cleanup completed: removed {removed_count} states "
|
|
582
|
-
f"({result['removed_size_mb']} MB) from {len(workflows_cleaned)} workflows"
|
|
583
|
-
)
|
|
584
|
-
|
|
585
|
-
return result
|
|
586
|
-
|
|
587
|
-
def _recover_from_history(
|
|
588
|
-
self, corrupted_path: Path, workflow_id: str
|
|
589
|
-
) -> tuple[WorkflowState, StateMetadata]:
|
|
590
|
-
"""Attempt to recover state from history."""
|
|
591
|
-
logger.info(f"Attempting recovery from history for {workflow_id}")
|
|
592
|
-
|
|
593
|
-
# Find most recent valid state in history
|
|
594
|
-
pattern = f"{workflow_id}-*.json*"
|
|
595
|
-
history_files = sorted(
|
|
596
|
-
self.history_dir.glob(pattern),
|
|
597
|
-
key=lambda p: p.stat().st_mtime,
|
|
598
|
-
reverse=True,
|
|
599
|
-
)
|
|
600
|
-
|
|
601
|
-
for history_file in history_files:
|
|
602
|
-
try:
|
|
603
|
-
state_data = self._read_state_file(history_file)
|
|
604
|
-
expected_checksum = state_data.pop("_checksum", None)
|
|
605
|
-
is_valid, error = StateValidator.validate_state(
|
|
606
|
-
state_data, expected_checksum
|
|
607
|
-
)
|
|
608
|
-
if is_valid:
|
|
609
|
-
logger.info(f"Recovered valid state from {history_file}")
|
|
610
|
-
# Re-save as current state
|
|
611
|
-
return self.load_state(state_file=history_file, validate=False)
|
|
612
|
-
except Exception as e:
|
|
613
|
-
logger.warning(f"Failed to recover from {history_file}: {e}")
|
|
614
|
-
continue
|
|
615
|
-
|
|
616
|
-
raise ValueError(f"Could not recover state for workflow {workflow_id}")
|
|
617
|
-
|
|
618
|
-
def _write_state_file(self, path: Path, data: dict[str, Any], compress: bool):
|
|
619
|
-
"""Write state file with optional compression using atomic write."""
|
|
620
|
-
from .file_utils import atomic_write_json
|
|
621
|
-
|
|
622
|
-
atomic_write_json(path, data, compress=compress, indent=2)
|
|
623
|
-
|
|
624
|
-
def _read_state_file(self, path: Path) -> dict[str, Any]:
|
|
625
|
-
"""Read state file with optional decompression and safe loading."""
|
|
626
|
-
from .file_utils import safe_load_json
|
|
627
|
-
|
|
628
|
-
# Use safe loading with validation
|
|
629
|
-
result = safe_load_json(
|
|
630
|
-
path,
|
|
631
|
-
retries=3,
|
|
632
|
-
backoff=0.5,
|
|
633
|
-
min_age_seconds=1.0, # Shorter wait for explicit loads
|
|
634
|
-
min_size=50, # Smaller minimum for compressed files
|
|
635
|
-
)
|
|
636
|
-
|
|
637
|
-
if result is None:
|
|
638
|
-
raise ValueError(f"Failed to load state file: {path}")
|
|
639
|
-
|
|
640
|
-
return result
|
|
641
|
-
|
|
642
|
-
def _state_to_dict(self, state: WorkflowState) -> dict[str, Any]:
|
|
643
|
-
"""Convert WorkflowState to dictionary."""
|
|
644
|
-
|
|
645
|
-
def _artifact_to_dict(name: str, artifact: Any) -> dict[str, Any]:
|
|
646
|
-
created_at = getattr(artifact, "created_at", None)
|
|
647
|
-
return {
|
|
648
|
-
"name": getattr(artifact, "name", name) or name,
|
|
649
|
-
"path": str(getattr(artifact, "path", "")),
|
|
650
|
-
"status": getattr(artifact, "status", "pending"),
|
|
651
|
-
"created_by": getattr(artifact, "created_by", None),
|
|
652
|
-
"created_at": created_at.isoformat() if created_at else None,
|
|
653
|
-
"metadata": getattr(artifact, "metadata", {}) or {},
|
|
654
|
-
}
|
|
655
|
-
|
|
656
|
-
def _make_json_serializable(obj: Any) -> Any:
|
|
657
|
-
"""Recursively convert objects to JSON-serializable format."""
|
|
658
|
-
# Handle ProjectProfile objects
|
|
659
|
-
if hasattr(obj, "to_dict") and hasattr(obj, "compliance_requirements"):
|
|
660
|
-
# This is likely a ProjectProfile object
|
|
661
|
-
try:
|
|
662
|
-
from ..core.project_profile import ProjectProfile
|
|
663
|
-
if isinstance(obj, ProjectProfile):
|
|
664
|
-
return obj.to_dict()
|
|
665
|
-
except (ImportError, AttributeError):
|
|
666
|
-
pass
|
|
667
|
-
|
|
668
|
-
# Handle ComplianceRequirement objects
|
|
669
|
-
if hasattr(obj, "name") and hasattr(obj, "confidence") and hasattr(obj, "indicators"):
|
|
670
|
-
try:
|
|
671
|
-
from ..core.project_profile import ComplianceRequirement
|
|
672
|
-
if isinstance(obj, ComplianceRequirement):
|
|
673
|
-
return asdict(obj)
|
|
674
|
-
except (ImportError, AttributeError):
|
|
675
|
-
pass
|
|
676
|
-
|
|
677
|
-
# Handle dictionaries recursively
|
|
678
|
-
if isinstance(obj, dict):
|
|
679
|
-
return {k: _make_json_serializable(v) for k, v in obj.items()}
|
|
680
|
-
|
|
681
|
-
# Handle lists recursively
|
|
682
|
-
if isinstance(obj, list):
|
|
683
|
-
return [_make_json_serializable(item) for item in obj]
|
|
684
|
-
|
|
685
|
-
# Handle other non-serializable types
|
|
686
|
-
try:
|
|
687
|
-
json.dumps(obj)
|
|
688
|
-
return obj
|
|
689
|
-
except (TypeError, ValueError):
|
|
690
|
-
# For non-serializable types, convert to string as fallback
|
|
691
|
-
return str(obj)
|
|
692
|
-
|
|
693
|
-
# Convert variables to JSON-serializable format
|
|
694
|
-
variables = state.variables or {}
|
|
695
|
-
serializable_variables = _make_json_serializable(variables)
|
|
696
|
-
|
|
697
|
-
return {
|
|
698
|
-
"workflow_id": state.workflow_id,
|
|
699
|
-
"started_at": state.started_at.isoformat(),
|
|
700
|
-
"current_step": state.current_step,
|
|
701
|
-
"completed_steps": state.completed_steps,
|
|
702
|
-
"skipped_steps": state.skipped_steps,
|
|
703
|
-
"artifacts": {
|
|
704
|
-
k: _artifact_to_dict(k, v) for k, v in (state.artifacts or {}).items()
|
|
705
|
-
},
|
|
706
|
-
"variables": serializable_variables,
|
|
707
|
-
"status": state.status,
|
|
708
|
-
"error": state.error,
|
|
709
|
-
}
|
|
710
|
-
|
|
711
|
-
def _state_from_dict(self, data: dict[str, Any]) -> WorkflowState:
|
|
712
|
-
"""Convert dictionary to WorkflowState."""
|
|
713
|
-
from .models import Artifact
|
|
714
|
-
|
|
715
|
-
artifacts = {}
|
|
716
|
-
for k, v in data.get("artifacts", {}).items():
|
|
717
|
-
if not isinstance(v, dict):
|
|
718
|
-
continue
|
|
719
|
-
|
|
720
|
-
# New format (preferred)
|
|
721
|
-
if "name" in v or "created_by" in v or "metadata" in v:
|
|
722
|
-
created_at = v.get("created_at")
|
|
723
|
-
artifacts[k] = Artifact(
|
|
724
|
-
name=v.get("name", k) or k,
|
|
725
|
-
path=str(v.get("path", "")),
|
|
726
|
-
status=v.get("status", "pending"),
|
|
727
|
-
created_by=v.get("created_by"),
|
|
728
|
-
created_at=(
|
|
729
|
-
datetime.fromisoformat(created_at) if created_at else None
|
|
730
|
-
),
|
|
731
|
-
metadata=v.get("metadata", {}) or {},
|
|
732
|
-
)
|
|
733
|
-
continue
|
|
734
|
-
|
|
735
|
-
# Legacy format (v1): {"path": "...", "status": "...", "step_id": "..."}
|
|
736
|
-
step_id = v.get("step_id")
|
|
737
|
-
artifacts[k] = Artifact(
|
|
738
|
-
name=k,
|
|
739
|
-
path=str(v.get("path", "")),
|
|
740
|
-
status=v.get("status", "pending"),
|
|
741
|
-
created_by=step_id if isinstance(step_id, str) else None,
|
|
742
|
-
created_at=None,
|
|
743
|
-
metadata={},
|
|
744
|
-
)
|
|
745
|
-
|
|
746
|
-
return WorkflowState(
|
|
747
|
-
workflow_id=data["workflow_id"],
|
|
748
|
-
started_at=datetime.fromisoformat(data["started_at"]),
|
|
749
|
-
current_step=data.get("current_step"),
|
|
750
|
-
completed_steps=data.get("completed_steps", []),
|
|
751
|
-
skipped_steps=data.get("skipped_steps", []),
|
|
752
|
-
artifacts=artifacts,
|
|
753
|
-
variables=data.get("variables", {}),
|
|
754
|
-
status=data.get("status", "running"),
|
|
755
|
-
error=data.get("error"),
|
|
756
|
-
)
|
|
+"""
+Advanced Workflow State Management
+
+Provides enhanced state persistence with validation, migration, versioning, and recovery.
+"""
+
+from __future__ import annotations
+
+import gzip
+import hashlib
+import json
+import logging
+from dataclasses import asdict, dataclass
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any
+
+from .models import WorkflowState
+
+logger = logging.getLogger(__name__)
+
+# State format version for migration
+CURRENT_STATE_VERSION = "2.0"
+
+
+@dataclass
+class StateMetadata:
+    """Metadata for persisted workflow state."""
+
+    version: str
+    saved_at: datetime
+    checksum: str
+    workflow_id: str
+    state_file: str
+    workflow_path: str | None = None
+    compression: bool = False
+    # Epic 12: Enhanced checkpoint metadata
+    current_step: str | None = None
+    completed_steps_count: int = 0
+    progress_percentage: float = 0.0
+    trigger_step_id: str | None = None
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary."""
+        data = asdict(self)
+        data["saved_at"] = self.saved_at.isoformat()
+        return data
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> StateMetadata:
+        """Create from dictionary."""
+        data = data.copy()
+        data["saved_at"] = datetime.fromisoformat(data["saved_at"])
+        return cls(**data)
+
+
+class StateValidator:
+    """Validates workflow state integrity."""
+
+    @staticmethod
+    def calculate_checksum(state_data: dict[str, Any]) -> str:
+        """Calculate SHA256 checksum for state data."""
+        def _make_json_serializable(obj: Any) -> Any:
+            """Recursively convert objects to JSON-serializable format."""
+            # Handle ProjectProfile objects
+            if hasattr(obj, "to_dict") and hasattr(obj, "compliance_requirements"):
+                try:
+                    from ..core.project_profile import ProjectProfile
+                    if isinstance(obj, ProjectProfile):
+                        return obj.to_dict()
+                except (ImportError, AttributeError):
+                    pass
+
+            # Handle ComplianceRequirement objects
+            if hasattr(obj, "name") and hasattr(obj, "confidence") and hasattr(obj, "indicators"):
+                try:
+                    from ..core.project_profile import ComplianceRequirement
+                    if isinstance(obj, ComplianceRequirement):
+                        return asdict(obj)
+                except (ImportError, AttributeError):
+                    pass
+
+            # Handle dictionaries recursively
+            if isinstance(obj, dict):
+                return {k: _make_json_serializable(v) for k, v in obj.items()}
+
+            # Handle lists recursively
+            if isinstance(obj, list):
+                return [_make_json_serializable(item) for item in obj]
+
+            return obj
+
+        # Ensure variables are JSON-serializable
+        variables = state_data.get("variables", {})
+        serializable_variables = _make_json_serializable(variables)
+
+        # Create stable representation (sorted keys, no metadata)
+        stable_data = {
+            "workflow_id": state_data.get("workflow_id"),
+            "current_step": state_data.get("current_step"),
+            "completed_steps": sorted(state_data.get("completed_steps", [])),
+            "skipped_steps": sorted(state_data.get("skipped_steps", [])),
+            "status": state_data.get("status"),
+            "variables": serializable_variables,
+        }
+        stable_str = json.dumps(stable_data, sort_keys=True)
+        return hashlib.sha256(stable_str.encode()).hexdigest()
+
+    @staticmethod
+    def validate_state(
+        state_data: dict[str, Any], expected_checksum: str | None = None
+    ) -> tuple[bool, str | None]:
+        """
+        Validate state integrity.
+
+        Returns:
+            (is_valid, error_message)
+        """
+        # Check required fields
+        required_fields = ["workflow_id", "started_at", "status"]
+        for field in required_fields:
+            if field not in state_data:
+                return False, f"Missing required field: {field}"
+
+        # Validate status
+        valid_statuses = ["running", "paused", "completed", "failed"]
+        if state_data.get("status") not in valid_statuses:
+            return False, f"Invalid status: {state_data.get('status')}"
+
+        # Validate checksum if provided
+        if expected_checksum:
+            calculated = StateValidator.calculate_checksum(state_data)
+            if calculated != expected_checksum:
+                return (
+                    False,
+                    f"Checksum mismatch: expected {expected_checksum[:8]}..., got {calculated[:8]}...",
+                )
+
+        return True, None
+
+
+class StateMigrator:
+    """Handles migration of state between versions."""
+
+    @staticmethod
+    def migrate_state(
+        state_data: dict[str, Any], from_version: str, to_version: str
+    ) -> dict[str, Any]:
+        """
+        Migrate state from one version to another.
+
+        Args:
+            state_data: State data to migrate
+            from_version: Source version
+            to_version: Target version
+
+        Returns:
+            Migrated state data
+        """
+        if from_version == to_version:
+            return state_data
+
+        # Migration path: 1.0 -> 2.0
+        if from_version == "1.0" and to_version == "2.0":
+            # Add default fields if missing
+            if "skipped_steps" not in state_data:
+                state_data["skipped_steps"] = []
+            if "artifacts" not in state_data:
+                state_data["artifacts"] = {}
+            if "variables" not in state_data:
+                state_data["variables"] = {}
+
+        # Ensure status field exists
+        if "status" not in state_data:
+            state_data["status"] = "running"
+
+        return state_data
+
+
+class AdvancedStateManager:
+    """Advanced workflow state manager with validation, migration, and recovery."""
+
+    def __init__(self, state_dir: Path, compression: bool = False):
+        """
+        Initialize advanced state manager.
+
+        Args:
+            state_dir: Directory for state storage
+            compression: Enable compression for state files
+        """
+        self.state_dir = Path(state_dir)
+        self.state_dir.mkdir(parents=True, exist_ok=True)
+        self.compression = compression
+        self.history_dir = self.state_dir / "history"
+        self.history_dir.mkdir(parents=True, exist_ok=True)
+
+    def save_state(
+        self, state: WorkflowState, workflow_path: Path | None = None
+    ) -> Path:
+        """
+        Save workflow state with validation and metadata.
+
+        Args:
+            state: Workflow state to save
+            workflow_path: Optional path to workflow YAML
+
+        Returns:
+            Path to saved state file
+        """
+        # Convert state to dict
+        state_data = self._state_to_dict(state)
+
+        # Calculate checksum
+        checksum = StateValidator.calculate_checksum(state_data)
+        state_data["_checksum"] = checksum
+
+        # Create metadata
+        state_file = (
+            f"{state.workflow_id}-{datetime.now().strftime('%Y%m%d-%H%M%S')}.json"
+        )
+        if self.compression:
+            state_file += ".gz"
+
+        # Epic 12: Extract checkpoint metadata from state if available
+        checkpoint_metadata = state.variables.get("_checkpoint_metadata", {})
+
+        metadata = StateMetadata(
+            version=CURRENT_STATE_VERSION,
+            saved_at=datetime.now(),
+            checksum=checksum,
+            workflow_id=state.workflow_id,
+            state_file=state_file,
+            workflow_path=str(workflow_path) if workflow_path else None,
+            compression=self.compression,
+            current_step=checkpoint_metadata.get("current_step") or state.current_step,
+            completed_steps_count=checkpoint_metadata.get("completed_steps", len(state.completed_steps)),
+            progress_percentage=checkpoint_metadata.get("progress_percentage", 0.0),
+            trigger_step_id=checkpoint_metadata.get("trigger_step_id"),
+        )
+
+        # Save state file
+        state_path = self.state_dir / state_file
+        self._write_state_file(state_path, state_data, self.compression)
+
+        # Save metadata
+        metadata_path = self.state_dir / f"{state.workflow_id}.meta.json"
+        from .file_utils import atomic_write_json
+        atomic_write_json(metadata_path, metadata.to_dict(), indent=2)
+
+        # Save to history
+        history_path = self.history_dir / state_file
+        self._write_state_file(history_path, state_data, self.compression)
+
+        # Update last pointer
+        last_data = {
+            "workflow_id": state.workflow_id,
+            "state_file": str(state_path),
+            "metadata_file": str(metadata_path),
+            "saved_at": datetime.now().isoformat(),
+            "version": CURRENT_STATE_VERSION,
+            "workflow_path": str(workflow_path) if workflow_path else None,
+        }
+        last_path = self.state_dir / "last.json"
+        from .file_utils import atomic_write_json
+        atomic_write_json(last_path, last_data, indent=2)
+
+        # Session handoff when workflow ends (plan 2.1)
+        if state.status in ("completed", "failed", "paused"):
+            try:
+                from .session_handoff import SessionHandoff, write_handoff
+
+                project_root = self.state_dir.parent.parent
+                done = list(state.completed_steps or [])
+                artifact_paths = [
+                    a.path for a in (state.artifacts or {}).values()
+                    if getattr(a, "path", None)
+                ]
+                next_steps = [
+                    "Resume with: tapps-agents workflow resume",
+                    "Run `bd ready` to see unblocked tasks (if using Beads).",
+                ]
+                handoff = SessionHandoff(
+                    workflow_id=state.workflow_id,
+                    session_ended_at=datetime.now(timezone.utc).isoformat(),
+                    summary=f"Workflow {state.status}. Completed steps: {len(done)}.",
+                    done=done,
+                    decisions=[],
+                    next_steps=next_steps,
+                    artifact_paths=artifact_paths,
+                    bd_ready_hint="Run `bd ready`" if done else None,
+                )
+                write_handoff(project_root, handoff)
+            except Exception as e:  # pylint: disable=broad-except
+                logger.debug("Could not write session handoff: %s", e)
+
+        logger.info(f"Saved workflow state: {state.workflow_id} to {state_path}")
+        return state_path
+
+    def load_state(
+        self,
+        workflow_id: str | None = None,
+        state_file: Path | None = None,
+        validate: bool = True,
+    ) -> tuple[WorkflowState, StateMetadata]:
+        """
+        Load workflow state with validation and migration.
+
+        Args:
+            workflow_id: Workflow ID to load (uses last if not specified)
+            state_file: Specific state file to load
+            validate: Whether to validate state integrity
+
+        Returns:
+            (WorkflowState, StateMetadata)
+        """
+        # Determine which state file to load
+        if state_file:
+            state_path = Path(state_file)
+        elif workflow_id:
+            # Find latest state for workflow
+            metadata_path = self.state_dir / f"{workflow_id}.meta.json"
+            if metadata_path.exists():
+                with open(metadata_path, encoding="utf-8") as f:
+                    metadata_data = json.load(f)
+                state_path = self.state_dir / metadata_data["state_file"]
+            else:
+                raise FileNotFoundError(f"No state found for workflow: {workflow_id}")
+        else:
+            # Load last state
+            last_path = self.state_dir / "last.json"
+            if not last_path.exists():
+                raise FileNotFoundError("No persisted workflow state found")
+
+            with open(last_path, encoding="utf-8") as f:
+                last_data = json.load(f)
+
+            state_path = Path(last_data["state_file"])
+            if not state_path.is_absolute():
+                state_path = self.state_dir / state_path
+
+        # Load state data
+        state_data = self._read_state_file(state_path)
+
+        # Extract checksum if present
+        expected_checksum = state_data.pop("_checksum", None)
+
+        # Validate if requested
+        if validate:
+            is_valid, error = StateValidator.validate_state(
+                state_data, expected_checksum
+            )
+            if not is_valid:
+                logger.warning(f"State validation failed: {error}")
+                # Try to recover from history
+                recovered_workflow_id = (
+                    workflow_id
+                    or state_data.get("workflow_id")
+                    or state_data.get("id")
+                    or ""
+                )
+                if (
+                    not isinstance(recovered_workflow_id, str)
+                    or not recovered_workflow_id
+                ):
+                    raise ValueError(
+                        "Cannot recover: missing workflow_id in state data"
+                    )
+                return self._recover_from_history(state_path, recovered_workflow_id)
+
+        # Check version and migrate if needed
+        metadata_path = self.state_dir / f"{state_data['workflow_id']}.meta.json"
+        if metadata_path.exists():
+            with open(metadata_path, encoding="utf-8") as f:
+                metadata_data = json.load(f)
+            metadata = StateMetadata.from_dict(metadata_data)
+
+            if metadata.version != CURRENT_STATE_VERSION:
+                logger.info(
+                    f"Migrating state from {metadata.version} to {CURRENT_STATE_VERSION}"
+                )
+                state_data = StateMigrator.migrate_state(
+                    state_data, metadata.version, CURRENT_STATE_VERSION
+                )
+                # Update metadata version after migration
+                metadata.version = CURRENT_STATE_VERSION
+        else:
+            # Legacy state without metadata
+            metadata = StateMetadata(
+                version=CURRENT_STATE_VERSION,  # Use current version for legacy states
+                saved_at=datetime.fromisoformat(
+                    state_data.get("started_at", datetime.now().isoformat())
+                ),
+                checksum=expected_checksum or "",
+                workflow_id=state_data["workflow_id"],
+                state_file=str(state_path.name),
+                compression=self.compression,
+            )
+
+        # Convert to WorkflowState
+        state = self._state_from_dict(state_data)
+
+        return state, metadata
+
+    def list_states(self, workflow_id: str | None = None) -> list[dict[str, Any]]:
+        """
+        List available workflow states.
+
+        Args:
+            workflow_id: Optional workflow ID to filter by
+
+        Returns:
+            List of state metadata dictionaries
+        """
+        states = []
+
+        if workflow_id:
+            # List history for specific workflow
+            pattern = f"{workflow_id}-*.json*"
+            for state_file in self.history_dir.glob(pattern):
+                try:
+                    state_data = self._read_state_file(state_file)
+                    states.append(
+                        {
+                            "workflow_id": state_data.get("workflow_id"),
+                            "state_file": str(state_file),
+                            "status": state_data.get("status"),
+                            "current_step": state_data.get("current_step"),
+                            "saved_at": state_file.stat().st_mtime,
+                        }
+                    )
+                except Exception as e:
+                    logger.warning(f"Failed to read state file {state_file}: {e}")
+        else:
+            # List all workflows
+            for metadata_file in self.state_dir.glob("*.meta.json"):
+                try:
+                    with open(metadata_file, encoding="utf-8") as f:
+                        metadata_data = json.load(f)
+                    states.append(metadata_data)
+                except Exception as e:
+                    logger.warning(f"Failed to read metadata {metadata_file}: {e}")
+
+        return sorted(states, key=lambda x: x.get("saved_at", 0), reverse=True)
+
+    def cleanup_old_states(
+        self,
+        retention_days: int = 30,
+        max_states_per_workflow: int = 10,
+        remove_completed: bool = True,
+        dry_run: bool = False,
+    ) -> dict[str, Any]:
+        """
+        Clean up old workflow states.
+
+        Epic 12: State cleanup functionality
+
+        Args:
+            retention_days: Keep states newer than this many days
+            max_states_per_workflow: Maximum states to keep per workflow
+            remove_completed: Whether to remove states for completed workflows
+            dry_run: If True, only report what would be removed (no files deleted)
+
+        Returns:
+            Dictionary with cleanup statistics; if dry_run, includes would_remove list
+        """
+        from datetime import timedelta
+
+        cutoff_date = datetime.now() - timedelta(days=retention_days)
+        removed_count = 0
+        removed_size = 0
+        workflows_cleaned = set()
+        would_remove: list[dict[str, Any]] = []
+
+        # Group states by workflow_id
+        workflow_states: dict[str, list[dict[str, Any]]] = {}
+        for state_info in self.list_states():
+            workflow_id = state_info.get("workflow_id", "unknown")
+            if workflow_id not in workflow_states:
+                workflow_states[workflow_id] = []
+            workflow_states[workflow_id].append(state_info)
+
+        # Clean up each workflow
+        for workflow_id, states in workflow_states.items():
+            # Sort by saved_at (newest first)
+            states_sorted = sorted(
+                states,
+                key=lambda x: datetime.fromisoformat(x.get("saved_at", "1970-01-01"))
+                if isinstance(x.get("saved_at"), str)
+                else datetime.fromtimestamp(0),
+                reverse=True,
+            )
+
+            # Check if workflow is completed
+            is_completed = False
+            if remove_completed:
+                # Try to load latest state to check status
+                try:
+                    state, _ = self.load_state(workflow_id=workflow_id, validate=False)
+                    is_completed = state.status in ("completed", "failed", "cancelled")
+                except Exception:
+                    pass
+
+            # Remove old states
+            for state_info in states_sorted:
+                state_file = state_info.get("state_file", "")
+                if not state_file:
+                    continue
+
+                state_path = self.state_dir / state_file
+                if not state_path.exists():
+                    continue
+
+                # Check retention period
+                saved_at_str = state_info.get("saved_at", "")
+                try:
+                    if isinstance(saved_at_str, str):
+                        saved_at = datetime.fromisoformat(saved_at_str)
+                    else:
+                        saved_at = datetime.fromtimestamp(saved_at_str)
+                except (ValueError, TypeError):
+                    saved_at = datetime.fromtimestamp(0)
+
+                should_remove = False
+                reason = ""
+
+                # Remove if older than retention period
+                if saved_at < cutoff_date:
+                    should_remove = True
+                    reason = "retention_period"
+                # Remove if completed workflow and remove_completed is True
+                elif is_completed and remove_completed:
+                    # Keep only the most recent completed state
+                    if states_sorted.index(state_info) > 0:
+                        should_remove = True
+                        reason = "completed_workflow"
+                # Remove if exceeds max_states_per_workflow
+                elif len([s for s in states_sorted if not should_remove]) > max_states_per_workflow:
+                    if states_sorted.index(state_info) >= max_states_per_workflow:
+                        should_remove = True
+                        reason = "max_states_limit"
+
+                if should_remove:
+                    try:
+                        # Get file size before removal
+                        file_size = state_path.stat().st_size
+                        if dry_run:
+                            would_remove.append({
+                                "state_file": state_file,
+                                "workflow_id": workflow_id,
+                                "size_bytes": file_size,
+                                "reason": reason,
+                            })
+                            removed_size += file_size
+                            removed_count += 1
+                            workflows_cleaned.add(workflow_id)
+                        else:
+                            state_path.unlink()
+                            removed_size += file_size
+                            removed_count += 1
+                            workflows_cleaned.add(workflow_id)
+                            # Also remove from history if exists
+                            history_path = self.history_dir / state_file
+                            if history_path.exists():
+                                history_path.unlink()
+                        logger.debug(
+                            f"Removed state {state_file} (reason: {reason})"
+                        )
+                    except Exception as e:
+                        logger.warning(f"Failed to remove state {state_file}: {e}")
+
+        result = {
+            "removed_count": removed_count,
+            "removed_size_mb": round(removed_size / (1024 * 1024), 2),
+            "workflows_cleaned": len(workflows_cleaned),
+        }
+        if dry_run:
+            result["dry_run"] = True
+            result["would_remove"] = would_remove
+
+        logger.info(
+            f"State cleanup completed: removed {removed_count} states "
+            f"({result['removed_size_mb']} MB) from {len(workflows_cleaned)} workflows"
+        )
+
+        return result
+
+    def _recover_from_history(
+        self, corrupted_path: Path, workflow_id: str
+    ) -> tuple[WorkflowState, StateMetadata]:
+        """Attempt to recover state from history."""
+        logger.info(f"Attempting recovery from history for {workflow_id}")
+
+        # Find most recent valid state in history
+        pattern = f"{workflow_id}-*.json*"
+        history_files = sorted(
+            self.history_dir.glob(pattern),
+            key=lambda p: p.stat().st_mtime,
+            reverse=True,
+        )
+
+        for history_file in history_files:
+            try:
+                state_data = self._read_state_file(history_file)
+                expected_checksum = state_data.pop("_checksum", None)
+                is_valid, error = StateValidator.validate_state(
+                    state_data, expected_checksum
+                )
+                if is_valid:
+                    logger.info(f"Recovered valid state from {history_file}")
+                    # Re-save as current state
+                    return self.load_state(state_file=history_file, validate=False)
+            except Exception as e:
+                logger.warning(f"Failed to recover from {history_file}: {e}")
+                continue
+
+        raise ValueError(f"Could not recover state for workflow {workflow_id}")
+
+    def _write_state_file(self, path: Path, data: dict[str, Any], compress: bool):
+        """Write state file with optional compression using atomic write."""
+        from .file_utils import atomic_write_json
+
+        atomic_write_json(path, data, compress=compress, indent=2)
+
+    def _read_state_file(self, path: Path) -> dict[str, Any]:
+        """Read state file with optional decompression and safe loading."""
+        from .file_utils import safe_load_json
+
+        # Use safe loading with validation
+        result = safe_load_json(
+            path,
+            retries=3,
+            backoff=0.5,
+            min_age_seconds=1.0,  # Shorter wait for explicit loads
+            min_size=50,  # Smaller minimum for compressed files
+        )
+
+        if result is None:
+            raise ValueError(f"Failed to load state file: {path}")
+
+        return result
+
+    def _state_to_dict(self, state: WorkflowState) -> dict[str, Any]:
+        """Convert WorkflowState to dictionary."""
+
+        def _artifact_to_dict(name: str, artifact: Any) -> dict[str, Any]:
+            created_at = getattr(artifact, "created_at", None)
+            return {
+                "name": getattr(artifact, "name", name) or name,
+                "path": str(getattr(artifact, "path", "")),
+                "status": getattr(artifact, "status", "pending"),
+                "created_by": getattr(artifact, "created_by", None),
+                "created_at": created_at.isoformat() if created_at else None,
+                "metadata": getattr(artifact, "metadata", {}) or {},
+            }
+
+        def _make_json_serializable(obj: Any) -> Any:
+            """Recursively convert objects to JSON-serializable format."""
+            # Handle ProjectProfile objects
+            if hasattr(obj, "to_dict") and hasattr(obj, "compliance_requirements"):
+                # This is likely a ProjectProfile object
+                try:
+                    from ..core.project_profile import ProjectProfile
+                    if isinstance(obj, ProjectProfile):
+                        return obj.to_dict()
+                except (ImportError, AttributeError):
+                    pass
+
+            # Handle ComplianceRequirement objects
+            if hasattr(obj, "name") and hasattr(obj, "confidence") and hasattr(obj, "indicators"):
+                try:
+                    from ..core.project_profile import ComplianceRequirement
+                    if isinstance(obj, ComplianceRequirement):
+                        return asdict(obj)
+                except (ImportError, AttributeError):
+                    pass
+
+            # Handle dictionaries recursively
+            if isinstance(obj, dict):
+                return {k: _make_json_serializable(v) for k, v in obj.items()}
+
+            # Handle lists recursively
+            if isinstance(obj, list):
+                return [_make_json_serializable(item) for item in obj]
+
+            # Handle other non-serializable types
+            try:
+                json.dumps(obj)
+                return obj
+            except (TypeError, ValueError):
+                # For non-serializable types, convert to string as fallback
+                return str(obj)
+
+        # Convert variables to JSON-serializable format
+        variables = state.variables or {}
+        serializable_variables = _make_json_serializable(variables)
+
+        return {
+            "workflow_id": state.workflow_id,
+            "started_at": state.started_at.isoformat(),
+            "current_step": state.current_step,
+            "completed_steps": state.completed_steps,
+            "skipped_steps": state.skipped_steps,
+            "artifacts": {
+                k: _artifact_to_dict(k, v) for k, v in (state.artifacts or {}).items()
+            },
+            "variables": serializable_variables,
+            "status": state.status,
+            "error": state.error,
+        }
+
+    def _state_from_dict(self, data: dict[str, Any]) -> WorkflowState:
+        """Convert dictionary to WorkflowState."""
+        from .models import Artifact
+
+        artifacts = {}
+        for k, v in data.get("artifacts", {}).items():
+            if not isinstance(v, dict):
+                continue
+
+            # New format (preferred)
+            if "name" in v or "created_by" in v or "metadata" in v:
+                created_at = v.get("created_at")
+                artifacts[k] = Artifact(
+                    name=v.get("name", k) or k,
+                    path=str(v.get("path", "")),
+                    status=v.get("status", "pending"),
+                    created_by=v.get("created_by"),
+                    created_at=(
+                        datetime.fromisoformat(created_at) if created_at else None
+                    ),
+                    metadata=v.get("metadata", {}) or {},
+                )
+                continue
+
+            # Legacy format (v1): {"path": "...", "status": "...", "step_id": "..."}
+            step_id = v.get("step_id")
+            artifacts[k] = Artifact(
+                name=k,
+                path=str(v.get("path", "")),
+                status=v.get("status", "pending"),
+                created_by=step_id if isinstance(step_id, str) else None,
+                created_at=None,
+                metadata={},
+            )
+
+        return WorkflowState(
+            workflow_id=data["workflow_id"],
+            started_at=datetime.fromisoformat(data["started_at"]),
+            current_step=data.get("current_step"),
+            completed_steps=data.get("completed_steps", []),
+            skipped_steps=data.get("skipped_steps", []),
+            artifacts=artifacts,
+            variables=data.get("variables", {}),
+            status=data.get("status", "running"),
+            error=data.get("error"),
+        )