devflow-engine 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devflow_engine/__init__.py +3 -0
- devflow_engine/agentic_prompts.py +100 -0
- devflow_engine/agentic_runtime.py +398 -0
- devflow_engine/api_key_flow_harness.py +539 -0
- devflow_engine/api_keys.py +357 -0
- devflow_engine/bootstrap/__init__.py +2 -0
- devflow_engine/bootstrap/provision_from_template.py +84 -0
- devflow_engine/cli/__init__.py +0 -0
- devflow_engine/cli/app.py +7270 -0
- devflow_engine/core/__init__.py +0 -0
- devflow_engine/core/config.py +86 -0
- devflow_engine/core/logging.py +29 -0
- devflow_engine/core/paths.py +45 -0
- devflow_engine/core/toml_kv.py +33 -0
- devflow_engine/devflow_event_worker.py +1292 -0
- devflow_engine/devflow_state.py +201 -0
- devflow_engine/devin2/__init__.py +9 -0
- devflow_engine/devin2/agent_definition.py +120 -0
- devflow_engine/devin2/pi_runner.py +204 -0
- devflow_engine/devin_orchestration.py +69 -0
- devflow_engine/docs/prompts/anti-patterns.md +42 -0
- devflow_engine/docs/prompts/devin-agent-prompt.md +55 -0
- devflow_engine/docs/prompts/devin2-agent-prompt.md +81 -0
- devflow_engine/docs/prompts/examples/devin-vapi-clone-reference-exchange.json +85 -0
- devflow_engine/doctor/__init__.py +2 -0
- devflow_engine/doctor/triage.py +140 -0
- devflow_engine/error/__init__.py +0 -0
- devflow_engine/error/remediation.py +21 -0
- devflow_engine/errors/error_solver_dag.py +522 -0
- devflow_engine/errors/runtime_observability.py +67 -0
- devflow_engine/idea/__init__.py +4 -0
- devflow_engine/idea/actors.py +481 -0
- devflow_engine/idea/agentic.py +465 -0
- devflow_engine/idea/analyze.py +93 -0
- devflow_engine/idea/devin_chat_dag.py +1 -0
- devflow_engine/idea/diff.py +99 -0
- devflow_engine/idea/drafts.py +446 -0
- devflow_engine/idea/idea_creation_dag.py +643 -0
- devflow_engine/idea/ideation_enrichment.py +355 -0
- devflow_engine/idea/ideation_enrichment_worker.py +19 -0
- devflow_engine/idea/paths.py +28 -0
- devflow_engine/idea/promote.py +53 -0
- devflow_engine/idea/redaction.py +27 -0
- devflow_engine/idea/repo_tools.py +1277 -0
- devflow_engine/idea/response_mode.py +30 -0
- devflow_engine/idea/story_pipeline.py +1585 -0
- devflow_engine/idea/sufficiency.py +376 -0
- devflow_engine/idea/traditional_stories.py +1257 -0
- devflow_engine/implementation/__init__.py +0 -0
- devflow_engine/implementation/alembic_preflight.py +700 -0
- devflow_engine/implementation/dag.py +8450 -0
- devflow_engine/implementation/green_gate.py +93 -0
- devflow_engine/implementation/prompts.py +108 -0
- devflow_engine/implementation/test_runtime.py +623 -0
- devflow_engine/integration/__init__.py +19 -0
- devflow_engine/integration/agentic.py +66 -0
- devflow_engine/integration/dag.py +3539 -0
- devflow_engine/integration/prompts.py +114 -0
- devflow_engine/integration/supabase_schema.sql +31 -0
- devflow_engine/integration/supabase_sync.py +177 -0
- devflow_engine/llm/__init__.py +1 -0
- devflow_engine/llm/cli_one_shot.py +84 -0
- devflow_engine/llm/cli_stream.py +371 -0
- devflow_engine/llm/execution_context.py +26 -0
- devflow_engine/llm/invoke.py +1322 -0
- devflow_engine/llm/provider_api.py +304 -0
- devflow_engine/llm/repo_knowledge.py +588 -0
- devflow_engine/llm_primitives.py +315 -0
- devflow_engine/orchestration.py +62 -0
- devflow_engine/planning/__init__.py +0 -0
- devflow_engine/planning/analyze_repo.py +92 -0
- devflow_engine/planning/render_drafts.py +133 -0
- devflow_engine/playground/__init__.py +0 -0
- devflow_engine/playground/hooks.py +26 -0
- devflow_engine/playwright_workflow/__init__.py +5 -0
- devflow_engine/playwright_workflow/dag.py +1317 -0
- devflow_engine/process/__init__.py +5 -0
- devflow_engine/process/dag.py +59 -0
- devflow_engine/project_registration/__init__.py +3 -0
- devflow_engine/project_registration/dag.py +1581 -0
- devflow_engine/project_registry.py +109 -0
- devflow_engine/prompts/devin/generic/prompt.md +6 -0
- devflow_engine/prompts/devin/ideation/prompt.md +263 -0
- devflow_engine/prompts/devin/ideation/scenarios.md +5 -0
- devflow_engine/prompts/devin/ideation_loop/prompt.md +6 -0
- devflow_engine/prompts/devin/insight/prompt.md +11 -0
- devflow_engine/prompts/devin/insight/scenarios.md +5 -0
- devflow_engine/prompts/devin/intake/prompt.md +15 -0
- devflow_engine/prompts/devin/iterate/prompt.md +12 -0
- devflow_engine/prompts/devin/shared/eval_doctrine.md +9 -0
- devflow_engine/prompts/devin/shared/principles.md +246 -0
- devflow_engine/prompts/devin_eval/assessment/prompt.md +18 -0
- devflow_engine/prompts/idea/api_ideation_agent/prompt.md +8 -0
- devflow_engine/prompts/idea/api_insight_agent/prompt.md +8 -0
- devflow_engine/prompts/idea/response_doctrine/prompt.md +18 -0
- devflow_engine/prompts/implementation/dependency_assessment/prompt.md +12 -0
- devflow_engine/prompts/implementation/green/green/prompt.md +11 -0
- devflow_engine/prompts/implementation/green/node_config/prompt.md +3 -0
- devflow_engine/prompts/implementation/green_review/outcome_review/prompt.md +5 -0
- devflow_engine/prompts/implementation/green_review/prior_run_review/prompt.md +5 -0
- devflow_engine/prompts/implementation/red/prompt.md +27 -0
- devflow_engine/prompts/implementation/redreview/prompt.md +23 -0
- devflow_engine/prompts/implementation/redreview_repair/prompt.md +16 -0
- devflow_engine/prompts/implementation/setupdoc/prompt.md +10 -0
- devflow_engine/prompts/implementation/story_planning/prompt.md +13 -0
- devflow_engine/prompts/implementation/test_design/prompt.md +27 -0
- devflow_engine/prompts/integration/README.md +185 -0
- devflow_engine/prompts/integration/green/example.md +67 -0
- devflow_engine/prompts/integration/green/green/prompt.md +10 -0
- devflow_engine/prompts/integration/green/node_config/prompt.md +42 -0
- devflow_engine/prompts/integration/green/past_prompts/20260417T212300/green/prompt.md +15 -0
- devflow_engine/prompts/integration/green/past_prompts/20260417T212300/node_config/prompt.md +42 -0
- devflow_engine/prompts/integration/green_enrich/example.md +79 -0
- devflow_engine/prompts/integration/green_enrich/green_enrich/prompt.md +9 -0
- devflow_engine/prompts/integration/green_enrich/node_config/prompt.md +41 -0
- devflow_engine/prompts/integration/green_enrich/past_prompts/20260417T212300/green_enrich/prompt.md +14 -0
- devflow_engine/prompts/integration/green_enrich/past_prompts/20260417T212300/node_config/prompt.md +41 -0
- devflow_engine/prompts/integration/red/code_repair/prompt.md +12 -0
- devflow_engine/prompts/integration/red/example.md +152 -0
- devflow_engine/prompts/integration/red/node_config/prompt.md +86 -0
- devflow_engine/prompts/integration/red/past_prompts/20260417T212300/code_repair/prompt.md +19 -0
- devflow_engine/prompts/integration/red/past_prompts/20260417T212300/node_config/prompt.md +84 -0
- devflow_engine/prompts/integration/red/past_prompts/20260417T212300/red/prompt.md +16 -0
- devflow_engine/prompts/integration/red/past_prompts/20260417T212300/red_repair/prompt.md +15 -0
- devflow_engine/prompts/integration/red/past_prompts/20260417T215032/code_repair/prompt.md +10 -0
- devflow_engine/prompts/integration/red/past_prompts/20260417T215032/node_config/prompt.md +84 -0
- devflow_engine/prompts/integration/red/past_prompts/20260417T215032/red_repair/prompt.md +11 -0
- devflow_engine/prompts/integration/red/red/prompt.md +11 -0
- devflow_engine/prompts/integration/red/red_repair/prompt.md +12 -0
- devflow_engine/prompts/integration/red_review/example.md +71 -0
- devflow_engine/prompts/integration/red_review/node_config/prompt.md +41 -0
- devflow_engine/prompts/integration/red_review/past_prompts/20260417T212300/node_config/prompt.md +41 -0
- devflow_engine/prompts/integration/red_review/past_prompts/20260417T212300/red_review/prompt.md +15 -0
- devflow_engine/prompts/integration/red_review/red_review/prompt.md +9 -0
- devflow_engine/prompts/integration/resolve/example.md +111 -0
- devflow_engine/prompts/integration/resolve/node_config/prompt.md +64 -0
- devflow_engine/prompts/integration/resolve/past_prompts/20260417T212300/node_config/prompt.md +64 -0
- devflow_engine/prompts/integration/resolve/past_prompts/20260417T212300/resolve_implicated_users/prompt.md +15 -0
- devflow_engine/prompts/integration/resolve/past_prompts/20260417T212300/resolve_side_effects/prompt.md +15 -0
- devflow_engine/prompts/integration/resolve/resolve_implicated_users/prompt.md +10 -0
- devflow_engine/prompts/integration/resolve/resolve_side_effects/prompt.md +10 -0
- devflow_engine/prompts/integration/validate/build_idea_acceptance_coverage/prompt.md +12 -0
- devflow_engine/prompts/integration/validate/code_repair/prompt.md +13 -0
- devflow_engine/prompts/integration/validate/example.md +143 -0
- devflow_engine/prompts/integration/validate/node_config/prompt.md +87 -0
- devflow_engine/prompts/integration/validate/past_prompts/20260417T212300/code_repair/prompt.md +19 -0
- devflow_engine/prompts/integration/validate/past_prompts/20260417T212300/node_config/prompt.md +67 -0
- devflow_engine/prompts/integration/validate/past_prompts/20260417T212300/validate_enrich_gate/prompt.md +17 -0
- devflow_engine/prompts/integration/validate/past_prompts/20260417T212300/validate_repair/prompt.md +16 -0
- devflow_engine/prompts/integration/validate/past_prompts/20260417T215032/code_repair/prompt.md +10 -0
- devflow_engine/prompts/integration/validate/past_prompts/20260417T215032/node_config/prompt.md +67 -0
- devflow_engine/prompts/integration/validate/past_prompts/20260417T215032/validate_repair/prompt.md +9 -0
- devflow_engine/prompts/integration/validate/validate_enrich_gate/prompt.md +10 -0
- devflow_engine/prompts/integration/validate/validate_repair/prompt.md +20 -0
- devflow_engine/prompts/integration/write_workflows/example.md +100 -0
- devflow_engine/prompts/integration/write_workflows/node_config/prompt.md +44 -0
- devflow_engine/prompts/integration/write_workflows/past_prompts/20260417T212300/node_config/prompt.md +44 -0
- devflow_engine/prompts/integration/write_workflows/past_prompts/20260417T212300/write_workflows/prompt.md +17 -0
- devflow_engine/prompts/integration/write_workflows/write_workflows/prompt.md +11 -0
- devflow_engine/prompts/iterate/README.md +7 -0
- devflow_engine/prompts/iterate/coder/prompt.md +11 -0
- devflow_engine/prompts/iterate/framer/prompt.md +11 -0
- devflow_engine/prompts/iterate/iterator/prompt.md +13 -0
- devflow_engine/prompts/iterate/observer/prompt.md +11 -0
- devflow_engine/prompts/recovery/diagnosis/prompt.md +7 -0
- devflow_engine/prompts/recovery/execution/prompt.md +8 -0
- devflow_engine/prompts/recovery/execution_verification/prompt.md +7 -0
- devflow_engine/prompts/recovery/failure_investigation/prompt.md +10 -0
- devflow_engine/prompts/recovery/preflight_health_repo_repair/prompt.md +8 -0
- devflow_engine/prompts/recovery/remediation_execution/prompt.md +11 -0
- devflow_engine/prompts/recovery/root_cause_investigation/prompt.md +12 -0
- devflow_engine/prompts/scope_idea/doctrine/prompt.md +7 -0
- devflow_engine/prompts/source_doc_eval/document/prompt.md +6 -0
- devflow_engine/prompts/source_doc_eval/targeted_mutation/prompt.md +9 -0
- devflow_engine/prompts/source_doc_mutation/domain_entities/prompt.md +6 -0
- devflow_engine/prompts/source_doc_mutation/product_brief/prompt.md +6 -0
- devflow_engine/prompts/source_doc_mutation/project_doc_coherence/prompt.md +7 -0
- devflow_engine/prompts/source_doc_mutation/project_doc_render/prompt.md +9 -0
- devflow_engine/prompts/source_doc_mutation/source_doc_coherence/prompt.md +5 -0
- devflow_engine/prompts/source_doc_mutation/source_doc_enrichment_coherence/prompt.md +6 -0
- devflow_engine/prompts/source_doc_mutation/user_workflows/prompt.md +6 -0
- devflow_engine/prompts/source_scope/doctrine/prompt.md +10 -0
- devflow_engine/prompts/ui_grounding/doctrine/prompt.md +7 -0
- devflow_engine/recovery/__init__.py +3 -0
- devflow_engine/recovery/dag.py +2609 -0
- devflow_engine/recovery/models.py +220 -0
- devflow_engine/refactor.py +93 -0
- devflow_engine/registry/__init__.py +1 -0
- devflow_engine/registry/cards.py +238 -0
- devflow_engine/registry/domain_normalize.py +60 -0
- devflow_engine/registry/effects.py +65 -0
- devflow_engine/registry/enforce_report.py +150 -0
- devflow_engine/registry/module_cards_classify.py +164 -0
- devflow_engine/registry/module_cards_draft.py +184 -0
- devflow_engine/registry/module_cards_gate.py +59 -0
- devflow_engine/registry/packages.py +347 -0
- devflow_engine/registry/pathways.py +323 -0
- devflow_engine/review/__init__.py +11 -0
- devflow_engine/review/dag.py +588 -0
- devflow_engine/review/review_story.py +67 -0
- devflow_engine/scope_idea/__init__.py +3 -0
- devflow_engine/scope_idea/agentic.py +39 -0
- devflow_engine/scope_idea/dag.py +1069 -0
- devflow_engine/scope_idea/models.py +175 -0
- devflow_engine/skills/builtins/devflow/queue_failure_investigation/SKILL.md +112 -0
- devflow_engine/skills/builtins/devflow/queue_idea_to_story/SKILL.md +120 -0
- devflow_engine/skills/builtins/devflow/queue_integration/SKILL.md +105 -0
- devflow_engine/skills/builtins/devflow/queue_recovery/SKILL.md +108 -0
- devflow_engine/skills/builtins/devflow/queue_runtime_core/SKILL.md +155 -0
- devflow_engine/skills/builtins/devflow/queue_story_implementation/SKILL.md +122 -0
- devflow_engine/skills/builtins/devin/idea_to_story_handoff/SKILL.md +120 -0
- devflow_engine/skills/builtins/devin/ideation/SKILL.md +168 -0
- devflow_engine/skills/builtins/devin/ideation/state-and-phrasing-reference.md +18 -0
- devflow_engine/skills/builtins/devin/insight/SKILL.md +22 -0
- devflow_engine/skills/registry.example.yaml +42 -0
- devflow_engine/source_doc_assumptions.py +291 -0
- devflow_engine/source_doc_mutation_dag.py +1606 -0
- devflow_engine/source_doc_mutation_eval.py +417 -0
- devflow_engine/source_doc_mutation_worker.py +25 -0
- devflow_engine/source_docs_schema.py +207 -0
- devflow_engine/source_docs_updater.py +309 -0
- devflow_engine/source_scope/__init__.py +15 -0
- devflow_engine/source_scope/agentic.py +45 -0
- devflow_engine/source_scope/dag.py +1626 -0
- devflow_engine/source_scope/models.py +177 -0
- devflow_engine/stores/__init__.py +0 -0
- devflow_engine/stores/execution_store.py +3534 -0
- devflow_engine/story/__init__.py +0 -0
- devflow_engine/story/contracts.py +160 -0
- devflow_engine/story/discovery.py +47 -0
- devflow_engine/story/evidence.py +118 -0
- devflow_engine/story/hashing.py +27 -0
- devflow_engine/story/implemented_queue_purge.py +148 -0
- devflow_engine/story/indexer.py +105 -0
- devflow_engine/story/io.py +20 -0
- devflow_engine/story/markdown_contracts.py +298 -0
- devflow_engine/story/reconciliation.py +408 -0
- devflow_engine/story/validate_stories.py +149 -0
- devflow_engine/story/validate_tests_story.py +512 -0
- devflow_engine/story/validation.py +133 -0
- devflow_engine/ui_grounding/__init__.py +11 -0
- devflow_engine/ui_grounding/agentic.py +31 -0
- devflow_engine/ui_grounding/dag.py +874 -0
- devflow_engine/ui_grounding/models.py +224 -0
- devflow_engine/ui_grounding/pencil_bridge.py +247 -0
- devflow_engine/vendor/__init__.py +0 -0
- devflow_engine/vendor/datalumina_genai/__init__.py +11 -0
- devflow_engine/vendor/datalumina_genai/core/__init__.py +0 -0
- devflow_engine/vendor/datalumina_genai/core/exceptions.py +9 -0
- devflow_engine/vendor/datalumina_genai/core/nodes/__init__.py +0 -0
- devflow_engine/vendor/datalumina_genai/core/nodes/agent.py +48 -0
- devflow_engine/vendor/datalumina_genai/core/nodes/agent_streaming_node.py +26 -0
- devflow_engine/vendor/datalumina_genai/core/nodes/base.py +89 -0
- devflow_engine/vendor/datalumina_genai/core/nodes/concurrent.py +30 -0
- devflow_engine/vendor/datalumina_genai/core/nodes/router.py +69 -0
- devflow_engine/vendor/datalumina_genai/core/schema.py +72 -0
- devflow_engine/vendor/datalumina_genai/core/task.py +52 -0
- devflow_engine/vendor/datalumina_genai/core/validate.py +139 -0
- devflow_engine/vendor/datalumina_genai/core/workflow.py +200 -0
- devflow_engine/worker.py +1086 -0
- devflow_engine/worker_guard.py +233 -0
- devflow_engine-1.0.0.dist-info/METADATA +235 -0
- devflow_engine-1.0.0.dist-info/RECORD +393 -0
- devflow_engine-1.0.0.dist-info/WHEEL +4 -0
- devflow_engine-1.0.0.dist-info/entry_points.txt +3 -0
- devin/__init__.py +6 -0
- devin/dag.py +58 -0
- devin/dag_two_arm.py +138 -0
- devin/devin_chat_scenario_catalog.json +588 -0
- devin/devin_eval.py +677 -0
- devin/nodes/__init__.py +0 -0
- devin/nodes/ideation/__init__.py +0 -0
- devin/nodes/ideation/node.py +195 -0
- devin/nodes/ideation/playground.py +267 -0
- devin/nodes/ideation/prompt.md +65 -0
- devin/nodes/ideation/scenarios/continue_refinement.py +13 -0
- devin/nodes/ideation/scenarios/continue_refinement_evals.py +18 -0
- devin/nodes/ideation/scenarios/idea_fits_existing_patterns.py +17 -0
- devin/nodes/ideation/scenarios/idea_fits_existing_patterns_evals.py +16 -0
- devin/nodes/ideation/scenarios/large_idea_split.py +4 -0
- devin/nodes/ideation/scenarios/large_idea_split_evals.py +17 -0
- devin/nodes/ideation/scenarios/source_documentation_added.py +4 -0
- devin/nodes/ideation/scenarios/source_documentation_added_evals.py +16 -0
- devin/nodes/ideation/scenarios/user_says_create_it.py +30 -0
- devin/nodes/ideation/scenarios/user_says_create_it_evals.py +23 -0
- devin/nodes/ideation/scenarios/vague_idea.py +16 -0
- devin/nodes/ideation/scenarios/vague_idea_evals.py +47 -0
- devin/nodes/ideation/tools.json +312 -0
- devin/nodes/insight/__init__.py +0 -0
- devin/nodes/insight/node.py +49 -0
- devin/nodes/insight/playground.py +154 -0
- devin/nodes/insight/prompt.md +61 -0
- devin/nodes/insight/scenarios/architecture_pattern_query.py +15 -0
- devin/nodes/insight/scenarios/architecture_pattern_query_evals.py +25 -0
- devin/nodes/insight/scenarios/codebase_exploration.py +15 -0
- devin/nodes/insight/scenarios/codebase_exploration_evals.py +23 -0
- devin/nodes/insight/scenarios/devin_ideation_routing.py +19 -0
- devin/nodes/insight/scenarios/devin_ideation_routing_evals.py +39 -0
- devin/nodes/insight/scenarios/devin_insight_routing.py +20 -0
- devin/nodes/insight/scenarios/devin_insight_routing_evals.py +40 -0
- devin/nodes/insight/scenarios/operational_debugging.py +15 -0
- devin/nodes/insight/scenarios/operational_debugging_evals.py +23 -0
- devin/nodes/insight/scenarios/operational_question.py +9 -0
- devin/nodes/insight/scenarios/operational_question_evals.py +8 -0
- devin/nodes/insight/scenarios/queue_status.py +15 -0
- devin/nodes/insight/scenarios/queue_status_evals.py +23 -0
- devin/nodes/insight/scenarios/source_doc_explanation.py +14 -0
- devin/nodes/insight/scenarios/source_doc_explanation_evals.py +21 -0
- devin/nodes/insight/scenarios/worker_state_check.py +15 -0
- devin/nodes/insight/scenarios/worker_state_check_evals.py +22 -0
- devin/nodes/insight/tools.json +126 -0
- devin/nodes/intake/__init__.py +0 -0
- devin/nodes/intake/node.py +27 -0
- devin/nodes/intake/playground.py +47 -0
- devin/nodes/intake/prompt.md +12 -0
- devin/nodes/intake/scenarios/ideation_routing.py +4 -0
- devin/nodes/intake/scenarios/ideation_routing_evals.py +5 -0
- devin/nodes/intake/scenarios/insight_routing.py +4 -0
- devin/nodes/intake/scenarios/insight_routing_evals.py +5 -0
- devin/nodes/iterate/README.md +44 -0
- devin/nodes/iterate/__init__.py +1 -0
- devin/nodes/iterate/_archived_design_stages/01-objectives-requirements.md +112 -0
- devin/nodes/iterate/_archived_design_stages/02-evals.md +131 -0
- devin/nodes/iterate/_archived_design_stages/03-tools-and-boundaries.md +110 -0
- devin/nodes/iterate/_archived_design_stages/04-harness-and-playground.md +32 -0
- devin/nodes/iterate/_archived_design_stages/05-prompt-deferred.md +11 -0
- devin/nodes/iterate/_archived_design_stages/coder_agent_design/01-objectives-requirements.md +20 -0
- devin/nodes/iterate/_archived_design_stages/coder_agent_design/02-evals.md +8 -0
- devin/nodes/iterate/_archived_design_stages/coder_agent_design/03-tools-and-boundaries.md +14 -0
- devin/nodes/iterate/_archived_design_stages/coder_agent_design/04-harness-and-playground.md +12 -0
- devin/nodes/iterate/_archived_design_stages/framer_agent_design/01-objectives-requirements.md +20 -0
- devin/nodes/iterate/_archived_design_stages/framer_agent_design/02-evals.md +8 -0
- devin/nodes/iterate/_archived_design_stages/framer_agent_design/03-tools-and-boundaries.md +13 -0
- devin/nodes/iterate/_archived_design_stages/framer_agent_design/04-harness-and-playground.md +12 -0
- devin/nodes/iterate/_archived_design_stages/iterator_agent_design/01-objectives-requirements.md +25 -0
- devin/nodes/iterate/_archived_design_stages/iterator_agent_design/02-evals.md +9 -0
- devin/nodes/iterate/_archived_design_stages/iterator_agent_design/03-tools-and-boundaries.md +14 -0
- devin/nodes/iterate/_archived_design_stages/iterator_agent_design/04-harness-and-playground.md +12 -0
- devin/nodes/iterate/_archived_design_stages/observer_agent_design/01-objectives-requirements.md +20 -0
- devin/nodes/iterate/_archived_design_stages/observer_agent_design/02-evals.md +8 -0
- devin/nodes/iterate/_archived_design_stages/observer_agent_design/03-tools-and-boundaries.md +14 -0
- devin/nodes/iterate/_archived_design_stages/observer_agent_design/04-harness-and-playground.md +13 -0
- devin/nodes/iterate/agent-roles.md +89 -0
- devin/nodes/iterate/agents/README.md +10 -0
- devin/nodes/iterate/artifacts.md +504 -0
- devin/nodes/iterate/contract.md +100 -0
- devin/nodes/iterate/eval-plan.md +74 -0
- devin/nodes/iterate/node.py +100 -0
- devin/nodes/iterate/pipeline/README.md +13 -0
- devin/nodes/iterate/playground-contract.md +76 -0
- devin/nodes/iterate/prompt.md +11 -0
- devin/nodes/iterate/scenarios/README.md +38 -0
- devin/nodes/iterate/scenarios/artifact-and-loop-scenarios.md +101 -0
- devin/nodes/iterate/scenarios/coder_artifact_alignment.py +32 -0
- devin/nodes/iterate/scenarios/coder_artifact_alignment_evals.py +45 -0
- devin/nodes/iterate/scenarios/coder_bounded_fix.py +27 -0
- devin/nodes/iterate/scenarios/coder_bounded_fix_evals.py +45 -0
- devin/nodes/iterate/scenarios/devin_iterate_routing.py +21 -0
- devin/nodes/iterate/scenarios/devin_iterate_routing_evals.py +36 -0
- devin/nodes/iterate/scenarios/framer_scope_boundary.py +25 -0
- devin/nodes/iterate/scenarios/framer_scope_boundary_evals.py +57 -0
- devin/nodes/iterate/scenarios/framer_task_framing.py +25 -0
- devin/nodes/iterate/scenarios/framer_task_framing_evals.py +58 -0
- devin/nodes/iterate/scenarios/iterate_error_fix.py +21 -0
- devin/nodes/iterate/scenarios/iterate_error_fix_evals.py +39 -0
- devin/nodes/iterate/scenarios/iterate_quick_change.py +21 -0
- devin/nodes/iterate/scenarios/iterate_quick_change_evals.py +35 -0
- devin/nodes/iterate/scenarios/iterate_to_idea_promotion.py +23 -0
- devin/nodes/iterate/scenarios/iterate_to_idea_promotion_evals.py +53 -0
- devin/nodes/iterate/scenarios/iterate_to_insight_reroute.py +23 -0
- devin/nodes/iterate/scenarios/iterate_to_insight_reroute_evals.py +53 -0
- devin/nodes/iterate/scenarios/observer_evidence_seam.py +28 -0
- devin/nodes/iterate/scenarios/observer_evidence_seam_evals.py +55 -0
- devin/nodes/iterate/scenarios/observer_repro_creation.py +28 -0
- devin/nodes/iterate/scenarios/observer_repro_creation_evals.py +45 -0
- devin/nodes/iterate/scenarios/routing-matrix.md +45 -0
- devin/nodes/shared/__init__.py +0 -0
- devin/nodes/shared/filemaker_expert.md +80 -0
- devin/nodes/shared/filemaker_expert.py +354 -0
- devin/nodes/shared/filemaker_expert_eval/runner.py +176 -0
- devin/nodes/shared/filemaker_expert_eval/scenarios.json +65 -0
- devin/nodes/shared/goldilocks_advisor_eval/runner.py +214 -0
- devin/nodes/shared/goldilocks_advisor_eval/scenarios.json +58 -0
- devin/nodes/shared/helpers.py +156 -0
- devin/nodes/shared/idea_compliance_advisor_eval/runner.py +252 -0
- devin/nodes/shared/idea_compliance_advisor_eval/scenarios.json +75 -0
- devin/nodes/shared/models.py +44 -0
- devin/nodes/shared/post.py +40 -0
- devin/nodes/shared/router.py +107 -0
- devin/nodes/shared/tools.py +191 -0
- devin/shared/devin-chat-rubric.md +237 -0
- devin/shared/devin-chat-scenario-suite.md +90 -0
- devin/shared/eval_doctrine.md +9 -0
|
@@ -0,0 +1,1626 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import hashlib
|
|
4
|
+
import json
|
|
5
|
+
import os
|
|
6
|
+
from contextlib import contextmanager
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
from datetime import UTC, datetime
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
import re
|
|
11
|
+
from typing import Any, Iterator
|
|
12
|
+
from urllib.parse import quote
|
|
13
|
+
from urllib.request import Request, urlopen
|
|
14
|
+
|
|
15
|
+
from pydantic import BaseModel
|
|
16
|
+
|
|
17
|
+
from ..devflow_state import _is_uuid_like
|
|
18
|
+
from ..project_registry import find_project_for_repo_root
|
|
19
|
+
from ..stores.execution_store import ExecutionStore
|
|
20
|
+
from . import agentic as source_scope_agentic
|
|
21
|
+
from ..vendor.datalumina_genai.core.nodes.agent import AgentConfig, AgentNode
|
|
22
|
+
from ..vendor.datalumina_genai.core.nodes.base import Node
|
|
23
|
+
from ..vendor.datalumina_genai.core.nodes.router import BaseRouter, RouterNode
|
|
24
|
+
from ..vendor.datalumina_genai.core.schema import NodeConfig, WorkflowSchema
|
|
25
|
+
from ..vendor.datalumina_genai.core.task import TaskContext
|
|
26
|
+
from ..vendor.datalumina_genai.core.workflow import Workflow
|
|
27
|
+
from .models import (
|
|
28
|
+
ApprovalReadyPackageArtifact,
|
|
29
|
+
AssumptionLedgerItem,
|
|
30
|
+
CoverageDecisionArtifact,
|
|
31
|
+
LineageRef,
|
|
32
|
+
NormalizedSourcePacketArtifact,
|
|
33
|
+
RegisteredScopeItemArtifact,
|
|
34
|
+
ScopeCandidateArtifactItem,
|
|
35
|
+
ScopeCandidatesArtifact,
|
|
36
|
+
ScopeCoverageReportArtifact,
|
|
37
|
+
SourceInventoryArtifact,
|
|
38
|
+
ScopeOutlineArtifact,
|
|
39
|
+
ScopeOutlineItem,
|
|
40
|
+
ScopeRegistryRecordArtifact,
|
|
41
|
+
ScopeRevisionPackageArtifact,
|
|
42
|
+
SourceInventoryItem,
|
|
43
|
+
SourceRef,
|
|
44
|
+
SourceScopeDagSummary,
|
|
45
|
+
)
|
|
46
|
+
|
|
47
|
+
# Stable identifier for this DAG; used to namespace runs in the execution store.
DAG_ID = "source_to_scope_dag"

# Process-wide runtime context for the currently executing DAG run.
# Set by the run entry point before node execution and read back through
# _store_run(); both are None outside an active run.
_CURRENT_STORE: ExecutionStore | None = None
_CURRENT_RUN_ID: str | None = None
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
@dataclass(frozen=True)
class SourceToScopeDagResult:
    """Immutable outcome of a single source->scope DAG run.

    Returned to callers so they can surface exit status, locate the run's
    artifacts on disk, and report the final outcome payload.
    """

    # Process-style exit code (0 indicates success).
    exit_code: int
    # Execution-store run identifier for this DAG invocation.
    run_id: str
    # Directory where this run's pipeline artifacts were written.
    pipeline_dir: Path
    # Human-readable summary of the run result.
    message: str
    # Structured outcome payload; shape depends on how the run finished.
    outcome: dict[str, Any]
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class SourceToScopeDagEvent(BaseModel):
    """Inbound event that triggers a source->scope DAG run."""

    # Path (string form) of the repository the scopes are generated for.
    repo_root: str
    # Local project identifier; may be a UUID or a registry key that is
    # resolved to a Supabase UUID at sync time.
    project_id: str
    # Identifier of the source-intake batch being converted to scopes.
    source_intake_id: str
    # References to the source documents feeding scope generation.
    source_refs: list[SourceRef]
    requested_by: str = "marcus"
    mode: str = "draft_scope_generation"
    # Correlation key used to tie node attempts back to this pipeline run.
    pipeline_key: str
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class SourceScopeGenerationFailure(RuntimeError):
    """Raised when a DAG stage cannot produce its artifact.

    Carries the failing stage name and a human-readable reason; the
    exception message is rendered as ``"<stage_name>: <reason>"``.
    """

    def __init__(self, *, stage_name: str, reason: str) -> None:
        message = f"{stage_name}: {reason}"
        super().__init__(message)
        self.stage_name = stage_name
        self.reason = reason
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def _store_run() -> tuple[ExecutionStore, str]:
    """Return the active (store, run_id) pair, failing fast when no run is active."""
    store, run_id = _CURRENT_STORE, _CURRENT_RUN_ID
    if store is None or run_id is None:
        raise RuntimeError("source->scope dag missing runtime store/run_id")
    return store, run_id
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
@contextmanager
def _node_attempt(*, node_id: str, node_name: str, task_context: TaskContext, input_payload: dict[str, Any] | None = None) -> Iterator[str]:
    """Record a single node attempt in the execution store.

    Yields the node execution id for the attempt. If the wrapped body
    raises, the attempt is marked failed (with the exception message)
    before the error propagates; on the success path, finishing the
    attempt is left to the caller.
    """
    store, active_run = _store_run()
    # Prefer the pipeline key stashed in metadata; fall back to the event's.
    pipeline_key = task_context.metadata.get("pipeline_key") or task_context.event.pipeline_key
    correlation = str(pipeline_key)
    exec_id = store.create_node_attempt(
        run_id=active_run,
        node_id=node_id,
        node_name=node_name,
        attempt=1,  # retries are not tracked here; every call records attempt 1
        correlation_id=correlation,
        input=input_payload,
    )
    try:
        yield exec_id
    except Exception as failure:
        store.mark_node_finished(
            node_exec_id=exec_id,
            status="failed",
            error={"message": str(failure)},
            correlation_id=correlation,
        )
        raise
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def _stable_hash(payload: Any) -> str:
|
|
110
|
+
return hashlib.sha256(json.dumps(payload, sort_keys=True).encode("utf-8")).hexdigest()
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def _stable_id(prefix: str, payload: Any, *, size: int = 12) -> str:
    """Deterministic id: *prefix* plus the first *size* hex chars of the payload hash."""
    digest = _stable_hash(payload)
    return prefix + digest[:size]
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def _write_json(path: Path, payload: dict[str, Any]) -> None:
|
|
118
|
+
path.parent.mkdir(parents=True, exist_ok=True)
|
|
119
|
+
path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8")
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def _resolve_supabase_rest_config() -> tuple[str, str] | None:
|
|
123
|
+
if os.environ.get("PYTEST_CURRENT_TEST"):
|
|
124
|
+
return None
|
|
125
|
+
url = os.environ.get("DEVFLOW_SUPABASE_URL") or os.environ.get("SUPABASE_URL")
|
|
126
|
+
key = (
|
|
127
|
+
os.environ.get("DEVFLOW_SUPABASE_SERVICE_KEY")
|
|
128
|
+
or os.environ.get("SUPABASE_SERVICE_ROLE_KEY")
|
|
129
|
+
or os.environ.get("SUPABASE_SERVICE_KEY")
|
|
130
|
+
)
|
|
131
|
+
if not url or not key:
|
|
132
|
+
from ..devflow_state import _keychain_get # type: ignore
|
|
133
|
+
|
|
134
|
+
url = url or _keychain_get("Supabase URL", "Clarity")
|
|
135
|
+
key = key or _keychain_get("Supabase Service Key", "Clarity")
|
|
136
|
+
if not url or not key:
|
|
137
|
+
return None
|
|
138
|
+
return url.rstrip("/"), key
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _postgrest_request(*, method: str, url: str, key: str, body: Any | None = None, prefer: str | None = None) -> Any:
    """Issue an authenticated PostgREST call and return the decoded JSON body.

    Sends the service key as both ``apikey`` and bearer token. Returns the
    parsed JSON response, or None for an empty response body. Network and
    HTTP errors from urlopen propagate to the caller.
    """
    data = json.dumps(body).encode("utf-8") if body is not None else None
    request = Request(url, data=data, method=method)
    headers: dict[str, str] = {
        "apikey": key,
        "Authorization": f"Bearer {key}",
    }
    if body is not None:
        headers["Content-Type"] = "application/json"
    if prefer:
        headers["Prefer"] = prefer
    for name, value in headers.items():
        request.add_header(name, value)
    with urlopen(request, timeout=30) as response:
        text = response.read().decode("utf-8")
    return json.loads(text) if text else None
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def _lookup_supabase_project_uuid_by_field(*, url: str, key: str, field: str, value: str) -> str | None:
    """Look up a devflow_projects row id by an exact field match.

    Returns the row's id only when it looks like a UUID; otherwise None
    (no row, non-list response, or a non-UUID id).
    """
    query_url = f"{url}/rest/v1/devflow_projects?select=id&{field}=eq.{quote(value)}&limit=1"
    rows = _postgrest_request(method="GET", url=query_url, key=key)
    if not (isinstance(rows, list) and rows):
        return None
    first = rows[0] or {}
    candidate = str(first.get("id") or "").strip()
    if _is_uuid_like(candidate):
        return candidate
    return None
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def _resolve_source_scope_sync_project_id(*, url: str, key: str, repo_root: Path, project_id: str) -> str:
    """Map a local project id to the Supabase project UUID used for scope sync.

    Tries, in order: the id itself when it already looks like a UUID, a
    Supabase lookup by the resolved repo-root path, then a lookup by the
    project's remote URL from the local registry.

    Raises:
        RuntimeError: when no Supabase project UUID could be resolved.
    """
    local_id = str(project_id or "").strip()
    if _is_uuid_like(local_id):
        return local_id

    by_repo_root = _lookup_supabase_project_uuid_by_field(
        url=url,
        key=key,
        field="devflow_repo_root",
        value=str(repo_root.expanduser().resolve()),
    )
    if by_repo_root:
        return by_repo_root

    entry = find_project_for_repo_root(repo_root) or {}
    remote = str(entry.get("remote_url") or "").strip()
    if remote:
        by_remote = _lookup_supabase_project_uuid_by_field(
            url=url,
            key=key,
            field="repo_url",
            value=remote,
        )
        if by_remote:
            return by_remote

    raise RuntimeError(
        f"Unable to resolve Supabase project UUID for source->scope sync from local project_id={local_id}"
    )
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
def _sync_registered_scopes_to_supabase(
    *,
    repo_root: Path,
    project_id: str,
    scope_set_id: str,
    run_id: str,
    pipeline_dir: Path,
) -> None:
    """Mirror locally registered scope items into Supabase (``devflow_project_scopes``).

    Best-effort sync: silently returns when Supabase REST is not configured or
    when the local registry artifact is absent. For the resolved
    (project, scope set) pair it first DELETEs all existing rows, then POSTs
    the freshly built rows as an upsert keyed on ``scope_id``.
    """
    config = _resolve_supabase_rest_config()
    if config is None:
        # No Supabase configuration in this environment; nothing to sync.
        return
    registry_path = pipeline_dir / "scope_registry_record.json"
    if not registry_path.exists():
        return

    registry = ScopeRegistryRecordArtifact.model_validate_json(registry_path.read_text(encoding="utf-8"))
    # Optional scope-set-level payload (used only for the scope_set_title column).
    scope_set_registry_path = Path(str(registry.scope_set_registry_ref or "")).expanduser()
    scope_set_payload: dict[str, Any] = {}
    if scope_set_registry_path.exists():
        scope_set_payload = json.loads(scope_set_registry_path.read_text(encoding="utf-8"))

    rows: list[dict[str, Any]] = []
    for registered in registry.registered_scope_items:
        scope_payload_path = Path(registered.registry_ref).expanduser()
        if not scope_payload_path.exists():
            # Skip registry entries whose backing artifact file has disappeared.
            continue
        persisted = json.loads(scope_payload_path.read_text(encoding="utf-8"))
        scope_item = persisted.get("scope_item") if isinstance(persisted.get("scope_item"), dict) else {}
        rows.append(
            {
                "scope_id": registered.scope_id,
                # Provisional: overwritten below with the resolved Supabase UUID.
                "project_id": project_id,
                "scope_set_id": scope_set_id,
                "run_id": run_id,
                # Persisted values win; registry values are the fallback.
                "title": str(scope_item.get("title") or registered.scope_id),
                "summary": scope_item.get("description"),
                "status": persisted.get("status") or registered.status,
                "coverage_status": persisted.get("coverage_status") or registry.coverage_status,
                "review_status": scope_item.get("review_status"),
                "scope_set_title": scope_set_payload.get("scope_set_title"),
                "source_traceability_refs": registered.source_traceability_refs,
                "assumptions": scope_item.get("assumptions") or [],
                "depends_on": scope_item.get("depends_on") or [],
                "origin": "source_to_scope",
                "artifact_path": str(scope_payload_path),
                "updated_at": datetime.now(UTC).isoformat(),
            }
        )

    if not rows:
        return

    url, key = config
    resolved_project_id = _resolve_source_scope_sync_project_id(
        url=url,
        key=key,
        repo_root=repo_root,
        project_id=project_id,
    )
    delete_url = (
        f"{url}/rest/v1/devflow_project_scopes"
        f"?project_id=eq.{quote(resolved_project_id)}&scope_set_id=eq.{quote(scope_set_id)}"
    )
    # Stamp every row with the UUID Supabase actually knows the project by.
    for row in rows:
        row["project_id"] = resolved_project_id
    # Replace-then-upsert: clear stale rows for this scope set, then merge in
    # the current snapshot (on_conflict=scope_id + merge-duplicates).
    _postgrest_request(method="DELETE", url=delete_url, key=key)
    _postgrest_request(
        method="POST",
        url=f"{url}/rest/v1/devflow_project_scopes?on_conflict=scope_id",
        key=key,
        body=rows,
        prefer="resolution=merge-duplicates",
    )
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
def _pipeline_root(repo_root: Path, *, scope_id: str, pipeline_key: str) -> Path:
    """Return the on-disk root for one pipeline run of a scope under ``.devflow``."""
    return repo_root.joinpath(".devflow", "scopes", scope_id, "pipelines", DAG_ID, pipeline_key)
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def _classify_source_type(raw_type: str) -> str:
|
|
279
|
+
lowered = raw_type.strip().lower()
|
|
280
|
+
if lowered in {"transcript", "doc", "repo", "image", "notes", "design"}:
|
|
281
|
+
return lowered
|
|
282
|
+
if lowered in {"scope_doc", "brief", "proposal"}:
|
|
283
|
+
return "doc"
|
|
284
|
+
return "unknown"
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
def _load_source_text(repo_root: Path, source: SourceRef) -> tuple[str, str]:
|
|
288
|
+
if source.text:
|
|
289
|
+
return source.text.strip(), f"inline:{source.type}"
|
|
290
|
+
if source.path:
|
|
291
|
+
path = Path(source.path)
|
|
292
|
+
if not path.is_absolute():
|
|
293
|
+
path = repo_root / path
|
|
294
|
+
if path.exists() and path.is_file():
|
|
295
|
+
return path.read_text(encoding="utf-8").strip(), str(path)
|
|
296
|
+
return "", str(path)
|
|
297
|
+
return "", f"unresolved:{source.type}"
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
def _sentence_chunks(text: str) -> list[str]:
|
|
301
|
+
normalized = re.sub(r"\s+", " ", text.replace("\n", " ")).strip()
|
|
302
|
+
raw_parts = re.split(r"(?<=[.!?])\s+|\s*[;•]\s*", normalized)
|
|
303
|
+
chunks: list[str] = []
|
|
304
|
+
for part in raw_parts:
|
|
305
|
+
cleaned = part.strip(" .- ")
|
|
306
|
+
if len(cleaned) >= 12:
|
|
307
|
+
chunks.append(cleaned)
|
|
308
|
+
return chunks
|
|
309
|
+
|
|
310
|
+
|
|
311
|
+
def _normalize_text(text: str) -> str:
|
|
312
|
+
return re.sub(r"[^a-z0-9\s]", " ", text.lower())
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
def _keyword_tokens(text: str) -> list[str]:
    """Extract deduplicated keyword tokens from *text* in first-seen order.

    Tokens come from the normalized text, must be at least 4 characters long,
    and must not be one of the common stop words below.
    """
    stop = {"the", "and", "with", "from", "that", "this", "into", "for", "will", "must", "should", "have", "has", "need", "needs", "able", "well", "when", "after", "before", "through"}
    tokens: list[str] = []
    # O(1) membership via a set instead of scanning the result list per token
    # (the original `token not in tokens` list scan was O(n^2) overall).
    seen: set[str] = set()
    for token in _normalize_text(text).split():
        if len(token) >= 4 and token not in stop and token not in seen:
            seen.add(token)
            tokens.append(token)
    return tokens
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
# Heuristic catalog pairing a human-readable scope title with lowercase
# phrases that indicate that scope area in source text. NOTE(review): the
# consumer of this table is not visible in this chunk — presumably it does
# substring matching over normalized text; confirm the matching semantics and
# whether entry order matters before editing or reordering entries.
_SCOPE_PATTERNS: list[tuple[str, list[str]]] = [
    ("Quote Creation", ["create quote", "create quotes", "quotes", "quote creation", "send them to customers"]),
    ("Quote Approval", ["approve quotes", "quote approval", "approve quote", "digitally", "secure link", "secure portal"]),
    ("Job Execution", ["create jobs", "jobs", "field staff", "progress updates", "job scheduling", "job completion"]),
    ("Invoicing", ["invoice", "invoices"]),
    ("Payments", ["pay online", "payments", "stripe", "payment", "credit card", "e-transfer"]),
    ("Reporting", ["reporting", "dashboard", "management reporting"]),
    ("Customer Intake", ["customer intake", "intake", "lead capture", "customer records"]),
    ("Document Management", ["photo", "upload", "attachment", "document", "supporting documentation"]),
    ("Field Workflow", ["ipad", "field", "onsite", "mobile"]),
    ("Admin experience redesign", ["admin dashboard", "organization detail hub", "organizations list", "admin user management", "newsletter", "demo requests", "settings/profile/2fa"]),
    ("Manager experience redesign", ["manager dashboard", "score trends", "ai analysis", "engagements", "participants", "manager question bank", "manager settings", "board reports"]),
    ("Participant survey experience redesign", ["landing page", "thank you", "vibe check", "need help", "participant", "scale form", "text form"]),
    ("Engagement lifecycle", ["create engagement", "recurrence", "group assignment", "question selection", "completed engagement", "calendar", "engagement"]),
    ("Add Organization wizard", ["add organization", "5-step wizard", "org details", "define groups", "review & send"]),
    ("Role and access model migration", ["super admin", "super manager", "billing role", "participant token", "permissions overhaul", "require_group_access", "userrole"]),
    ("Dynamic categories migration", ["dynamic categories", "categories crud", "pillar", "category_id"]),
    ("Group model and group-scoped permissions", ["hierarchical groups", "tags", "group selector", "manager_group_assignments", "group permissions"]),
    ("Context Library", ["context library", "about", "pdf_extracted_text", "context entry"]),
    ("Need Help workflow", ["need help", "988", "crisis lifeline", "safety-critical"]),
    ("Vibe Check", ["vibe check", "mood", "mood pills"]),
    ("Billing and invoice history", ["invoice history", "billing contact"]),
    ("Branding and theming model", ["branding", "theming", "hero image", "overlay", "branded"]),
    ("Terminology migration", ["terminology", "rename", "organizations", "participants"]),
    ("Scoring model and display transition", ["0-10", "100-point", "chi score", "rotation weight", "score transformation"]),
    ("Question type simplification", ["questiontype", "anchor", "narrative", "scale", "text", "event_pulse"]),
    ("Database migration package", ["migration strategy", "new tables", "modified tables", "enum changes", "deprecations", "phase 1", "phase 2", "phase 3", "phase 4", "database"]),
    ("API and middleware migration package", ["new route files", "endpoints", "middleware", "dependencies.py", "route file"]),
    ("Documentation and phased implementation roadmap", ["changelog", "handoff", "roadmap", "implementation plan", "phased rollout", "documentation package"]),
]
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
def _source_doc_readiness(packet: NormalizedSourcePacketArtifact) -> tuple[str, list[str]]:
|
|
357
|
+
notes = [str(note).strip() for note in packet.confidence_notes if str(note).strip()]
|
|
358
|
+
lowered_notes = " ".join(note.lower() for note in notes)
|
|
359
|
+
reasons: list[str] = []
|
|
360
|
+
|
|
361
|
+
low_readiness_markers = [
|
|
362
|
+
"heuristic_bootstrap",
|
|
363
|
+
"heuristic bootstrap",
|
|
364
|
+
"raw intake",
|
|
365
|
+
"vague intake",
|
|
366
|
+
"not yet refined",
|
|
367
|
+
"insufficient",
|
|
368
|
+
"needs refinement",
|
|
369
|
+
]
|
|
370
|
+
if any(marker in lowered_notes for marker in low_readiness_markers):
|
|
371
|
+
reasons.append("Source docs still look like heuristic/vague intake rather than refined scope input.")
|
|
372
|
+
if not packet.facts and packet.open_questions_with_assumptions:
|
|
373
|
+
reasons.append("Source docs contain open assumptions but no grounded facts yet.")
|
|
374
|
+
if not packet.lineage and packet.facts:
|
|
375
|
+
reasons.append("Source docs facts are missing traceable lineage support.")
|
|
376
|
+
|
|
377
|
+
return ("needs_refinement", reasons) if reasons else ("ready", [])
|
|
378
|
+
|
|
379
|
+
|
|
380
|
+
def _coverage_gate(packet: NormalizedSourcePacketArtifact, outline: ScopeOutlineArtifact) -> ScopeCoverageReportArtifact:
    """Score how well the drafted scope outline covers the source packet's facts.

    Classifies up to the first 12 facts as covered / weakly covered /
    uncovered by token overlap against the outline's text, then picks a gate
    status in strict priority order: ``escalate`` (contradictions needing
    review, or no scope items), ``revise`` (unrefined source docs, or >= 2
    uncovered facts), ``pass_with_assumptions`` (anything still visible for
    review), or ``pass``.
    """
    # Cap the evaluated facts so the token-overlap scan stays cheap.
    fact_chunks = packet.facts[:12]
    # One lowercase haystack combining titles, descriptions, and constraints.
    coverage_surface = " ".join(
        [f"{item.title}. {item.description}" for item in outline.scope_items]
        + [f"{item.title}. {item.description}" for item in outline.migration_scope_items]
        + list(outline.cross_cutting_constraints)
    ).lower()
    covered: list[str] = []
    weak: list[str] = []
    uncovered: list[str] = []
    for fact in fact_chunks:
        # Up to 5 "significant" tokens (> 4 chars) represent the fact.
        tokens = [token for token in fact.lower().split() if len(token) > 4][:5]
        hits = sum(1 for token in tokens if token in coverage_surface)
        # Majority of tokens found -> covered; some -> weak; none -> uncovered.
        if hits >= max(1, len(tokens) // 2):
            covered.append(fact)
        elif hits > 0:
            weak.append(fact)
        else:
            uncovered.append(fact)

    # Scope items that cite no source support at all.
    unsupported = [
        item.title for item in (outline.scope_items + outline.migration_scope_items) if not item.source_support
    ]
    # No facts at all counts as fully covered (vacuous truth).
    ratio = 1.0 if not fact_chunks else len(covered) / len(fact_chunks)
    source_doc_readiness, source_doc_readiness_reasons = _source_doc_readiness(packet)
    # Escalation trigger: contradictions exist AND at least one open question
    # needs confirmation AND one question mentions "source of truth".
    contradiction_needs_review = (
        bool(packet.contradictions)
        and any(item.needs_confirmation for item in packet.open_questions_with_assumptions)
        and any('source of truth' in item.question.lower() for item in packet.open_questions_with_assumptions)
    )
    # Branch order encodes gate priority; do not reorder.
    if contradiction_needs_review:
        status = "escalate"
        reasons = ["Source contradictions require human review before scope registration."]
    elif source_doc_readiness != "ready":
        status = "revise"
        reasons = [
            "Source docs are not yet refined enough to act as the readiness gate for scope generation.",
            *source_doc_readiness_reasons,
        ]
    elif not outline.scope_items:
        status = "escalate"
        reasons = ["No product-facing scope items were shaped from the source packet."]
    elif len(uncovered) >= 2:
        status = "revise"
        reasons = ["Multiple source-backed requirements remain uncovered by the draft scope set."]
    elif uncovered or unsupported or packet.open_questions_with_assumptions or packet.contradictions or weak:
        status = "pass_with_assumptions"
        reasons = ["Draft scope is usable, but assumptions, weakly covered items, unsupported items, or survivable contradictions must stay visible in review."]
    else:
        status = "pass"
        reasons = ["Coverage gate found sufficient support and requirement coverage for registration."]

    # Make open questions visible in the reasons even when another branch won.
    if packet.open_questions_with_assumptions and not any("open questions" in reason.lower() for reason in reasons):
        reasons.append("Open questions were carried forward as explicit assumptions for review.")

    return ScopeCoverageReportArtifact(
        coverage_status=status,
        covered_requirements=covered,
        weakly_covered_requirements=weak,
        uncovered_requirements=uncovered,
        over_consolidated_scope_items=[],
        unsupported_inferred_scope_items=unsupported,
        review_recommendations=["Confirm assumption ledger with Marcus/client before approval."] if status == "pass_with_assumptions" else [],
        gate_reasons=reasons,
        gate_metrics={
            "coverage_ratio": ratio,
            "unsupported_item_count": len(unsupported),
            "weak_coverage_count": len(weak),
            "scope_item_count": len(outline.scope_items),
            "migration_scope_item_count": len(outline.migration_scope_items),
            "cross_cutting_constraint_count": len(outline.cross_cutting_constraints),
            "source_doc_readiness": source_doc_readiness,
            "source_doc_readiness_reason_count": len(source_doc_readiness_reasons),
        },
    )
|
|
455
|
+
|
|
456
|
+
|
|
457
|
+
class InventorySourcesNode(Node):
    """Pipeline node that inventories the event's source refs and seeds the pipeline directory.

    Resolves each source ref to text, classifies it, derives a stable scope
    seed id, writes ``source_inventory.json`` under the pipeline root, and
    stashes the inventory plus resolved texts on the task-context metadata
    for downstream nodes.
    """

    async def process(self, task_context: TaskContext) -> TaskContext:
        event = task_context.event
        repo_root = Path(event.repo_root)
        # _node_attempt records the attempt in the run store and yields the
        # node execution id used for artifact/finish bookkeeping below.
        with _node_attempt(
            node_id="inventory_sources",
            node_name="InventorySources",
            task_context=task_context,
            input_payload={
                "project_id": event.project_id,
                "source_intake_id": event.source_intake_id,
                "source_ref_count": len(event.source_refs),
            },
        ) as node_exec_id:
            sources: list[SourceInventoryItem] = []
            source_texts: dict[str, str] = {}  # source_id -> resolved text ("" when missing)
            missing: list[str] = []
            warnings: list[str] = []
            for index, source in enumerate(event.source_refs, start=1):
                source_type = _classify_source_type(source.type)
                text, resolved_location = _load_source_text(repo_root, source)
                # Stable, zero-padded ids keep ordering readable (src_01, src_02, ...).
                source_id = f"src_{index:02d}"
                ingest_status = "resolved" if text else "missing"
                if not text:
                    missing.append(source.title or source.path or source.type)
                    warnings.append(f"Source '{source.title or source.type}' could not be fully resolved.")
                source_texts[source_id] = text
                sources.append(
                    SourceInventoryItem(
                        source_id=source_id,
                        source_type=source_type,
                        location=resolved_location,
                        title=source.title or Path(resolved_location).name or source.type,
                        content_ref=resolved_location,
                        ingest_status=ingest_status,
                        # Transcripts are prioritized ahead of all other source kinds.
                        priority=10 if source_type == "transcript" else 50,
                        notes=["Transcript/design/repo context should be combined rather than overridden by polished docs."] if source_type in {"transcript", "repo", "design"} else [],
                    )
                )

            # Deterministic scope seed derived from project + intake + refs,
            # so re-running the same intake lands in the same pipeline dir.
            scope_seed = _stable_id(
                "scope_seed_",
                {
                    "project_id": event.project_id,
                    "source_intake_id": event.source_intake_id,
                    "source_refs": [source.model_dump() for source in event.source_refs],
                },
            )
            pipeline_root = _pipeline_root(repo_root, scope_id=scope_seed, pipeline_key=event.pipeline_key)
            artifact = SourceInventoryArtifact(
                project_id=event.project_id,
                source_intake_id=event.source_intake_id,
                sources=sources,
                inventory_summary=f"Inventoried {len(sources)} source(s) for scope extraction.",
                missing_expected_sources=missing,
                warnings=warnings,
            )
            inventory_path = pipeline_root / "source_inventory.json"
            _write_json(inventory_path, artifact.model_dump())
            # Hand everything downstream via metadata; later nodes read these keys.
            task_context.metadata["scope_seed_id"] = scope_seed
            task_context.metadata["pipeline_root"] = str(pipeline_root)
            task_context.metadata["source_inventory"] = artifact
            task_context.metadata["source_texts"] = source_texts
            task_context.metadata["artifact_paths"] = {"source_inventory": str(inventory_path)}
            store, run_id = _store_run()
            store.add_artifact(run_id=run_id, node_exec_id=node_exec_id, kind="source_scope.source_inventory", uri=str(inventory_path), metadata=artifact.model_dump())
            store.mark_node_finished(node_exec_id=node_exec_id, status="succeeded", output={"scope_seed_id": scope_seed, "pipeline_root": str(pipeline_root), "source_inventory_ref": str(inventory_path), "resolved_source_count": len([item for item in sources if item.ingest_status == "resolved"]), "missing_source_count": len(missing)})
            self.save_output(artifact)
            return task_context
|
|
526
|
+
|
|
527
|
+
|
|
528
|
+
def _agent_timeout_seconds() -> int | None:
|
|
529
|
+
raw = os.environ.get("DEVFLOW_SOURCE_SCOPE_AGENT_TIMEOUT_SECONDS")
|
|
530
|
+
if raw:
|
|
531
|
+
try:
|
|
532
|
+
return max(1, int(raw))
|
|
533
|
+
except ValueError:
|
|
534
|
+
pass
|
|
535
|
+
return 1800
|
|
536
|
+
|
|
537
|
+
|
|
538
|
+
def _annotate_packet_confidence(packet: NormalizedSourcePacketArtifact, note: str) -> NormalizedSourcePacketArtifact:
    """Return a copy of *packet* with *note* appended to its confidence notes.

    Idempotent: a note already present is not duplicated. The input packet is
    never mutated — a fresh model is validated from the dumped payload.
    """
    payload = packet.model_dump()
    existing_notes = list(payload.get("confidence_notes") or [])
    if note not in existing_notes:
        existing_notes.append(note)
    payload["confidence_notes"] = existing_notes
    return NormalizedSourcePacketArtifact.model_validate(payload)
|
|
545
|
+
|
|
546
|
+
|
|
547
|
+
def _persist_agent_artifact(*, stage_path: Path, artifact: BaseModel, artifact_paths: dict[str, str], key: str, task_context: TaskContext) -> None:
    """Write *artifact* to *stage_path* and record it (path and object) on the task context."""
    _write_json(stage_path, artifact.model_dump())
    # Mutates the caller-supplied mapping so accumulated paths stay shared.
    artifact_paths[key] = str(stage_path)
    metadata = task_context.metadata
    metadata["artifact_paths"] = artifact_paths
    metadata[key] = artifact
|
|
552
|
+
|
|
553
|
+
|
|
554
|
+
# Upper bound on shape->validate retry attempts before scope shaping gives up.
MAX_SCOPE_SHAPING_ATTEMPTS = 3
|
|
555
|
+
|
|
556
|
+
|
|
557
|
+
def _scope_validation_feedback(report: ScopeCoverageReportArtifact) -> dict[str, Any]:
|
|
558
|
+
return {
|
|
559
|
+
"coverage_status": report.coverage_status,
|
|
560
|
+
"gate_reasons": list(report.gate_reasons),
|
|
561
|
+
"uncovered_requirements": list(report.uncovered_requirements),
|
|
562
|
+
"weakly_covered_requirements": list(report.weakly_covered_requirements),
|
|
563
|
+
"unsupported_inferred_scope_items": list(report.unsupported_inferred_scope_items),
|
|
564
|
+
"review_recommendations": list(report.review_recommendations),
|
|
565
|
+
"gate_metrics": dict(report.gate_metrics),
|
|
566
|
+
}
|
|
567
|
+
|
|
568
|
+
|
|
569
|
+
def _persist_scope_shaping_attempts(*, pipeline_root: Path, attempts: list[dict[str, Any]]) -> Path:
    """Persist the shaping-attempt history beside the pipeline artifacts; return the file path."""
    target = pipeline_root / "scope_shaping_attempts.json"
    _write_json(
        target,
        {
            "max_attempts": MAX_SCOPE_SHAPING_ATTEMPTS,
            "attempt_count": len(attempts),
            "attempts": attempts,
        },
    )
    return target
|
|
578
|
+
|
|
579
|
+
|
|
580
|
+
def _generation_failure_result(
    *,
    pipeline_dir: Path,
    run_id: str,
    project_id: str,
    source_intake_id: str,
    stage_name: str,
    reason: str,
) -> SourceToScopeDagResult:
    """Record a source->scope generation failure and build the terminal DAG result.

    Writes ``generation_failure.json`` and ``summary.json`` into the pipeline
    directory, then returns a result with exit code 2 whose message is the
    failure payload (plus run/pipeline context) serialized as sorted JSON.
    """
    failure_payload = {
        "project_id": project_id,
        "source_intake_id": source_intake_id,
        "resolution_status": "generation_failed",
        "failure_stage": stage_name,
        "failure_reason": reason,
        "registered_scope_count": 0,
    }
    _write_json(pipeline_dir / "generation_failure.json", failure_payload)
    summary = SourceScopeDagSummary(
        exit_code=2,
        run_id=run_id,
        pipeline_dir=str(pipeline_dir),
        message="source->scope generation failed",
        outcome=failure_payload,
    )
    _write_json(pipeline_dir / "summary.json", summary.model_dump())
    return SourceToScopeDagResult(
        exit_code=2,
        run_id=run_id,
        pipeline_dir=pipeline_dir,
        # sort_keys keeps the message deterministic; trailing newline makes it
        # safe to stream directly to stdout/log sinks.
        message=json.dumps({**failure_payload, "run_id": run_id, "pipeline_dir": str(pipeline_dir)}, sort_keys=True) + "\n",
        outcome=failure_payload,
    )
|
|
613
|
+
|
|
614
|
+
|
|
615
|
+
def _run_source_scope_agent(
    *,
    repo_root: Path,
    pipeline_root: Path,
    stage_name: str,
    output_model: type[BaseModel],
    context_payload: dict[str, Any],
    guidance: list[str],
) -> BaseModel:
    """Run one LLM agent step of the source->scope pipeline and persist its run envelope.

    Raises ``SourceScopeGenerationFailure`` instead of falling back: when the
    agent is disabled under pytest, or when the agent step raises any other
    exception (wrapped with the original as cause).
    """
    # Under pytest the agent is hard-disabled unless explicitly re-enabled,
    # so tests can never silently depend on live agent output.
    if os.environ.get("PYTEST_CURRENT_TEST") and os.environ.get("DEVFLOW_ENABLE_SOURCE_SCOPE_AGENT") != "1":
        raise SourceScopeGenerationFailure(
            stage_name=stage_name,
            reason="source->scope agent is disabled under pytest; heuristic fallback is not permitted",
        )
    try:
        artifact, envelope = source_scope_agentic.run_source_scope_agent_step(
            repo_root=repo_root,
            stage_name=stage_name,
            output_model=output_model,
            context_payload=context_payload,
            guidance=guidance,
            timeout_seconds=_agent_timeout_seconds(),
        )
        # Persist the raw agent exchange for auditability before returning.
        source_scope_agentic.persist_agent_run(pipeline_root=pipeline_root, node_id=stage_name, envelope=envelope)
        return artifact
    except SourceScopeGenerationFailure:
        # Already the domain failure type; propagate unwrapped.
        raise
    except Exception as exc:
        raise SourceScopeGenerationFailure(
            stage_name=stage_name,
            reason=f"source->scope agent step failed: {exc}",
        ) from exc
|
|
647
|
+
|
|
648
|
+
|
|
649
|
+
class NormalizeSourcePacketNode(AgentNode):
    """Agent node that turns the raw source inventory into a normalized source packet.

    Runs the source->scope agent for the ``normalize_source_packet`` stage,
    annotates the result's confidence notes, persists the artifact, and
    records it in the run store.
    """

    def get_agent_config(self) -> AgentConfig:
        return AgentConfig(
            instructions="Normalize messy project input into a structured source packet with facts, assumptions, contradictions, and lineage.",
            output_type=NormalizedSourcePacketArtifact,
        )

    async def process(self, task_context: TaskContext) -> TaskContext:
        event = task_context.event
        repo_root = Path(event.repo_root)
        # All of these metadata keys are populated by InventorySourcesNode.
        pipeline_root = Path(task_context.metadata["pipeline_root"])
        inventory = task_context.metadata["source_inventory"]
        source_texts = dict(task_context.metadata.get("source_texts") or {})
        artifact_paths = dict(task_context.metadata.get("artifact_paths") or {})
        with _node_attempt(node_id="normalize_source_packet", node_name="NormalizeSourcePacket", task_context=task_context, input_payload={"project_id": event.project_id, "source_intake_id": event.source_intake_id}) as node_exec_id:
            artifact = _run_source_scope_agent(
                repo_root=repo_root,
                pipeline_root=pipeline_root,
                stage_name="normalize_source_packet",
                output_model=NormalizedSourcePacketArtifact,
                context_payload={
                    "project_id": event.project_id,
                    "source_intake_id": event.source_intake_id,
                    "source_inventory": inventory.model_dump(),
                    "source_texts": source_texts,
                },
                guidance=[
                    "Return a normalized packet with evidence-backed facts, explicit assumptions, open questions, contradictions, and lineage.",
                    "Do not collapse multiple requirements into one vague summary if the source distinguishes them.",
                    "Preserve traceability by returning lineage refs that map back to source chunks.",
                ],
            )
            note = "CLI LLM agent produced normalized source packet."
            # Round-trip through dump/validate, then tag the provenance note.
            artifact = _annotate_packet_confidence(NormalizedSourcePacketArtifact.model_validate(artifact.model_dump()), note)
            stage_path = pipeline_root / "normalized_source_packet.json"
            _persist_agent_artifact(stage_path=stage_path, artifact=artifact, artifact_paths=artifact_paths, key="normalized_source_packet", task_context=task_context)
            store, run_id = _store_run()
            store.add_artifact(run_id=run_id, node_exec_id=node_exec_id, kind="source_scope.normalized_source_packet", uri=str(stage_path), metadata=artifact.model_dump())
            store.mark_node_finished(node_exec_id=node_exec_id, status="succeeded", output={"normalized_source_packet_ref": str(stage_path), "mode": "agent", "lineage_count": len(artifact.lineage), "fact_count": len(artifact.facts), "open_question_count": len(artifact.open_questions_with_assumptions), "contradiction_count": len(artifact.contradictions)})
            self.save_output(artifact)
            return task_context
|
|
690
|
+
|
|
691
|
+
|
|
692
|
+
class ExtractScopeCandidatesNode(AgentNode):
    """Agent node that extracts candidate client-facing scope units from the normalized packet.

    Runs the ``extract_scope_candidates`` agent stage, appends a provenance
    note, persists the candidates artifact, and records it in the run store.
    """

    def get_agent_config(self) -> AgentConfig:
        return AgentConfig(
            instructions="Extract candidate client-facing scope units from the normalized source packet while preserving source traceability.",
            output_type=ScopeCandidatesArtifact,
        )

    async def process(self, task_context: TaskContext) -> TaskContext:
        event = task_context.event
        repo_root = Path(event.repo_root)
        # Populated by earlier nodes (inventory + normalize stages).
        pipeline_root = Path(task_context.metadata["pipeline_root"])
        packet = task_context.metadata["normalized_source_packet"]
        artifact_paths = dict(task_context.metadata.get("artifact_paths") or {})
        with _node_attempt(node_id="extract_scope_candidates", node_name="ExtractScopeCandidates", task_context=task_context, input_payload={"project_id": event.project_id}) as node_exec_id:
            artifact = _run_source_scope_agent(
                repo_root=repo_root,
                pipeline_root=pipeline_root,
                stage_name="extract_scope_candidates",
                output_model=ScopeCandidatesArtifact,
                context_payload={
                    "project_id": event.project_id,
                    "normalized_source_packet": packet.model_dump(),
                },
                guidance=[
                    "Extract client-facing scope candidates, not implementation stories or task lists.",
                    "Each candidate should carry concrete source support when available.",
                    "Surface overlaps and candidate gaps explicitly rather than hiding ambiguity.",
                ],
            )
            # Re-validate the agent output, then tag provenance in-place.
            artifact = ScopeCandidatesArtifact.model_validate(artifact.model_dump())
            artifact.extraction_notes.append("CLI LLM agent extracted candidate scope units.")
            stage_path = pipeline_root / "scope_candidates.json"
            _persist_agent_artifact(stage_path=stage_path, artifact=artifact, artifact_paths=artifact_paths, key="scope_candidates", task_context=task_context)
            store, run_id = _store_run()
            store.add_artifact(run_id=run_id, node_exec_id=node_exec_id, kind="source_scope.scope_candidates", uri=str(stage_path), metadata=artifact.model_dump())
            store.mark_node_finished(node_exec_id=node_exec_id, status="succeeded", output={"scope_candidates_ref": str(stage_path), "mode": "agent", "candidate_count": len(artifact.scope_candidates), "candidate_gap_count": len(artifact.candidate_gaps)})
            self.save_output(artifact)
            return task_context
|
|
730
|
+
|
|
731
|
+
|
|
732
|
+
class ShapeScopeDocumentNode(AgentNode):
    """Shape extracted scope candidates into a reviewable scope outline.

    Runs the source-scope agent up to MAX_SCOPE_SHAPING_ATTEMPTS times; each
    attempt is validated against the deterministic coverage gate.  The first
    outline that passes ("pass" or "pass_with_assumptions") is persisted and
    recorded; if no attempt passes, the node raises rather than masking the
    failure with a degraded artifact.
    """

    def get_agent_config(self) -> AgentConfig:
        # Instructions/output schema for the underlying agent runtime.
        return AgentConfig(
            instructions="Shape candidate scope units into a reviewable scope outline without collapsing affirmed intent.",
            output_type=ScopeOutlineArtifact,
        )

    async def process(self, task_context: TaskContext) -> TaskContext:
        event = task_context.event
        repo_root = Path(event.repo_root)
        pipeline_root = Path(task_context.metadata["pipeline_root"])
        # Upstream artifacts placed in metadata by the normalize/extract nodes.
        packet = task_context.metadata["normalized_source_packet"]
        candidates = task_context.metadata["scope_candidates"]
        artifact_paths = dict(task_context.metadata.get("artifact_paths") or {})
        # Deterministic id so reruns of the same intake produce the same scope set.
        scope_set_id = _stable_id("scope_set_", {"project_id": event.project_id, "source_intake_id": event.source_intake_id})
        with _node_attempt(node_id="shape_scope_document", node_name="ShapeScopeDocument", task_context=task_context, input_payload={"project_id": event.project_id, "scope_set_id": scope_set_id, "max_attempts": MAX_SCOPE_SHAPING_ATTEMPTS}) as node_exec_id:
            # Feedback from the previous failed attempt, fed back into the agent.
            shaping_feedback: dict[str, Any] | None = None
            prior_outline: ScopeOutlineArtifact | None = None
            attempts: list[dict[str, Any]] = []
            # "latest" paths always hold the most recent attempt's output.
            latest_outline_path = pipeline_root / "scope_outline.json"
            latest_report_path = pipeline_root / "scope_coverage_report.json"
            for attempt_number in range(1, MAX_SCOPE_SHAPING_ATTEMPTS + 1):
                artifact = _run_source_scope_agent(
                    repo_root=repo_root,
                    pipeline_root=pipeline_root,
                    stage_name="shape_scope_document",
                    output_model=ScopeOutlineArtifact,
                    context_payload={
                        "project_id": event.project_id,
                        "scope_set_id": scope_set_id,
                        "normalized_source_packet": packet.model_dump(),
                        "scope_candidates": candidates.model_dump(),
                        "shape_attempt": attempt_number,
                        "max_shape_attempts": MAX_SCOPE_SHAPING_ATTEMPTS,
                        "previous_scope_outline": prior_outline.model_dump() if prior_outline is not None else None,
                        "validation_feedback": shaping_feedback,
                    },
                    guidance=[
                        "Shape candidates into a reviewable scope outline that preserves source-backed boundaries.",
                        "Keep assumptions, out-of-scope notes, and review notes visible.",
                        "Do not invent dependency chains unless the source clearly implies them.",
                        "If validation feedback is provided, revise the outline against that feedback while staying grounded strictly in source docs and explicit assumptions.",
                        "Do not mask failed validation with a packaging artifact; either produce a gate-passing outline within the allowed attempts or fail clearly.",
                    ],
                )
                # Round-trip through the model to normalize agent output.
                artifact = ScopeOutlineArtifact.model_validate(artifact.model_dump())
                artifact.review_notes.append(f"CLI LLM agent shaped the reviewable scope outline on attempt {attempt_number}.")
                if shaping_feedback:
                    artifact.review_notes.append("Validation feedback from the prior attempt was applied during reshaping.")

                # Persist both a per-attempt snapshot and the rolling "latest" file.
                attempt_outline_path = pipeline_root / f"scope_outline_attempt_{attempt_number}.json"
                _write_json(attempt_outline_path, artifact.model_dump())
                _write_json(latest_outline_path, artifact.model_dump())
                report = _coverage_gate(packet, artifact)
                report.gate_metrics["shape_attempt"] = attempt_number
                report.gate_metrics["max_shape_attempts"] = MAX_SCOPE_SHAPING_ATTEMPTS
                attempt_report_path = pipeline_root / f"scope_coverage_report_attempt_{attempt_number}.json"
                _write_json(attempt_report_path, report.model_dump())
                _write_json(latest_report_path, report.model_dump())

                attempts.append(
                    {
                        "attempt": attempt_number,
                        "scope_outline_ref": str(attempt_outline_path),
                        "scope_coverage_report_ref": str(attempt_report_path),
                        "coverage_status": report.coverage_status,
                        "gate_reasons": list(report.gate_reasons),
                    }
                )
                attempts_path = _persist_scope_shaping_attempts(pipeline_root=pipeline_root, attempts=attempts)
                # Metadata is updated every attempt so a failure still leaves
                # the latest state visible to downstream escalation handling.
                artifact_paths["scope_shaping_attempts"] = str(attempts_path)
                artifact_paths["scope_outline"] = str(latest_outline_path)
                artifact_paths["scope_coverage_report"] = str(latest_report_path)
                task_context.metadata["artifact_paths"] = artifact_paths
                task_context.metadata["scope_outline"] = artifact
                task_context.metadata["scope_coverage_report"] = report
                task_context.metadata["scope_shaping_attempt_count"] = attempt_number
                task_context.metadata["scope_shaping_attempts"] = attempts
                prior_outline = artifact

                if report.coverage_status in {"pass", "pass_with_assumptions"}:
                    # Gate passed: record artifacts in the execution store and stop retrying.
                    store, run_id = _store_run()
                    store.add_artifact(run_id=run_id, node_exec_id=node_exec_id, kind="source_scope.scope_outline", uri=str(latest_outline_path), metadata=artifact.model_dump())
                    store.add_artifact(run_id=run_id, node_exec_id=node_exec_id, kind="source_scope.scope_coverage_report", uri=str(latest_report_path), metadata=report.model_dump())
                    store.mark_node_finished(node_exec_id=node_exec_id, status="succeeded", output={"scope_outline_ref": str(latest_outline_path), "scope_coverage_report_ref": str(latest_report_path), "mode": "agent_loop", "scope_item_count": len(artifact.scope_items), "scope_set_id": artifact.scope_set_id, "attempt_count": attempt_number, "coverage_status": report.coverage_status})
                    self.save_output(artifact)
                    return task_context

                # Gate failed: turn the report into feedback for the next attempt.
                shaping_feedback = _scope_validation_feedback(report)

            # All attempts exhausted without a gate-passing outline.
            raise SourceScopeGenerationFailure(
                stage_name="shape_scope_document",
                reason=(
                    f"scope shaping validation failed after {MAX_SCOPE_SHAPING_ATTEMPTS} attempts; "
                    f"last coverage_status={report.coverage_status}; "
                    f"gate_reasons={' | '.join(report.gate_reasons)}"
                ),
            )
|
|
830
|
+
|
|
831
|
+
|
|
832
|
+
class CoverageGateNode(Node):
    """Run the deterministic coverage gate over the shaped outline and persist the report."""

    async def process(self, task_context: TaskContext) -> TaskContext:
        meta = task_context.metadata
        # Gate the shaped outline against the normalized source packet.
        report = _coverage_gate(meta["normalized_source_packet"], meta["scope_outline"])
        report_path = Path(meta["pipeline_root"]) / "scope_coverage_report.json"
        _write_json(report_path, report.model_dump())
        # Record the report path alongside any previously tracked artifact paths.
        tracked_paths = {**(meta.get("artifact_paths") or {}), "scope_coverage_report": str(report_path)}
        meta["artifact_paths"] = tracked_paths
        meta["scope_coverage_report"] = report
        self.save_output(report)
        return task_context
|
|
845
|
+
|
|
846
|
+
|
|
847
|
+
class CoverageDecisionNode(Node):
    """Translate the coverage report into an explicit routing decision artifact."""

    async def process(self, task_context: TaskContext) -> TaskContext:
        report = task_context.metadata["scope_coverage_report"]
        # Passing statuses proceed to registration; everything else escalates.
        if report.coverage_status in {"pass", "pass_with_assumptions"}:
            next_node = "RegisterScopeSetNode"
        else:
            next_node = "EscalationPackageNode"
        decision_reason = "; ".join(report.gate_reasons) or report.coverage_status
        decision = CoverageDecisionArtifact(
            decision=report.coverage_status,
            reason=decision_reason,
            next_node=next_node,
        )
        decision_path = Path(task_context.metadata["pipeline_root"]) / "coverage_decision.json"
        _write_json(decision_path, decision.model_dump())
        task_context.metadata["coverage_decision"] = decision
        self.save_output(decision)
        return task_context
|
|
861
|
+
|
|
862
|
+
|
|
863
|
+
class _RoutePass(RouterNode):
    """Route a clean "pass" coverage decision to scope-set registration."""

    def determine_next_node(self, task_context: TaskContext) -> Node | None:
        decision = task_context.metadata["coverage_decision"]
        # Only handle the exact "pass" decision; defer otherwise.
        if decision.decision != "pass":
            return None
        return RegisterScopeSetNode(task_context=task_context)
|
|
869
|
+
|
|
870
|
+
|
|
871
|
+
class _RoutePassWithAssumptions(RouterNode):
    """Route a "pass_with_assumptions" coverage decision to scope-set registration."""

    def determine_next_node(self, task_context: TaskContext) -> Node | None:
        decision = task_context.metadata["coverage_decision"]
        # Only handle the "pass_with_assumptions" decision; defer otherwise.
        if decision.decision != "pass_with_assumptions":
            return None
        return RegisterScopeSetNode(task_context=task_context)
|
|
877
|
+
|
|
878
|
+
|
|
879
|
+
class _RouteRevise(RouterNode):
    """Route a "revise" coverage decision to the escalation/revision package."""

    def determine_next_node(self, task_context: TaskContext) -> Node | None:
        decision = task_context.metadata["coverage_decision"]
        # Only handle the "revise" decision; defer otherwise.
        if decision.decision != "revise":
            return None
        return EscalationPackageNode(task_context=task_context)
|
|
885
|
+
|
|
886
|
+
|
|
887
|
+
class _RouteEscalate(RouterNode):
    """Route an "escalate" coverage decision to the escalation/revision package."""

    def determine_next_node(self, task_context: TaskContext) -> Node | None:
        decision = task_context.metadata["coverage_decision"]
        # Only handle the "escalate" decision; defer otherwise.
        if decision.decision != "escalate":
            return None
        return EscalationPackageNode(task_context=task_context)
|
|
893
|
+
|
|
894
|
+
|
|
895
|
+
class CoverageDecisionRouter(BaseRouter):
    """Dispatch on the coverage decision; unknown decisions fall back to escalation."""

    def __init__(self) -> None:
        # Routes are tried in order; each handles exactly one decision value.
        self.routes = [_RoutePass(), _RoutePassWithAssumptions(), _RouteRevise(), _RouteEscalate()]
        # Any decision no route claims is treated as an escalation.
        self.fallback = EscalationPackageNode()
|
|
899
|
+
|
|
900
|
+
|
|
901
|
+
class RegisterScopeSetNode(Node):
    """Persist a gate-passing scope outline into the on-repo scope registry.

    Writes one JSON record per scope item under
    ``.devflow/projects/<project_id>/scopes/`` plus a scope-set record under
    ``.devflow/projects/<project_id>/scope_sets/``, then emits a
    ScopeRegistryRecordArtifact summarizing the registration.
    """

    async def process(self, task_context: TaskContext) -> TaskContext:
        repo_root = Path(task_context.event.repo_root)
        outline = task_context.metadata["scope_outline"]
        report = task_context.metadata["scope_coverage_report"]
        # Only the run id is needed here; the store itself is not used.
        run_id = _store_run()[1]
        registry_root = repo_root / ".devflow" / "projects" / outline.project_id / "scopes"
        registry_root.mkdir(parents=True, exist_ok=True)
        registered: list[RegisteredScopeItemArtifact] = []
        for item in outline.scope_items:
            # One registry file per scope item, keyed by its scope_id.
            registry_path = registry_root / f"{item.scope_id}.json"
            persisted = {
                "project_id": outline.project_id,
                "scope_set_id": outline.scope_set_id,
                "scope_id": item.scope_id,
                "status": "ready_for_review",
                "coverage_status": report.coverage_status,
                "scope_item": item.model_dump(),
            }
            _write_json(registry_path, persisted)
            registered.append(
                RegisteredScopeItemArtifact(
                    scope_id=item.scope_id,
                    registry_ref=str(registry_path),
                    status="ready_for_review",
                    source_traceability_refs=item.source_support,
                )
            )
        # Scope-set level record: ties the individual items together and
        # marks that human approval is required before scope->idea work.
        scope_set_registry_root = repo_root / ".devflow" / "projects" / outline.project_id / "scope_sets"
        scope_set_registry_root.mkdir(parents=True, exist_ok=True)
        scope_set_registry_path = scope_set_registry_root / f"{outline.scope_set_id}.json"
        _write_json(
            scope_set_registry_path,
            {
                "project_id": outline.project_id,
                "scope_set_id": outline.scope_set_id,
                "scope_set_title": outline.scope_set_title,
                "status": "ready_for_review",
                "coverage_status": report.coverage_status,
                "scope_item_ids": [item.scope_id for item in outline.scope_items],
                "cross_cutting_constraints": outline.cross_cutting_constraints,
                "review_notes": outline.review_notes,
                "approval_required_before_scope_to_idea": True,
            },
        )
        artifact = ScopeRegistryRecordArtifact(
            project_id=outline.project_id,
            scope_set_id=outline.scope_set_id,
            registered_scope_items=registered,
            coverage_status=report.coverage_status,
            run_id=run_id,
            registration_timestamp=datetime.now(UTC).isoformat(),
            scope_set_registry_ref=str(scope_set_registry_path),
        )
        # Snapshot the registry record in the pipeline working directory too.
        stage_path = Path(task_context.metadata["pipeline_root"]) / "scope_registry_record.json"
        _write_json(stage_path, artifact.model_dump())
        task_context.metadata["scope_registry_record"] = artifact
        self.save_output(artifact)
        return task_context
|
|
960
|
+
|
|
961
|
+
|
|
962
|
+
class ApprovalReadyPackageNode(Node):
    """Assemble the human-approval package and the run's success summary.

    Terminal node on the success path: produces an ApprovalReadyPackageArtifact
    for reviewers, writes summary.json, and sets exit_code 0 on the context.
    """

    async def process(self, task_context: TaskContext) -> TaskContext:
        outline = task_context.metadata["scope_outline"]
        report = task_context.metadata["scope_coverage_report"]
        registry = task_context.metadata["scope_registry_record"]
        artifact = ApprovalReadyPackageArtifact(
            project_id=outline.project_id,
            scope_set_id=outline.scope_set_id,
            scope_summary=outline.project_summary,
            # Reviewer-facing projection of each registered scope item.
            scope_items_for_review=[
                {
                    "scope_id": item.scope_id,
                    "title": item.title,
                    "description": item.description,
                    "assumptions": item.assumptions,
                    "source_support": item.source_support,
                }
                for item in outline.scope_items
            ],
            # NOTE(review): both lists below project the same assumptions_ledger —
            # assumed answers become "to confirm", questions become "open". Confirm
            # this pairing is intentional rather than a copy/paste artifact.
            key_assumptions_to_confirm=[entry.assumed_answer for entry in outline.assumptions_ledger],
            known_open_questions=[entry.question for entry in outline.assumptions_ledger],
            coverage_status=report.coverage_status,
            recommended_next_action="Review and approve this registered draft scope set. Do not treat it as idea-sufficiency output until humans approve the scope boundary.",
            downstream_ready_if_approved="Once approved, these registered scope artifacts become the canonical client-affirmed scope boundary for downstream scope->idea enrichment, not direct story decomposition.",
            scope_registry_record_ref=registry.scope_set_registry_ref,
        )
        stage_path = Path(task_context.metadata["pipeline_root"]) / "approval_package.json"
        _write_json(stage_path, artifact.model_dump())
        # Run-level summary consumed by the CLI wrapper; exit_code 0 = success.
        summary = SourceScopeDagSummary(
            exit_code=0,
            run_id=_store_run()[1],
            pipeline_dir=str(task_context.metadata["pipeline_root"]),
            message="source->scope run complete",
            outcome={
                "project_id": outline.project_id,
                "scope_set_id": outline.scope_set_id,
                "coverage_status": report.coverage_status,
                "registered_scope_count": len(registry.registered_scope_items),
                "assumption_count": len(outline.assumptions_ledger),
                "scope_shaping_attempt_count": int(task_context.metadata.get("scope_shaping_attempt_count") or 1),
                "resolution_status": "ready_for_review",
            },
        )
        _write_json(Path(task_context.metadata["pipeline_root"]) / "summary.json", summary.model_dump())
        task_context.metadata["outcome"] = dict(summary.outcome)
        # Stable, machine-parseable one-line message (sorted keys, trailing newline).
        task_context.metadata["message"] = json.dumps({**summary.outcome, "run_id": summary.run_id, "pipeline_dir": summary.pipeline_dir}, sort_keys=True) + "\n"
        task_context.metadata["exit_code"] = 0
        self.save_output(artifact)
        return task_context
|
|
1011
|
+
|
|
1012
|
+
|
|
1013
|
+
class EscalationPackageNode(Node):
    """Assemble the revision/escalation package for a failed coverage gate.

    Terminal node on the failure path: produces a ScopeRevisionPackageArtifact,
    writes summary.json with exit_code 2, and records the resolution status
    ("needs_revision" for revise, "human_review_required" for escalate).
    """

    async def process(self, task_context: TaskContext) -> TaskContext:
        event = task_context.event
        report = task_context.metadata["scope_coverage_report"]
        outline = task_context.metadata["scope_outline"]
        packet = task_context.metadata["normalized_source_packet"]
        artifact = ScopeRevisionPackageArtifact(
            project_id=event.project_id,
            scope_set_id=outline.scope_set_id,
            # "revise" maps to needs_revision; every other failing status
            # (e.g. "escalate") requires a human in the loop.
            proposed_status="needs_revision" if report.coverage_status == "revise" else "human_review_required",
            major_gaps=report.uncovered_requirements,
            unsupported_items=report.unsupported_inferred_scope_items,
            # Fall back to a generic action when the gate produced none.
            recommended_revision_actions=report.review_recommendations or ["Tighten scope coverage and ensure each major source-backed requirement is represented."],
            manual_review_questions=[item.question for item in packet.open_questions_with_assumptions],
            coverage_status=report.coverage_status,
        )
        stage_path = Path(task_context.metadata["pipeline_root"]) / "scope_revision_package.json"
        _write_json(stage_path, artifact.model_dump())
        resolution_status = "human_review_required" if report.coverage_status == "escalate" else "needs_revision"
        # Run-level summary; exit_code 2 signals revision/escalation required.
        summary = SourceScopeDagSummary(
            exit_code=2,
            run_id=_store_run()[1],
            pipeline_dir=str(task_context.metadata["pipeline_root"]),
            message="source->scope run complete with revision/escalation package",
            outcome={
                "project_id": event.project_id,
                "scope_set_id": outline.scope_set_id,
                "coverage_status": report.coverage_status,
                "registered_scope_count": 0,
                "assumption_count": len(packet.open_questions_with_assumptions),
                "resolution_status": resolution_status,
            },
        )
        _write_json(Path(task_context.metadata["pipeline_root"]) / "summary.json", summary.model_dump())
        task_context.metadata["outcome"] = dict(summary.outcome)
        # Stable, machine-parseable one-line message (sorted keys, trailing newline).
        task_context.metadata["message"] = json.dumps({**summary.outcome, "run_id": summary.run_id, "pipeline_dir": summary.pipeline_dir}, sort_keys=True) + "\n"
        task_context.metadata["exit_code"] = int(summary.exit_code)
        self.save_output(artifact)
        return task_context
|
|
1052
|
+
|
|
1053
|
+
|
|
1054
|
+
# Backward-compatible alias for callers still using the older "source docs -> scopes" name.
SourceDocsToScopesDagResult = SourceToScopeDagResult
|
|
1055
|
+
|
|
1056
|
+
|
|
1057
|
+
class SourceToScopeWorkflow(Workflow):
    """Declarative node graph for the source docs -> scopes pipeline."""

    workflow_schema = WorkflowSchema(
        description="Source docs -> scopes DAG (inventory -> normalize -> extract -> shape+validate loop -> register -> approval package)",
        event_schema=SourceToScopeDagEvent,
        start=InventorySourcesNode,
        # Linear chain: the shape/validate retry loop lives inside
        # ShapeScopeDocumentNode itself, so no explicit gate/router nodes
        # appear in this schema.
        nodes=[
            NodeConfig(node=InventorySourcesNode, connections=[NormalizeSourcePacketNode]),
            NodeConfig(node=NormalizeSourcePacketNode, connections=[ExtractScopeCandidatesNode]),
            NodeConfig(node=ExtractScopeCandidatesNode, connections=[ShapeScopeDocumentNode]),
            NodeConfig(node=ShapeScopeDocumentNode, connections=[RegisterScopeSetNode]),
            NodeConfig(node=RegisterScopeSetNode, connections=[ApprovalReadyPackageNode]),
            NodeConfig(node=ApprovalReadyPackageNode, connections=[]),
        ],
    )
|
|
1071
|
+
|
|
1072
|
+
|
|
1073
|
+
def build_pipeline_key(*, repo_root: Path, project_id: str, source_intake_id: str, source_refs: list[dict[str, Any]]) -> str:
    """Derive the deterministic run key for one source->scope pipeline invocation.

    The same repo/project/intake/refs combination always yields the same key,
    which is what makes pipeline directories and resume lookups stable.
    """
    key_material = {
        "repo_root": str(repo_root),
        "project_id": project_id,
        "source_intake_id": source_intake_id,
        "source_refs": source_refs,
    }
    return _stable_id("run_", key_material)
|
|
1083
|
+
|
|
1084
|
+
|
|
1085
|
+
def run_source_docs_to_scopes_dag(
    *,
    repo_root: Path,
    store: ExecutionStore,
    project_id: str,
    source_intake_id: str | None = None,
    source_refs: list[dict[str, Any]] | None = None,
    source_packet_id: str | None = None,
    source_texts: list[dict[str, Any]] | None = None,
    requested_by: str = "marcus",
    mode: str = "draft_scope_generation",
) -> SourceToScopeDagResult:
    """Backward-compatible entry point; delegates to run_source_to_scope_dag unchanged."""
    forwarded = {
        "repo_root": repo_root,
        "store": store,
        "project_id": project_id,
        "source_intake_id": source_intake_id,
        "source_refs": source_refs,
        "source_packet_id": source_packet_id,
        "source_texts": source_texts,
        "requested_by": requested_by,
        "mode": mode,
    }
    return run_source_to_scope_dag(**forwarded)
|
|
1108
|
+
|
|
1109
|
+
|
|
1110
|
+
def run_source_to_scope_resume(
    *,
    repo_root: Path,
    store: ExecutionStore,
    project_id: str,
    source_intake_id: str,
    source_refs: list[dict[str, Any]],
    resume_from: str,
) -> SourceToScopeDagResult:
    """Resume a previous source->scope run from a named mid-pipeline stage.

    Recomputes the deterministic pipeline key/directory from the same inputs
    as the original run, reloads already-persisted stage artifacts from disk,
    and re-executes only the stages from ``resume_from`` onward.

    Args:
        repo_root: Repository root the original run was executed against.
        store: Execution store used to record the resume run.
        project_id: Project the run belongs to.
        source_intake_id: Intake id of the original run.
        source_refs: Source refs of the original run (must match for the
            derived pipeline directory to resolve).
        resume_from: One of "normalize_source_packet",
            "extract_scope_candidates", or "shape_scope_document".

    Returns:
        SourceToScopeDagResult summarizing the resumed run.

    Raises:
        ValueError: If the pipeline directory does not exist or resume_from
            is not a recognized stage name.
    """
    normalized_refs = [SourceRef.model_validate(item) for item in source_refs]
    # Recreate the deterministic key/dir the original run used.
    pipeline_key = build_pipeline_key(
        repo_root=repo_root,
        project_id=project_id,
        source_intake_id=source_intake_id,
        source_refs=[item.model_dump() for item in normalized_refs],
    )
    scope_seed = _stable_id("scope_seed_", {"project_id": project_id, "source_intake_id": source_intake_id, "source_refs": [item.model_dump() for item in normalized_refs]})
    pipeline_dir = _pipeline_root(repo_root, scope_id=scope_seed, pipeline_key=pipeline_key)
    if not pipeline_dir.exists():
        raise ValueError(f"pipeline_dir not found for resume: {pipeline_dir}")

    # Resume runs are recorded as a distinct DAG id/version in the store.
    run_id = store.create_run(
        dag_id=f"{DAG_ID}_resume",
        dag_version="v1_resume",
        root_correlation_id=f"corr_{pipeline_key}_resume_{resume_from}",
        config={
            "project_id": project_id,
            "source_intake_id": source_intake_id,
            "pipeline_key": pipeline_key,
            "resume_from": resume_from,
        },
    )
    store.mark_run_started(run_id=run_id)
    # Nodes locate the store/run via these module globals (see _store_run).
    global _CURRENT_STORE, _CURRENT_RUN_ID
    _CURRENT_STORE = store
    _CURRENT_RUN_ID = run_id
    try:
        event = SourceToScopeDagEvent(
            repo_root=str(repo_root),
            project_id=project_id,
            source_intake_id=source_intake_id,
            source_refs=normalized_refs,
            requested_by="resume",
            mode="draft_scope_generation",
            pipeline_key=pipeline_key,
        )
        ctx = TaskContext(event=event)
        ctx.metadata["pipeline_root"] = str(pipeline_dir)
        ctx.metadata["scope_seed_id"] = scope_seed
        ctx.metadata["artifact_paths"] = {}

        # The inventory stage is never re-run; its artifact must already exist.
        inventory = _load_json_model(pipeline_dir / "source_inventory.json", SourceInventoryArtifact)
        ctx.metadata["source_inventory"] = inventory

        # Each branch reloads persisted artifacts for stages it skips and
        # re-executes everything from resume_from through shaping.
        if resume_from == "normalize_source_packet":
            ctx = run_sync(NormalizeSourcePacketNode().process(ctx))
            ctx.metadata["normalized_source_packet"] = _load_json_model(pipeline_dir / "normalized_source_packet.json", NormalizedSourcePacketArtifact)
            ctx = run_sync(ExtractScopeCandidatesNode().process(ctx))
            ctx.metadata["scope_candidates"] = _load_json_model(pipeline_dir / "scope_candidates.json", ScopeCandidatesArtifact)
            ctx = run_sync(ShapeScopeDocumentNode().process(ctx))
        elif resume_from == "extract_scope_candidates":
            ctx.metadata["normalized_source_packet"] = _load_json_model(pipeline_dir / "normalized_source_packet.json", NormalizedSourcePacketArtifact)
            ctx = run_sync(ExtractScopeCandidatesNode().process(ctx))
            ctx.metadata["scope_candidates"] = _load_json_model(pipeline_dir / "scope_candidates.json", ScopeCandidatesArtifact)
            ctx = run_sync(ShapeScopeDocumentNode().process(ctx))
        elif resume_from == "shape_scope_document":
            ctx.metadata["normalized_source_packet"] = _load_json_model(pipeline_dir / "normalized_source_packet.json", NormalizedSourcePacketArtifact)
            ctx.metadata["scope_candidates"] = _load_json_model(pipeline_dir / "scope_candidates.json", ScopeCandidatesArtifact)
            ctx = run_sync(ShapeScopeDocumentNode().process(ctx))
        else:
            raise ValueError("resume_from must be one of normalize_source_packet, extract_scope_candidates, shape_scope_document")

        # Tail of the pipeline is always re-run after shaping succeeds.
        ctx = run_sync(RegisterScopeSetNode().process(ctx))
        ctx = run_sync(ApprovalReadyPackageNode().process(ctx))
        exit_code = int(ctx.metadata.get("exit_code") or 0)
        store.mark_run_finished(run_id=run_id, status="succeeded" if exit_code == 0 else "failed")
        return SourceToScopeDagResult(
            exit_code=exit_code,
            run_id=run_id,
            pipeline_dir=Path(str(ctx.metadata.get("pipeline_root") or pipeline_dir)),
            message=str(ctx.metadata.get("message") or f"Resumed source->scope from {resume_from}"),
            outcome=dict(ctx.metadata.get("outcome") or {"resume_from": resume_from}),
        )
    except SourceScopeGenerationFailure as exc:
        # Expected generation failure: translate into a structured result.
        store.mark_run_finished(run_id=run_id, status="failed")
        return _generation_failure_result(
            pipeline_dir=pipeline_dir,
            run_id=run_id,
            project_id=project_id,
            source_intake_id=source_intake_id,
            stage_name=exc.stage_name,
            reason=exc.reason,
        )
    except Exception:
        # Unexpected failure: still mark the run failed before propagating.
        store.mark_run_finished(run_id=run_id, status="failed")
        raise
    finally:
        # Always clear the module globals so later runs don't see stale state.
        _CURRENT_STORE = None
        _CURRENT_RUN_ID = None
|
|
1209
|
+
|
|
1210
|
+
|
|
1211
|
+
def run_source_to_scope_dag(
    *,
    repo_root: Path,
    store: ExecutionStore,
    project_id: str,
    source_intake_id: str | None = None,
    source_refs: list[dict[str, Any]] | None = None,
    source_packet_id: str | None = None,
    source_texts: list[dict[str, Any]] | None = None,
    requested_by: str = "marcus",
    mode: str = "draft_scope_generation",
) -> SourceToScopeDagResult:
    """Execute the full source docs -> scopes workflow from scratch.

    Normalizes the intake inputs, derives a deterministic pipeline key and
    working directory, runs SourceToScopeWorkflow, syncs registered scopes on
    success, and records the run's final status in the execution store.

    Args:
        repo_root: Repository root to run against.
        store: Execution store used to record the run and its artifacts.
        project_id: Project the run belongs to.
        source_intake_id: Intake id; ``source_packet_id`` is accepted as a
            legacy alias when this is None.
        source_refs: Source references; when None, ``source_texts`` entries
            are converted into refs instead.
        source_packet_id: Legacy alias for source_intake_id.
        source_texts: Inline source texts used to synthesize refs when
            source_refs is not given.
        requested_by: Actor recorded in the run config.
        mode: Run mode recorded in the run config.

    Returns:
        SourceToScopeDagResult with exit code, run id, pipeline dir, message,
        and outcome payload.

    Raises:
        ValueError: If neither an intake id nor any source inputs are given.
    """
    # Accept the legacy source_packet_id alias for the intake id.
    effective_source_intake_id = source_intake_id or source_packet_id
    if not effective_source_intake_id:
        raise ValueError("source_intake_id or source_packet_id is required")
    effective_refs = source_refs
    if effective_refs is None and source_texts is not None:
        # Synthesize SourceRef-shaped dicts from inline texts, tolerating both
        # the new (source_type/path) and legacy (type/location) key names.
        effective_refs = [
            {
                "type": item.get("source_type") or item.get("type") or "notes",
                "path": item.get("path") or item.get("location"),
                "title": item.get("title"),
                "text": item.get("text"),
                "metadata": item.get("metadata") or {},
            }
            for item in source_texts
        ]
    if effective_refs is None:
        raise ValueError("source_refs or source_texts is required")
    normalized_refs = [SourceRef.model_validate(item) for item in effective_refs]
    # Deterministic key/dir so identical inputs reuse the same pipeline root.
    pipeline_key = build_pipeline_key(
        repo_root=repo_root,
        project_id=project_id,
        source_intake_id=effective_source_intake_id,
        source_refs=[item.model_dump() for item in normalized_refs],
    )
    scope_seed = _stable_id("scope_seed_", {"project_id": project_id, "source_intake_id": effective_source_intake_id, "source_refs": [item.model_dump() for item in normalized_refs]})
    pipeline_dir = _pipeline_root(repo_root, scope_id=scope_seed, pipeline_key=pipeline_key)
    pipeline_dir.mkdir(parents=True, exist_ok=True)

    run_id = store.create_run(
        dag_id=DAG_ID,
        dag_version="v1_scaffold",
        root_correlation_id=f"corr_{pipeline_key}",
        config={
            "project_id": project_id,
            "source_intake_id": effective_source_intake_id,
            "pipeline_key": pipeline_key,
            "requested_by": requested_by,
            "mode": mode,
        },
    )
    store.mark_run_started(run_id=run_id)

    wf = SourceToScopeWorkflow()
    # Nodes locate the store/run via these module globals (see _store_run).
    global _CURRENT_STORE, _CURRENT_RUN_ID
    _CURRENT_STORE = store
    _CURRENT_RUN_ID = run_id
    try:
        ctx = wf.run(
            {
                "repo_root": str(repo_root),
                "project_id": project_id,
                "source_intake_id": effective_source_intake_id,
                "source_refs": [item.model_dump() for item in normalized_refs],
                "requested_by": requested_by,
                "mode": mode,
                "pipeline_key": pipeline_key,
            }
        )
    except SourceScopeGenerationFailure as exc:
        # Expected generation failure: translate into a structured result.
        store.mark_run_finished(run_id=run_id, status="failed")
        return _generation_failure_result(
            pipeline_dir=pipeline_dir,
            run_id=run_id,
            project_id=project_id,
            source_intake_id=effective_source_intake_id,
            stage_name=exc.stage_name,
            reason=exc.reason,
        )
    except Exception:
        # Consistency with run_source_to_scope_resume: an unexpected failure
        # must still mark the run failed in the store before propagating,
        # otherwise the run record is left dangling in "started" state.
        store.mark_run_finished(run_id=run_id, status="failed")
        raise
    finally:
        # Always clear the module globals so later runs don't see stale state.
        _CURRENT_STORE = None
        _CURRENT_RUN_ID = None

    exit_code = int(ctx.metadata.get("exit_code") or 0)
    try:
        if exit_code == 0:
            # Best-effort-free sync: a sync failure fails the whole run.
            _sync_registered_scopes_to_supabase(
                repo_root=repo_root,
                project_id=project_id,
                scope_set_id=str(ctx.metadata.get("outcome", {}).get("scope_set_id") or ""),
                run_id=run_id,
                pipeline_dir=Path(str(ctx.metadata.get("pipeline_root") or pipeline_dir)),
            )
    except Exception:
        store.mark_run_finished(run_id=run_id, status="failed")
        raise
    store.mark_run_finished(run_id=run_id, status="succeeded" if exit_code == 0 else "failed")
    return SourceToScopeDagResult(
        exit_code=exit_code,
        run_id=run_id,
        pipeline_dir=Path(str(ctx.metadata.get("pipeline_root") or pipeline_dir)),
        message=str(ctx.metadata.get("message") or ""),
        outcome=dict(ctx.metadata.get("outcome") or {}),
    )
|
1316
|
+
|
|
1317
|
+
# Deterministic gating/persistence scaffold for pre-shaped source->scope artifacts.
|
|
1318
|
+
|
|
1319
|
+
# Backward-compatible alias. NOTE(review): this re-binds an alias already
# declared earlier in the module to the same target — the duplicate looks
# redundant; confirm before removing.
SourceDocsToScopesDagResult = SourceToScopeDagResult
|
|
1320
|
+
|
|
1321
|
+
|
|
1322
|
+
class SourceDocsToScopesDagEvent(BaseModel):
    """Event payload for the deterministic gating/persistence scaffold.

    Each of the two inputs (source packet, scope outline) may be supplied
    either as a file path or inline; _load_json_payload enforces that exactly
    one form is provided per input.
    """

    repo_root: str  # repository root the scaffold runs against
    project_id: str  # must match project_id inside both payloads (checked downstream)
    source_packet_path: str | None = None  # path to a normalized source packet JSON file
    source_packet_inline: dict[str, Any] | None = None  # inline alternative to source_packet_path
    scope_outline_path: str | None = None  # path to a scope outline JSON file
    scope_outline_inline: dict[str, Any] | None = None  # inline alternative to scope_outline_path
    pipeline_key: str  # deterministic key selecting the pipeline working directory
|
|
1330
|
+
|
|
1331
|
+
|
|
1332
|
+
def _load_json_payload(*, inline_payload: dict[str, Any] | None, payload_path: Path | None) -> dict[str, Any]:
|
|
1333
|
+
if inline_payload and payload_path:
|
|
1334
|
+
raise ValueError("Provide exactly one of inline payload or payload path")
|
|
1335
|
+
if payload_path:
|
|
1336
|
+
return json.loads(payload_path.read_text(encoding="utf-8"))
|
|
1337
|
+
if inline_payload:
|
|
1338
|
+
return dict(inline_payload)
|
|
1339
|
+
raise ValueError("Missing required DAG payload")
|
|
1340
|
+
|
|
1341
|
+
|
|
1342
|
+
class LoadCoverageInputsNode(Node):
    """Load and validate pre-shaped source packet and scope outline inputs.

    Entry node of the deterministic gating scaffold: resolves each input from
    its path-or-inline form, checks both belong to the event's project, and
    snapshots them into the pipeline working directory and task metadata.
    """

    async def process(self, task_context: TaskContext) -> TaskContext:
        event = task_context.event
        repo_root = Path(event.repo_root)
        # Each payload may come from a file path or inline; exactly one form
        # per input is enforced by _load_json_payload.
        source_packet = NormalizedSourcePacketArtifact.model_validate(
            _load_json_payload(
                inline_payload=event.source_packet_inline,
                payload_path=Path(event.source_packet_path) if event.source_packet_path else None,
            )
        )
        outline = ScopeOutlineArtifact.model_validate(
            _load_json_payload(
                inline_payload=event.scope_outline_inline,
                payload_path=Path(event.scope_outline_path) if event.scope_outline_path else None,
            )
        )
        # Guard against mixing artifacts from different projects.
        if source_packet.project_id != event.project_id or outline.project_id != event.project_id:
            raise ValueError("source->scope deterministic gate received mismatched project_id")
        pipeline_root = _pipeline_root(repo_root, scope_id=outline.scope_set_id, pipeline_key=event.pipeline_key)
        # Snapshot both inputs so the rest of the scaffold works from disk copies.
        _write_json(pipeline_root / "normalized_source_packet.json", source_packet.model_dump())
        _write_json(pipeline_root / "scope_outline.json", outline.model_dump())
        task_context.metadata["pipeline_root"] = str(pipeline_root)
        task_context.metadata["normalized_source_packet"] = source_packet
        task_context.metadata["scope_outline"] = outline
        self.save_output(outline)
        return task_context
|
|
1368
|
+
|
|
1369
|
+
|
|
1370
|
+
class DeterministicEscalationPackageNode(Node):
    """Terminal node for non-passing coverage decisions.

    Builds a ScopeRevisionPackageArtifact describing coverage gaps, persists
    it plus a run summary under the pipeline root, and records exit code 2 in
    the task metadata so callers can distinguish "needs revision / human
    review" from success (0).
    """

    async def process(self, task_context: TaskContext) -> TaskContext:
        event = task_context.event
        # These metadata entries are populated by earlier nodes in the DAG.
        report = task_context.metadata["scope_coverage_report"]
        outline = task_context.metadata["scope_outline"]
        packet = task_context.metadata["normalized_source_packet"]
        artifact = ScopeRevisionPackageArtifact(
            project_id=event.project_id,
            scope_set_id=outline.scope_set_id,
            # "revise" -> automated revision; any other non-pass status escalates
            # to a human reviewer.
            proposed_status="needs_revision" if report.coverage_status == "revise" else "human_review_required",
            major_gaps=report.uncovered_requirements,
            unsupported_items=report.unsupported_inferred_scope_items,
            # Fall back to a generic action when the report offers none.
            recommended_revision_actions=report.review_recommendations
            or ["Tighten scope coverage and ensure each major source-backed requirement is represented."],
            manual_review_questions=[item.question for item in packet.open_questions_with_assumptions],
            coverage_status=report.coverage_status,
        )
        stage_path = Path(task_context.metadata["pipeline_root"]) / "scope_revision_package.json"
        _write_json(stage_path, artifact.model_dump())
        resolution_status = "human_review_required" if report.coverage_status == "escalate" else "needs_revision"
        summary = SourceScopeDagSummary(
            # Exit code 2 signals "completed, but requires revision/escalation".
            exit_code=2,
            # NOTE(review): _store_run() appears to return (store, run_id);
            # index 1 is the active run id — confirm against its definition.
            run_id=_store_run()[1],
            pipeline_dir=str(task_context.metadata["pipeline_root"]),
            message="source->scope run complete with revision/escalation package",
            outcome={
                "project_id": event.project_id,
                "scope_set_id": outline.scope_set_id,
                "coverage_status": report.coverage_status,
                # Nothing is registered on the escalation path.
                "registered_scope_count": 0,
                "resolution_status": resolution_status,
            },
        )
        _write_json(Path(task_context.metadata["pipeline_root"]) / "summary.json", summary.model_dump())
        # Mirror the summary into metadata for the workflow runner to surface.
        task_context.metadata["outcome"] = dict(summary.outcome)
        # Deterministic single-line JSON message (sorted keys, trailing newline).
        task_context.metadata["message"] = json.dumps({**summary.outcome, "run_id": summary.run_id, "pipeline_dir": summary.pipeline_dir}, sort_keys=True) + "\n"
        task_context.metadata["exit_code"] = int(summary.exit_code)
        self.save_output(artifact)
        return task_context
|
|
1409
|
+
|
|
1410
|
+
|
|
1411
|
+
class _DeterministicRoutePass(RouterNode):
    """Route to scope-set registration when the coverage decision is "pass"."""

    def determine_next_node(self, task_context: TaskContext) -> Node | None:
        if task_context.metadata["coverage_decision"].decision != "pass":
            return None
        return DeterministicRegisterScopeSetNode(task_context=task_context)
|
|
1417
|
+
|
|
1418
|
+
|
|
1419
|
+
class _DeterministicRoutePassWithAssumptions(RouterNode):
    """Route to registration when the decision is "pass_with_assumptions"."""

    def determine_next_node(self, task_context: TaskContext) -> Node | None:
        if task_context.metadata["coverage_decision"].decision != "pass_with_assumptions":
            return None
        return DeterministicRegisterScopeSetNode(task_context=task_context)
|
|
1425
|
+
|
|
1426
|
+
|
|
1427
|
+
class _DeterministicRouteRevise(RouterNode):
    """Route to the escalation/revision package when the decision is "revise"."""

    def determine_next_node(self, task_context: TaskContext) -> Node | None:
        if task_context.metadata["coverage_decision"].decision != "revise":
            return None
        return DeterministicEscalationPackageNode(task_context=task_context)
|
|
1433
|
+
|
|
1434
|
+
|
|
1435
|
+
class _DeterministicRouteEscalate(RouterNode):
    """Route to the escalation package when the decision is "escalate"."""

    def determine_next_node(self, task_context: TaskContext) -> Node | None:
        if task_context.metadata["coverage_decision"].decision != "escalate":
            return None
        return DeterministicEscalationPackageNode(task_context=task_context)
|
|
1441
|
+
|
|
1442
|
+
|
|
1443
|
+
class DeterministicCoverageDecisionRouter(BaseRouter):
    """Dispatch on the coverage decision to a register or escalation node.

    Any decision no route recognizes falls through to the escalation package.
    """

    def __init__(self) -> None:
        # Order matters: the first route whose decision matches wins.
        route_classes = (
            _DeterministicRoutePass,
            _DeterministicRoutePassWithAssumptions,
            _DeterministicRouteRevise,
            _DeterministicRouteEscalate,
        )
        self.routes = [route_cls() for route_cls in route_classes]
        self.fallback = DeterministicEscalationPackageNode()
|
|
1452
|
+
|
|
1453
|
+
|
|
1454
|
+
class DeterministicRegisterScopeSetNode(RegisterScopeSetNode):
    """Registration node that also persists a project-level scope-set registry file.

    After the base-class registration, writes a draft scope-set record under
    ``.devflow/projects/<project_id>/scope_sets/`` and links the in-memory
    registry record back to that file.
    """

    async def process(self, task_context: TaskContext) -> TaskContext:
        # Base class performs the core registration and fills the metadata
        # entries this method reads below.
        await super().process(task_context)
        outline = task_context.metadata["scope_outline"]
        repo_root = Path(task_context.event.repo_root)
        registry_root = repo_root / ".devflow" / "projects" / outline.project_id / "scope_sets"
        registry_root.mkdir(parents=True, exist_ok=True)
        scope_set_registry_path = registry_root / f"{outline.scope_set_id}.json"
        record = task_context.metadata["scope_registry_record"]
        _write_json(
            scope_set_registry_path,
            {
                "project_id": outline.project_id,
                "scope_set_id": outline.scope_set_id,
                "scope_set_title": outline.scope_set_title,
                # Registered sets always start as drafts pending approval.
                "status": "draft",
                "coverage_status": record.coverage_status,
                "scope_item_ids": [item.scope_id for item in outline.scope_items],
                "cross_cutting_constraints": outline.cross_cutting_constraints,
                "review_notes": outline.review_notes,
                # Gate flag read by run_source_docs_to_scopes_dag: downstream
                # scope->idea work requires explicit human approval first.
                "approval_required_before_scope_to_idea": True,
            },
        )
        # Link the record to the on-disk registry file, then persist it under
        # the pipeline root alongside the other stage artifacts.
        record.scope_set_registry_ref = str(scope_set_registry_path)
        _write_json(Path(task_context.metadata["pipeline_root"]) / "scope_registry_record.json", record.model_dump())
        task_context.metadata["scope_registry_record"] = record
        return task_context
|
|
1481
|
+
|
|
1482
|
+
|
|
1483
|
+
class DeterministicApprovalReadyPackageNode(ApprovalReadyPackageNode):
    """Success-path terminal node: finalize the approval package and summary.

    Re-reads the approval package the base class wrote, overwrites its guidance
    fields with deterministic scope-approval wording, links the registry
    record, and records a success (exit code 0) summary in the task metadata.
    """

    async def process(self, task_context: TaskContext) -> TaskContext:
        # Base class writes the initial approval_package.json under the
        # pipeline root; this override then amends it in place.
        await super().process(task_context)
        outline = task_context.metadata["scope_outline"]
        report = task_context.metadata["scope_coverage_report"]
        registry = task_context.metadata["scope_registry_record"]
        stage_path = Path(task_context.metadata["pipeline_root"]) / "approval_package.json"
        artifact = ApprovalReadyPackageArtifact.model_validate(json.loads(stage_path.read_text(encoding="utf-8")))
        artifact.recommended_next_action = (
            "Review and approve this registered draft scope set. Do not treat it as idea-sufficiency output until humans approve the scope boundary."
        )
        artifact.downstream_ready_if_approved = (
            "Once approved, these registered scope artifacts become the canonical client-affirmed scope boundary for downstream scope->idea enrichment, not direct story decomposition."
        )
        artifact.scope_registry_record_ref = registry.scope_set_registry_ref
        # Write the amended package back over the base-class version.
        _write_json(stage_path, artifact.model_dump())
        summary = SourceScopeDagSummary(
            exit_code=0,
            # NOTE(review): _store_run() appears to return (store, run_id);
            # index 1 is the active run id — confirm against its definition.
            run_id=_store_run()[1],
            pipeline_dir=str(task_context.metadata["pipeline_root"]),
            message="source->scope run complete",
            outcome={
                "project_id": outline.project_id,
                "scope_set_id": outline.scope_set_id,
                "coverage_status": report.coverage_status,
                "registered_scope_count": len(registry.registered_scope_items),
                "resolution_status": "ready_for_review",
            },
        )
        _write_json(Path(task_context.metadata["pipeline_root"]) / "summary.json", summary.model_dump())
        # Mirror the summary into metadata for the workflow runner to surface.
        task_context.metadata["outcome"] = dict(summary.outcome)
        # Deterministic single-line JSON message (sorted keys, trailing newline).
        task_context.metadata["message"] = json.dumps({**summary.outcome, "run_id": summary.run_id, "pipeline_dir": summary.pipeline_dir}, sort_keys=True) + "\n"
        task_context.metadata["exit_code"] = int(summary.exit_code)
        return task_context
|
|
1517
|
+
|
|
1518
|
+
|
|
1519
|
+
class SourceDocsToScopesWorkflow(Workflow):
    """Deterministic source-docs -> scopes workflow definition.

    Topology: load inputs -> coverage gate -> coverage decision -> router ->
    either (register scope set -> approval package) or escalation package.
    """

    # Declarative DAG wiring consumed by the Workflow base class.
    workflow_schema = WorkflowSchema(
        description="Source docs -> scopes deterministic gate/register DAG (load packet+outline -> gate -> route -> register/package)",
        event_schema=SourceDocsToScopesDagEvent,
        start=LoadCoverageInputsNode,
        nodes=[
            NodeConfig(node=LoadCoverageInputsNode, connections=[CoverageGateNode]),
            NodeConfig(node=CoverageGateNode, connections=[CoverageDecisionNode]),
            NodeConfig(node=CoverageDecisionNode, connections=[DeterministicCoverageDecisionRouter]),
            # The router fans out to the register path or the escalation path.
            NodeConfig(node=DeterministicCoverageDecisionRouter, connections=[DeterministicRegisterScopeSetNode, DeterministicEscalationPackageNode], is_router=True),
            NodeConfig(node=DeterministicRegisterScopeSetNode, connections=[DeterministicApprovalReadyPackageNode]),
            # Terminal nodes: no outgoing connections.
            NodeConfig(node=DeterministicApprovalReadyPackageNode, connections=[]),
            NodeConfig(node=DeterministicEscalationPackageNode, connections=[]),
        ],
    )
|
|
1534
|
+
|
|
1535
|
+
|
|
1536
|
+
def run_source_docs_to_scopes_dag(
    *,
    repo_root: Path,
    store: ExecutionStore,
    project_id: str,
    source_packet_inline: dict[str, Any] | None = None,
    source_packet_path: Path | None = None,
    scope_outline_inline: dict[str, Any] | None = None,
    scope_outline_path: Path | None = None,
) -> SourceDocsToScopesDagResult:
    """Run the deterministic source-docs -> scopes DAG end to end.

    Loads the two payloads, creates and tracks an ExecutionStore run, executes
    the workflow, syncs registered scopes on success, and — only when the
    scope-set registry does not require human approval first — enqueues the
    registered scope items for downstream processing.

    Returns:
        SourceDocsToScopesDagResult carrying the exit code, run id, pipeline
        directory, summary message, and outcome mapping.

    Raises:
        ValueError: Propagated from ``_load_json_payload`` when a payload is
            supplied both inline and as a path, or neither.
        Exception: Any workflow or sync failure, after the run has been
            marked failed in the store.
    """
    source_packet = _load_json_payload(inline_payload=source_packet_inline, payload_path=source_packet_path)
    outline = _load_json_payload(inline_payload=scope_outline_inline, payload_path=scope_outline_path)
    scope_set_id = str(outline.get("scope_set_id") or "scope_set_unknown")
    # Deterministic run key: identical inputs map to the same pipeline dir.
    pipeline_key = _stable_id(
        "run_",
        {"repo_root": str(repo_root), "project_id": project_id, "source_packet": source_packet, "scope_outline": outline},
    )
    pipeline_dir = _pipeline_root(repo_root, scope_id=scope_set_id, pipeline_key=pipeline_key)
    pipeline_dir.mkdir(parents=True, exist_ok=True)

    run_id = store.create_run(
        dag_id="source_docs_to_scopes_dag",
        dag_version="v1_gates_scaffold",
        root_correlation_id=f"corr_{pipeline_key}",
        config={"project_id": project_id, "scope_set_id": scope_set_id, "pipeline_key": pipeline_key},
    )
    store.mark_run_started(run_id=run_id)

    wf = SourceDocsToScopesWorkflow()
    # Expose the store/run to node-level helpers for the duration of the run.
    global _CURRENT_STORE, _CURRENT_RUN_ID
    _CURRENT_STORE = store
    _CURRENT_RUN_ID = run_id
    try:
        ctx = wf.run(
            {
                "repo_root": str(repo_root),
                "project_id": project_id,
                # Payloads were already loaded above, so hand them to the
                # workflow inline; the nodes never need to re-read the files.
                "source_packet_inline": source_packet,
                "source_packet_path": None,
                "scope_outline_inline": outline,
                "scope_outline_path": None,
                "pipeline_key": pipeline_key,
            }
        )
    except Exception:
        # Fix: a workflow failure previously left the run stuck in "started";
        # mark it failed before propagating, mirroring the sync-failure path.
        store.mark_run_finished(run_id=run_id, status="failed")
        raise
    finally:
        _CURRENT_STORE = None
        _CURRENT_RUN_ID = None

    exit_code = int(ctx.metadata.get("exit_code") or 0)
    # Nodes may relocate the pipeline root (LoadCoverageInputsNode derives it
    # from the validated outline's scope_set_id, which can differ from the
    # pre-validation guess above); prefer the value they recorded.
    effective_pipeline_dir = Path(str(ctx.metadata.get("pipeline_root") or pipeline_dir))
    try:
        if exit_code == 0:
            _sync_registered_scopes_to_supabase(
                repo_root=repo_root,
                project_id=project_id,
                scope_set_id=scope_set_id,
                run_id=run_id,
                pipeline_dir=effective_pipeline_dir,
            )
    except Exception:
        store.mark_run_finished(run_id=run_id, status="failed")
        raise
    store.mark_run_finished(run_id=run_id, status="succeeded" if exit_code == 0 else "failed")
    if exit_code == 0:
        scope_set_registry_path = repo_root / ".devflow" / "projects" / project_id / "scope_sets" / f"{scope_set_id}.json"
        scope_set_registry = json.loads(scope_set_registry_path.read_text(encoding="utf-8")) if scope_set_registry_path.exists() else {}
        # Approval gate: only auto-enqueue scope tasks when the registry does
        # NOT require human approval before scope->idea work begins.
        if not bool(scope_set_registry.get("approval_required_before_scope_to_idea")):
            # Fix: read the record from the effective pipeline dir — the
            # registration node writes it under ctx.metadata["pipeline_root"],
            # which can differ from the pre-run pipeline_dir computed above.
            registry_path = effective_pipeline_dir / "scope_registry_record.json"
            if registry_path.exists():
                registry = json.loads(registry_path.read_text(encoding="utf-8"))
                for item in registry.get("registered_scope_items") or []:
                    if not isinstance(item, dict):
                        continue
                    scope_id = str(item.get("scope_id") or "").strip()
                    scope_payload_path = str(item.get("registry_ref") or "").strip()
                    # Skip malformed entries rather than enqueueing junk.
                    if not scope_id or not scope_payload_path:
                        continue
                    store.enqueue_scope_task(
                        project_id=project_id,
                        enqueue_run_id=run_id,
                        scope_set_id=scope_set_id,
                        scope_id=scope_id,
                        title=scope_id,
                        scope_payload_path=scope_payload_path,
                    )
    return SourceDocsToScopesDagResult(
        exit_code=exit_code,
        run_id=run_id,
        pipeline_dir=effective_pipeline_dir,
        message=str(ctx.metadata.get("message") or ""),
        outcome=dict(ctx.metadata.get("outcome") or {}),
    )
|