@trac3r/oh-my-god 2.2.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +188 -0
- package/INSTALL-VERIFICATION-INDEX.md +51 -0
- package/LICENSE +21 -0
- package/OMG-setup.sh +2549 -0
- package/QUICK-REFERENCE.md +58 -0
- package/README.md +207 -0
- package/agents/__init__.py +1 -0
- package/agents/__pycache__/model_roles.cpython-313.pyc +0 -0
- package/agents/_model_roles.yaml +26 -0
- package/agents/designer.md +67 -0
- package/agents/explore.md +60 -0
- package/agents/model_roles.py +196 -0
- package/agents/omg-api-builder.md +23 -0
- package/agents/omg-architect-mode.md +41 -0
- package/agents/omg-architect.md +13 -0
- package/agents/omg-backend-engineer.md +41 -0
- package/agents/omg-critic.md +16 -0
- package/agents/omg-database-engineer.md +41 -0
- package/agents/omg-escalation-router.md +17 -0
- package/agents/omg-executor.md +12 -0
- package/agents/omg-frontend-designer.md +41 -0
- package/agents/omg-implement-mode.md +49 -0
- package/agents/omg-infra-engineer.md +41 -0
- package/agents/omg-qa-tester.md +16 -0
- package/agents/omg-research-mode.md +41 -0
- package/agents/omg-security-auditor.md +41 -0
- package/agents/omg-testing-engineer.md +41 -0
- package/agents/plan.md +80 -0
- package/agents/quick_task.md +64 -0
- package/agents/reviewer.md +83 -0
- package/agents/task.md +71 -0
- package/bin/omg +41 -0
- package/commands/OMG:ai-commit.md +113 -0
- package/commands/OMG:api-twin.md +22 -0
- package/commands/OMG:arch.md +313 -0
- package/commands/OMG:browser.md +29 -0
- package/commands/OMG:ccg.md +22 -0
- package/commands/OMG:compat.md +57 -0
- package/commands/OMG:cost.md +181 -0
- package/commands/OMG:crazy.md +125 -0
- package/commands/OMG:create-agent.md +183 -0
- package/commands/OMG:deep-plan.md +18 -0
- package/commands/OMG:deps.md +248 -0
- package/commands/OMG:diagnose-plugins.md +33 -0
- package/commands/OMG:doctor.md +37 -0
- package/commands/OMG:domain-init.md +11 -0
- package/commands/OMG:escalate.md +52 -0
- package/commands/OMG:forge.md +103 -0
- package/commands/OMG:health-check.md +48 -0
- package/commands/OMG:init.md +134 -0
- package/commands/OMG:issue.md +56 -0
- package/commands/OMG:mode.md +44 -0
- package/commands/OMG:playwright.md +17 -0
- package/commands/OMG:preflight.md +26 -0
- package/commands/OMG:preset.md +49 -0
- package/commands/OMG:profile-review.md +58 -0
- package/commands/OMG:project-init.md +11 -0
- package/commands/OMG:ralph-start.md +43 -0
- package/commands/OMG:ralph-stop.md +23 -0
- package/commands/OMG:security-check.md +28 -0
- package/commands/OMG:session-branch.md +101 -0
- package/commands/OMG:session-fork.md +57 -0
- package/commands/OMG:session-merge.md +138 -0
- package/commands/OMG:setup.md +82 -0
- package/commands/OMG:ship.md +18 -0
- package/commands/OMG:stats.md +225 -0
- package/commands/OMG:teams.md +54 -0
- package/commands/OMG:theme.md +44 -0
- package/commands/OMG:validate.md +59 -0
- package/commands/__init__.py +1 -0
- package/docs/command-surface.md +55 -0
- package/docs/install/claude-code.md +53 -0
- package/docs/install/codex.md +45 -0
- package/docs/install/gemini.md +43 -0
- package/docs/install/github-action.md +81 -0
- package/docs/install/github-app-required-checks.md +107 -0
- package/docs/install/github-app.md +161 -0
- package/docs/install/kimi.md +43 -0
- package/docs/install/opencode.md +38 -0
- package/docs/proof.md +182 -0
- package/hooks/__init__.py +0 -0
- package/hooks/__pycache__/__init__.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_agent_registry.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_analytics.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_budget.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_common.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_compression_optimizer.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_cost_ledger.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_learnings.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_memory.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_post_write.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_protected_context.cpython-313.pyc +0 -0
- package/hooks/__pycache__/_token_counter.cpython-313.pyc +0 -0
- package/hooks/__pycache__/branch_manager.cpython-313.pyc +0 -0
- package/hooks/__pycache__/budget_governor.cpython-313.pyc +0 -0
- package/hooks/__pycache__/circuit-breaker.cpython-313.pyc +0 -0
- package/hooks/__pycache__/compression_feedback.cpython-313.pyc +0 -0
- package/hooks/__pycache__/config-guard.cpython-313.pyc +0 -0
- package/hooks/__pycache__/context_pressure.cpython-313.pyc +0 -0
- package/hooks/__pycache__/credential_store.cpython-313.pyc +0 -0
- package/hooks/__pycache__/fetch-rate-limits.cpython-313.pyc +0 -0
- package/hooks/__pycache__/firewall.cpython-313.pyc +0 -0
- package/hooks/__pycache__/hashline-formatter-bridge.cpython-313.pyc +0 -0
- package/hooks/__pycache__/hashline-injector.cpython-313.pyc +0 -0
- package/hooks/__pycache__/hashline-validator.cpython-313.pyc +0 -0
- package/hooks/__pycache__/idle-detector.cpython-313.pyc +0 -0
- package/hooks/__pycache__/instructions-loaded.cpython-313.pyc +0 -0
- package/hooks/__pycache__/intentgate-keyword-detector.cpython-313.pyc +0 -0
- package/hooks/__pycache__/magic-keyword-router.cpython-313.pyc +0 -0
- package/hooks/__pycache__/policy_engine.cpython-313.pyc +0 -0
- package/hooks/__pycache__/post-tool-failure.cpython-313.pyc +0 -0
- package/hooks/__pycache__/post-write.cpython-313.pyc +0 -0
- package/hooks/__pycache__/post_write.cpython-313.pyc +0 -0
- package/hooks/__pycache__/pre-compact.cpython-313.pyc +0 -0
- package/hooks/__pycache__/pre-tool-inject.cpython-313.pyc +0 -0
- package/hooks/__pycache__/prompt-enhancer.cpython-313.pyc +0 -0
- package/hooks/__pycache__/quality-runner.cpython-313.pyc +0 -0
- package/hooks/__pycache__/query.cpython-313.pyc +0 -0
- package/hooks/__pycache__/secret-guard.cpython-313.pyc +0 -0
- package/hooks/__pycache__/secret_audit.cpython-313.pyc +0 -0
- package/hooks/__pycache__/security_validators.cpython-313.pyc +0 -0
- package/hooks/__pycache__/session-end-capture.cpython-313.pyc +0 -0
- package/hooks/__pycache__/session-start.cpython-313.pyc +0 -0
- package/hooks/__pycache__/setup_wizard.cpython-313.pyc +0 -0
- package/hooks/__pycache__/shadow_manager.cpython-313.pyc +0 -0
- package/hooks/__pycache__/state_migration.cpython-313.pyc +0 -0
- package/hooks/__pycache__/stop-gate.cpython-313.pyc +0 -0
- package/hooks/__pycache__/stop_dispatcher.cpython-313.pyc +0 -0
- package/hooks/__pycache__/tdd-gate.cpython-313.pyc +0 -0
- package/hooks/__pycache__/terms-guard.cpython-313.pyc +0 -0
- package/hooks/__pycache__/test-validator.cpython-313.pyc +0 -0
- package/hooks/__pycache__/test_generator_hook.cpython-313.pyc +0 -0
- package/hooks/__pycache__/todo-state-tracker.cpython-313.pyc +0 -0
- package/hooks/__pycache__/tool-ledger.cpython-313.pyc +0 -0
- package/hooks/__pycache__/trust_review.cpython-313.pyc +0 -0
- package/hooks/__pycache__/user-prompt-submit.cpython-313.pyc +0 -0
- package/hooks/_agent_registry.py +481 -0
- package/hooks/_analytics.py +291 -0
- package/hooks/_budget.py +31 -0
- package/hooks/_common.py +761 -0
- package/hooks/_compression_optimizer.py +119 -0
- package/hooks/_cost_ledger.py +176 -0
- package/hooks/_learnings.py +126 -0
- package/hooks/_memory.py +103 -0
- package/hooks/_post_write.py +46 -0
- package/hooks/_protected_context.py +150 -0
- package/hooks/_token_counter.py +221 -0
- package/hooks/branch_manager.py +255 -0
- package/hooks/budget_governor.py +326 -0
- package/hooks/circuit-breaker.py +270 -0
- package/hooks/compression_feedback.py +254 -0
- package/hooks/config-guard.py +193 -0
- package/hooks/context_pressure.py +119 -0
- package/hooks/credential_store.py +970 -0
- package/hooks/fetch-rate-limits.py +212 -0
- package/hooks/firewall.py +323 -0
- package/hooks/hashline-formatter-bridge.py +224 -0
- package/hooks/hashline-injector.py +273 -0
- package/hooks/hashline-validator.py +216 -0
- package/hooks/idle-detector.py +97 -0
- package/hooks/instructions-loaded.py +26 -0
- package/hooks/intentgate-keyword-detector.py +200 -0
- package/hooks/magic-keyword-router.py +195 -0
- package/hooks/policy_engine.py +767 -0
- package/hooks/post-tool-failure.py +19 -0
- package/hooks/post-write.py +233 -0
- package/hooks/pre-compact.py +470 -0
- package/hooks/pre-tool-inject.py +98 -0
- package/hooks/prompt-enhancer.py +879 -0
- package/hooks/quality-runner.py +191 -0
- package/hooks/query.py +512 -0
- package/hooks/secret-guard.py +120 -0
- package/hooks/secret_audit.py +144 -0
- package/hooks/security_validators.py +93 -0
- package/hooks/session-end-capture.py +505 -0
- package/hooks/session-start.py +261 -0
- package/hooks/setup_wizard.py +1101 -0
- package/hooks/shadow_manager.py +476 -0
- package/hooks/state_migration.py +228 -0
- package/hooks/stop-gate.py +7 -0
- package/hooks/stop_dispatcher.py +1259 -0
- package/hooks/tdd-gate.py +10 -0
- package/hooks/terms-guard.py +98 -0
- package/hooks/test-validator.py +462 -0
- package/hooks/test_generator_hook.py +123 -0
- package/hooks/todo-state-tracker.py +114 -0
- package/hooks/tool-ledger.py +165 -0
- package/hooks/trust_review.py +662 -0
- package/hooks/user-prompt-submit.py +12 -0
- package/hud/omg-hud.mjs +1571 -0
- package/lab/__init__.py +1 -0
- package/lab/__pycache__/__init__.cpython-313.pyc +0 -0
- package/lab/__pycache__/axolotl_adapter.cpython-313.pyc +0 -0
- package/lab/__pycache__/forge_runner.cpython-313.pyc +0 -0
- package/lab/__pycache__/gazebo_adapter.cpython-313.pyc +0 -0
- package/lab/__pycache__/isaac_gym_adapter.cpython-313.pyc +0 -0
- package/lab/__pycache__/mock_isaac_env.cpython-313.pyc +0 -0
- package/lab/__pycache__/pipeline.cpython-313.pyc +0 -0
- package/lab/__pycache__/policies.cpython-313.pyc +0 -0
- package/lab/__pycache__/pybullet_adapter.cpython-313.pyc +0 -0
- package/lab/axolotl_adapter.py +531 -0
- package/lab/forge_runner.py +103 -0
- package/lab/gazebo_adapter.py +168 -0
- package/lab/isaac_gym_adapter.py +190 -0
- package/lab/mock_isaac_env.py +47 -0
- package/lab/pipeline.py +712 -0
- package/lab/policies.py +52 -0
- package/lab/pybullet_adapter.py +192 -0
- package/package.json +61 -0
- package/plugins/README.md +78 -0
- package/plugins/__init__.py +1 -0
- package/plugins/__pycache__/__init__.cpython-313.pyc +0 -0
- package/plugins/advanced/commands/OMG-code-review.md +114 -0
- package/plugins/advanced/commands/OMG-deep-plan.md +266 -0
- package/plugins/advanced/commands/OMG-handoff.md +115 -0
- package/plugins/advanced/commands/OMG-learn.md +110 -0
- package/plugins/advanced/commands/OMG-maintainer.md +31 -0
- package/plugins/advanced/commands/OMG-ralph-start.md +43 -0
- package/plugins/advanced/commands/OMG-ralph-stop.md +23 -0
- package/plugins/advanced/commands/OMG-security-review.md +16 -0
- package/plugins/advanced/commands/OMG-sequential-thinking.md +20 -0
- package/plugins/advanced/commands/OMG-ship.md +46 -0
- package/plugins/advanced/commands/OMG:code-review.md +114 -0
- package/plugins/advanced/commands/OMG:deep-plan.md +266 -0
- package/plugins/advanced/commands/OMG:handoff.md +115 -0
- package/plugins/advanced/commands/OMG:learn.md +110 -0
- package/plugins/advanced/commands/OMG:maintainer.md +31 -0
- package/plugins/advanced/commands/OMG:ralph-start.md +43 -0
- package/plugins/advanced/commands/OMG:ralph-stop.md +23 -0
- package/plugins/advanced/commands/OMG:security-review.md +16 -0
- package/plugins/advanced/commands/OMG:sequential-thinking.md +20 -0
- package/plugins/advanced/commands/OMG:ship.md +46 -0
- package/plugins/advanced/plugin.json +104 -0
- package/plugins/core/plugin.json +204 -0
- package/plugins/dephealth/__init__.py +0 -0
- package/plugins/dephealth/__pycache__/__init__.cpython-313.pyc +0 -0
- package/plugins/dephealth/__pycache__/cve_scanner.cpython-313.pyc +0 -0
- package/plugins/dephealth/__pycache__/license_checker.cpython-313.pyc +0 -0
- package/plugins/dephealth/__pycache__/manifest_detector.cpython-313.pyc +0 -0
- package/plugins/dephealth/__pycache__/vuln_analyzer.cpython-313.pyc +0 -0
- package/plugins/dephealth/cve_scanner.py +279 -0
- package/plugins/dephealth/license_checker.py +135 -0
- package/plugins/dephealth/manifest_detector.py +423 -0
- package/plugins/dephealth/vuln_analyzer.py +176 -0
- package/plugins/testgen/__init__.py +0 -0
- package/plugins/testgen/__pycache__/__init__.cpython-313.pyc +0 -0
- package/plugins/testgen/__pycache__/codamosa_engine.cpython-313.pyc +0 -0
- package/plugins/testgen/__pycache__/edge_case_synthesizer.cpython-313.pyc +0 -0
- package/plugins/testgen/__pycache__/framework_detector.cpython-313.pyc +0 -0
- package/plugins/testgen/__pycache__/skeleton_generator.cpython-313.pyc +0 -0
- package/plugins/testgen/codamosa_engine.py +402 -0
- package/plugins/testgen/edge_case_synthesizer.py +184 -0
- package/plugins/testgen/framework_detector.py +271 -0
- package/plugins/testgen/skeleton_generator.py +219 -0
- package/plugins/viz/__init__.py +0 -0
- package/plugins/viz/__pycache__/__init__.cpython-313.pyc +0 -0
- package/plugins/viz/__pycache__/ast_parser.cpython-313.pyc +0 -0
- package/plugins/viz/__pycache__/diagram_generator.cpython-313.pyc +0 -0
- package/plugins/viz/__pycache__/graph_builder.cpython-313.pyc +0 -0
- package/plugins/viz/__pycache__/native_parsers.cpython-313.pyc +0 -0
- package/plugins/viz/__pycache__/regex_parser.cpython-313.pyc +0 -0
- package/plugins/viz/ast_parser.py +139 -0
- package/plugins/viz/diagram_generator.py +192 -0
- package/plugins/viz/graph_builder.py +444 -0
- package/plugins/viz/native_parsers.py +259 -0
- package/plugins/viz/regex_parser.py +112 -0
- package/pyproject.toml +143 -0
- package/registry/__init__.py +1 -0
- package/registry/__pycache__/__init__.cpython-313.pyc +0 -0
- package/registry/__pycache__/approval_artifact.cpython-313.pyc +0 -0
- package/registry/__pycache__/verify_artifact.cpython-313.pyc +0 -0
- package/registry/approval_artifact.py +236 -0
- package/registry/bundles/algorithms.yaml +45 -0
- package/registry/bundles/api-twin.yaml +48 -0
- package/registry/bundles/ast-pack.yaml +80 -0
- package/registry/bundles/claim-judge.yaml +49 -0
- package/registry/bundles/control-plane.yaml +192 -0
- package/registry/bundles/data-lineage.yaml +47 -0
- package/registry/bundles/delta-classifier.yaml +47 -0
- package/registry/bundles/eval-gate.yaml +47 -0
- package/registry/bundles/hash-edit.yaml +73 -0
- package/registry/bundles/health.yaml +45 -0
- package/registry/bundles/hook-governor.yaml +101 -0
- package/registry/bundles/incident-replay.yaml +47 -0
- package/registry/bundles/lsp-pack.yaml +80 -0
- package/registry/bundles/mcp-fabric.yaml +53 -0
- package/registry/bundles/plan-council.yaml +56 -0
- package/registry/bundles/preflight.yaml +48 -0
- package/registry/bundles/proof-gate.yaml +49 -0
- package/registry/bundles/remote-supervisor.yaml +49 -0
- package/registry/bundles/robotics.yaml +45 -0
- package/registry/bundles/secure-worktree-pipeline.yaml +69 -0
- package/registry/bundles/security-check.yaml +50 -0
- package/registry/bundles/terminal-lane.yaml +61 -0
- package/registry/bundles/test-intent-lock.yaml +49 -0
- package/registry/bundles/tracebank.yaml +47 -0
- package/registry/bundles/vision.yaml +45 -0
- package/registry/omg-capability.schema.json +378 -0
- package/registry/policy-packs/airgapped.lock.json +11 -0
- package/registry/policy-packs/airgapped.signature.json +10 -0
- package/registry/policy-packs/airgapped.yaml +16 -0
- package/registry/policy-packs/fintech.lock.json +11 -0
- package/registry/policy-packs/fintech.signature.json +10 -0
- package/registry/policy-packs/fintech.yaml +15 -0
- package/registry/policy-packs/locked-prod.lock.json +11 -0
- package/registry/policy-packs/locked-prod.signature.json +10 -0
- package/registry/policy-packs/locked-prod.yaml +18 -0
- package/registry/trusted_signers.json +44 -0
- package/registry/verify_artifact.py +493 -0
- package/runtime/__init__.py +36 -0
- package/runtime/__pycache__/__init__.cpython-313.pyc +0 -0
- package/runtime/__pycache__/adoption.cpython-313.pyc +0 -0
- package/runtime/__pycache__/agent_selector.cpython-313.pyc +0 -0
- package/runtime/__pycache__/api_twin.cpython-313.pyc +0 -0
- package/runtime/__pycache__/architecture_signal.cpython-313.pyc +0 -0
- package/runtime/__pycache__/artifact_parsers.cpython-313.pyc +0 -0
- package/runtime/__pycache__/asset_loader.cpython-313.pyc +0 -0
- package/runtime/__pycache__/background_verification.cpython-313.pyc +0 -0
- package/runtime/__pycache__/budget_envelopes.cpython-313.pyc +0 -0
- package/runtime/__pycache__/business_workflow.cpython-313.pyc +0 -0
- package/runtime/__pycache__/canonical_surface.cpython-313.pyc +0 -0
- package/runtime/__pycache__/canonical_taxonomy.cpython-313.pyc +0 -0
- package/runtime/__pycache__/claim_judge.cpython-313.pyc +0 -0
- package/runtime/__pycache__/cli_provider.cpython-313.pyc +0 -0
- package/runtime/__pycache__/compat.cpython-313.pyc +0 -0
- package/runtime/__pycache__/complexity_scorer.cpython-313.pyc +0 -0
- package/runtime/__pycache__/compliance_governor.cpython-313.pyc +0 -0
- package/runtime/__pycache__/config_transaction.cpython-313.pyc +0 -0
- package/runtime/__pycache__/context_compiler.cpython-313.pyc +0 -0
- package/runtime/__pycache__/context_engine.cpython-313.pyc +0 -0
- package/runtime/__pycache__/context_limits.cpython-313.pyc +0 -0
- package/runtime/__pycache__/contract_compiler.cpython-313.pyc +0 -0
- package/runtime/__pycache__/custom_agent_loader.cpython-313.pyc +0 -0
- package/runtime/__pycache__/data_lineage.cpython-313.pyc +0 -0
- package/runtime/__pycache__/defense_state.cpython-313.pyc +0 -0
- package/runtime/__pycache__/delta_classifier.cpython-313.pyc +0 -0
- package/runtime/__pycache__/dispatcher.cpython-313.pyc +0 -0
- package/runtime/__pycache__/doc_generator.cpython-313.pyc +0 -0
- package/runtime/__pycache__/domain_packs.cpython-313.pyc +0 -0
- package/runtime/__pycache__/ecosystem.cpython-313.pyc +0 -0
- package/runtime/__pycache__/equalizer.cpython-313.pyc +0 -0
- package/runtime/__pycache__/eval_gate.cpython-313.pyc +0 -0
- package/runtime/__pycache__/evidence_narrator.cpython-313.pyc +0 -0
- package/runtime/__pycache__/evidence_query.cpython-313.pyc +0 -0
- package/runtime/__pycache__/evidence_registry.cpython-313.pyc +0 -0
- package/runtime/__pycache__/evidence_requirements.cpython-313.pyc +0 -0
- package/runtime/__pycache__/exec_kernel.cpython-313.pyc +0 -0
- package/runtime/__pycache__/explainer_formatter.cpython-313.pyc +0 -0
- package/runtime/__pycache__/feature_registry.cpython-313.pyc +0 -0
- package/runtime/__pycache__/forge_agents.cpython-313.pyc +0 -0
- package/runtime/__pycache__/forge_contracts.cpython-313.pyc +0 -0
- package/runtime/__pycache__/forge_domains.cpython-313.pyc +0 -0
- package/runtime/__pycache__/forge_run_id.cpython-313.pyc +0 -0
- package/runtime/__pycache__/github_integration.cpython-313.pyc +0 -0
- package/runtime/__pycache__/github_review_bot.cpython-313.pyc +0 -0
- package/runtime/__pycache__/github_review_contract.cpython-313.pyc +0 -0
- package/runtime/__pycache__/github_review_formatter.cpython-313.pyc +0 -0
- package/runtime/__pycache__/guide_assert.cpython-313.pyc +0 -0
- package/runtime/__pycache__/hook_governor.cpython-313.pyc +0 -0
- package/runtime/__pycache__/host_parity.cpython-313.pyc +0 -0
- package/runtime/__pycache__/incident_replay.cpython-313.pyc +0 -0
- package/runtime/__pycache__/install_planner.cpython-313.pyc +0 -0
- package/runtime/__pycache__/interaction_journal.cpython-313.pyc +0 -0
- package/runtime/__pycache__/issue_surface.cpython-313.pyc +0 -0
- package/runtime/__pycache__/legacy_compat.cpython-313.pyc +0 -0
- package/runtime/__pycache__/mcp_config_writers.cpython-313.pyc +0 -0
- package/runtime/__pycache__/mcp_lifecycle.cpython-313.pyc +0 -0
- package/runtime/__pycache__/mcp_memory_server.cpython-313.pyc +0 -0
- package/runtime/__pycache__/memory_store.cpython-313.pyc +0 -0
- package/runtime/__pycache__/merge_writer.cpython-313.pyc +0 -0
- package/runtime/__pycache__/music_omr_testbed.cpython-313.pyc +0 -0
- package/runtime/__pycache__/mutation_gate.cpython-313.pyc +0 -0
- package/runtime/__pycache__/omc_compat.cpython-313.pyc +0 -0
- package/runtime/__pycache__/omg_browser_cli.cpython-313.pyc +0 -0
- package/runtime/__pycache__/omg_mcp_server.cpython-313.pyc +0 -0
- package/runtime/__pycache__/opus_plan.cpython-313.pyc +0 -0
- package/runtime/__pycache__/playwright_adapter.cpython-313.pyc +0 -0
- package/runtime/__pycache__/playwright_pack.cpython-313.pyc +0 -0
- package/runtime/__pycache__/plugin_diagnostics.cpython-313.pyc +0 -0
- package/runtime/__pycache__/plugin_interop.cpython-313.pyc +0 -0
- package/runtime/__pycache__/policy_pack_loader.cpython-313.pyc +0 -0
- package/runtime/__pycache__/preflight.cpython-313.pyc +0 -0
- package/runtime/__pycache__/profile_io.cpython-313.pyc +0 -0
- package/runtime/__pycache__/prompt_compiler.cpython-313.pyc +0 -0
- package/runtime/__pycache__/proof_chain.cpython-313.pyc +0 -0
- package/runtime/__pycache__/proof_gate.cpython-313.pyc +0 -0
- package/runtime/__pycache__/provider_parity_eval.cpython-313.pyc +0 -0
- package/runtime/__pycache__/release_artifact_audit.cpython-313.pyc +0 -0
- package/runtime/__pycache__/release_run_coordinator.cpython-313.pyc +0 -0
- package/runtime/__pycache__/release_surface_compiler.cpython-313.pyc +0 -0
- package/runtime/__pycache__/release_surface_registry.cpython-313.pyc +0 -0
- package/runtime/__pycache__/release_surfaces.cpython-313.pyc +0 -0
- package/runtime/__pycache__/remote_supervisor.cpython-313.pyc +0 -0
- package/runtime/__pycache__/repro_pack.cpython-313.pyc +0 -0
- package/runtime/__pycache__/rollback_manifest.cpython-313.pyc +0 -0
- package/runtime/__pycache__/router_critics.cpython-313.pyc +0 -0
- package/runtime/__pycache__/router_executor.cpython-313.pyc +0 -0
- package/runtime/__pycache__/router_selector.cpython-313.pyc +0 -0
- package/runtime/__pycache__/runtime_contracts.cpython-313.pyc +0 -0
- package/runtime/__pycache__/runtime_profile.cpython-313.pyc +0 -0
- package/runtime/__pycache__/security_check.cpython-313.pyc +0 -0
- package/runtime/__pycache__/session_health.cpython-313.pyc +0 -0
- package/runtime/__pycache__/skill_evolution.cpython-313.pyc +0 -0
- package/runtime/__pycache__/skill_registry.cpython-313.pyc +0 -0
- package/runtime/__pycache__/subagent_dispatcher.cpython-313.pyc +0 -0
- package/runtime/__pycache__/subscription_tiers.cpython-313.pyc +0 -0
- package/runtime/__pycache__/team_router.cpython-313.pyc +0 -0
- package/runtime/__pycache__/test_intent_lock.cpython-313-pytest-9.0.2.pyc +0 -0
- package/runtime/__pycache__/test_intent_lock.cpython-313.pyc +0 -0
- package/runtime/__pycache__/tmux_session_manager.cpython-313.pyc +0 -0
- package/runtime/__pycache__/tool_fabric.cpython-313.pyc +0 -0
- package/runtime/__pycache__/tool_plan_gate.cpython-313.pyc +0 -0
- package/runtime/__pycache__/tool_relevance.cpython-313.pyc +0 -0
- package/runtime/__pycache__/tracebank.cpython-313.pyc +0 -0
- package/runtime/__pycache__/untrusted_content.cpython-313.pyc +0 -0
- package/runtime/__pycache__/validate.cpython-313.pyc +0 -0
- package/runtime/__pycache__/verdict_schema.cpython-313.pyc +0 -0
- package/runtime/__pycache__/verification_controller.cpython-313.pyc +0 -0
- package/runtime/__pycache__/verification_loop.cpython-313.pyc +0 -0
- package/runtime/__pycache__/vision_artifacts.cpython-313.pyc +0 -0
- package/runtime/__pycache__/vision_cache.cpython-313.pyc +0 -0
- package/runtime/__pycache__/vision_jobs.cpython-313.pyc +0 -0
- package/runtime/__pycache__/worker_watchdog.cpython-313.pyc +0 -0
- package/runtime/adapters/__init__.py +13 -0
- package/runtime/adapters/__pycache__/__init__.cpython-313.pyc +0 -0
- package/runtime/adapters/__pycache__/claude.cpython-313.pyc +0 -0
- package/runtime/adapters/__pycache__/gpt.cpython-313.pyc +0 -0
- package/runtime/adapters/__pycache__/local.cpython-313.pyc +0 -0
- package/runtime/adapters/claude.py +63 -0
- package/runtime/adapters/gpt.py +56 -0
- package/runtime/adapters/local.py +56 -0
- package/runtime/adoption.py +280 -0
- package/runtime/api_twin.py +450 -0
- package/runtime/architecture_signal.py +226 -0
- package/runtime/artifact_parsers.py +161 -0
- package/runtime/asset_loader.py +62 -0
- package/runtime/background_verification.py +178 -0
- package/runtime/budget_envelopes.py +398 -0
- package/runtime/business_workflow.py +234 -0
- package/runtime/canonical_surface.py +53 -0
- package/runtime/canonical_taxonomy.py +27 -0
- package/runtime/claim_judge.py +648 -0
- package/runtime/cli_provider.py +105 -0
- package/runtime/compat.py +2222 -0
- package/runtime/complexity_scorer.py +148 -0
- package/runtime/compliance_governor.py +505 -0
- package/runtime/config_transaction.py +304 -0
- package/runtime/context_compiler.py +131 -0
- package/runtime/context_engine.py +708 -0
- package/runtime/context_limits.py +363 -0
- package/runtime/contract_compiler.py +3664 -0
- package/runtime/custom_agent_loader.py +366 -0
- package/runtime/data_lineage.py +244 -0
- package/runtime/defense_state.py +261 -0
- package/runtime/delta_classifier.py +231 -0
- package/runtime/dispatcher.py +47 -0
- package/runtime/doc_generator.py +319 -0
- package/runtime/domain_packs.py +75 -0
- package/runtime/ecosystem.py +371 -0
- package/runtime/equalizer.py +268 -0
- package/runtime/eval_gate.py +96 -0
- package/runtime/evidence_narrator.py +147 -0
- package/runtime/evidence_query.py +303 -0
- package/runtime/evidence_registry.py +16 -0
- package/runtime/evidence_requirements.py +157 -0
- package/runtime/exec_kernel.py +267 -0
- package/runtime/explainer_formatter.py +82 -0
- package/runtime/feature_registry.py +109 -0
- package/runtime/forge_agents.py +915 -0
- package/runtime/forge_contracts.py +519 -0
- package/runtime/forge_domains.py +68 -0
- package/runtime/forge_run_id.py +86 -0
- package/runtime/guide_assert.py +135 -0
- package/runtime/hook_governor.py +156 -0
- package/runtime/host_parity.py +373 -0
- package/runtime/incident_replay.py +310 -0
- package/runtime/install_planner.py +617 -0
- package/runtime/interaction_journal.py +566 -0
- package/runtime/issue_surface.py +472 -0
- package/runtime/legacy_compat.py +7 -0
- package/runtime/mcp_config_writers.py +360 -0
- package/runtime/mcp_lifecycle.py +175 -0
- package/runtime/mcp_memory_server.py +220 -0
- package/runtime/memory_parsers/__init__.py +0 -0
- package/runtime/memory_parsers/__pycache__/__init__.cpython-313.pyc +0 -0
- package/runtime/memory_parsers/__pycache__/chatgpt_parser.cpython-313.pyc +0 -0
- package/runtime/memory_parsers/__pycache__/claude_import.cpython-313.pyc +0 -0
- package/runtime/memory_parsers/__pycache__/export.cpython-313.pyc +0 -0
- package/runtime/memory_parsers/__pycache__/gemini_import.cpython-313.pyc +0 -0
- package/runtime/memory_parsers/__pycache__/kimi_import.cpython-313.pyc +0 -0
- package/runtime/memory_parsers/chatgpt_parser.py +257 -0
- package/runtime/memory_parsers/claude_import.py +107 -0
- package/runtime/memory_parsers/export.py +97 -0
- package/runtime/memory_parsers/gemini_import.py +91 -0
- package/runtime/memory_parsers/kimi_import.py +91 -0
- package/runtime/memory_store.py +1182 -0
- package/runtime/merge_writer.py +445 -0
- package/runtime/music_omr_testbed.py +336 -0
- package/runtime/mutation_gate.py +320 -0
- package/runtime/omc_compat.py +7 -0
- package/runtime/omg_browser_cli.py +95 -0
- package/runtime/omg_compat_contract_snapshot.json +936 -0
- package/runtime/omg_contract_snapshot.json +936 -0
- package/runtime/omg_mcp_server.py +306 -0
- package/runtime/playwright_adapter.py +39 -0
- package/runtime/playwright_pack.py +253 -0
- package/runtime/plugin_diagnostics.py +308 -0
- package/runtime/plugin_interop.py +1060 -0
- package/runtime/policy_pack_loader.py +147 -0
- package/runtime/preflight.py +135 -0
- package/runtime/profile_io.py +328 -0
- package/runtime/proof_chain.py +472 -0
- package/runtime/proof_gate.py +442 -0
- package/runtime/provider_parity_eval.py +109 -0
- package/runtime/providers/__init__.py +0 -0
- package/runtime/providers/__pycache__/__init__.cpython-313.pyc +0 -0
- package/runtime/providers/__pycache__/codex_provider.cpython-313.pyc +0 -0
- package/runtime/providers/__pycache__/gemini_provider.cpython-313.pyc +0 -0
- package/runtime/providers/__pycache__/kimi_provider.cpython-313.pyc +0 -0
- package/runtime/providers/__pycache__/opencode_provider.cpython-313.pyc +0 -0
- package/runtime/providers/codex_provider.py +129 -0
- package/runtime/providers/gemini_provider.py +143 -0
- package/runtime/providers/kimi_provider.py +167 -0
- package/runtime/providers/opencode_provider.py +99 -0
- package/runtime/release_artifact_audit.py +556 -0
- package/runtime/release_run_coordinator.py +574 -0
- package/runtime/release_surface_compiler.py +643 -0
- package/runtime/release_surface_registry.py +283 -0
- package/runtime/release_surfaces.py +320 -0
- package/runtime/remote_supervisor.py +79 -0
- package/runtime/repro_pack.py +398 -0
- package/runtime/rollback_manifest.py +143 -0
- package/runtime/router_critics.py +229 -0
- package/runtime/router_executor.py +142 -0
- package/runtime/router_selector.py +99 -0
- package/runtime/runtime_contracts.py +292 -0
- package/runtime/runtime_profile.py +133 -0
- package/runtime/security_check.py +1094 -0
- package/runtime/session_health.py +546 -0
- package/runtime/skill_evolution.py +221 -0
- package/runtime/skill_registry.py +53 -0
- package/runtime/subagent_dispatcher.py +604 -0
- package/runtime/subscription_tiers.py +258 -0
- package/runtime/team_router.py +1399 -0
- package/runtime/test_intent_lock.py +543 -0
- package/runtime/tmux_session_manager.py +172 -0
- package/runtime/tool_fabric.py +570 -0
- package/runtime/tool_plan_gate.py +460 -0
- package/runtime/tracebank.py +125 -0
- package/runtime/untrusted_content.py +360 -0
- package/runtime/validate.py +293 -0
- package/runtime/verdict_schema.py +198 -0
- package/runtime/verification_controller.py +235 -0
- package/runtime/verification_loop.py +73 -0
- package/runtime/vision_artifacts.py +31 -0
- package/runtime/vision_cache.py +38 -0
- package/runtime/vision_jobs.py +92 -0
- package/runtime/worker_watchdog.py +526 -0
- package/scripts/__pycache__/audit-published-artifact.cpython-313.pyc +0 -0
- package/scripts/__pycache__/check-doc-parity.cpython-313.pyc +0 -0
- package/scripts/__pycache__/check-omg-standalone-clean.cpython-313.pyc +0 -0
- package/scripts/__pycache__/github_review_helpers.cpython-313.pyc +0 -0
- package/scripts/__pycache__/omg.cpython-313.pyc +0 -0
- package/scripts/__pycache__/prepare-release-proof-fixtures.cpython-313.pyc +0 -0
- package/scripts/__pycache__/sync-release-identity.cpython-313.pyc +0 -0
- package/scripts/__pycache__/validate-release-identity.cpython-313.pyc +0 -0
- package/scripts/audit-published-artifact.py +59 -0
- package/scripts/check-omg-compat-contract-snapshot.py +137 -0
- package/scripts/check-omg-contract-snapshot.py +12 -0
- package/scripts/check-omg-public-ready.py +273 -0
- package/scripts/check-omg-standalone-clean.py +133 -0
- package/scripts/emit_host_parity.py +72 -0
- package/scripts/legacy_to_omg_migrate.py +29 -0
- package/scripts/migrate-legacy.py +464 -0
- package/scripts/omc_to_omg_migrate.py +12 -0
- package/scripts/omg.py +2962 -0
- package/scripts/pre-release-check.sh +38 -0
- package/scripts/prepare-release-proof-fixtures.py +602 -0
- package/scripts/print-canonical-version.py +80 -0
- package/scripts/settings-merge.py +289 -0
- package/scripts/sync-release-identity.py +481 -0
- package/scripts/validate-release-identity.py +632 -0
- package/scripts/verify-no-omc.sh +5 -0
- package/scripts/verify-standalone.sh +35 -0
- package/settings.json +751 -0
- package/tools/__init__.py +2 -0
- package/tools/__pycache__/__init__.cpython-313.pyc +0 -0
- package/tools/__pycache__/browser_consent.cpython-313.pyc +0 -0
- package/tools/__pycache__/browser_stealth.cpython-313.pyc +0 -0
- package/tools/__pycache__/browser_tool.cpython-313.pyc +0 -0
- package/tools/__pycache__/changelog_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/commit_splitter.cpython-313.pyc +0 -0
- package/tools/__pycache__/config_discovery.cpython-313.pyc +0 -0
- package/tools/__pycache__/config_merger.cpython-313.pyc +0 -0
- package/tools/__pycache__/dashboard_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/git_inspector.cpython-313.pyc +0 -0
- package/tools/__pycache__/lsp_client.cpython-313.pyc +0 -0
- package/tools/__pycache__/lsp_operations.cpython-313.pyc +0 -0
- package/tools/__pycache__/pr_generator.cpython-313.pyc +0 -0
- package/tools/__pycache__/python_repl.cpython-313.pyc +0 -0
- package/tools/__pycache__/python_sandbox.cpython-313.pyc +0 -0
- package/tools/__pycache__/session_snapshot.cpython-313.pyc +0 -0
- package/tools/__pycache__/ssh_manager.cpython-313.pyc +0 -0
- package/tools/__pycache__/theme_engine.cpython-313.pyc +0 -0
- package/tools/__pycache__/theme_selector.cpython-313.pyc +0 -0
- package/tools/__pycache__/web_search.cpython-313.pyc +0 -0
- package/tools/browser_consent.py +289 -0
- package/tools/browser_stealth.py +481 -0
- package/tools/browser_tool.py +448 -0
- package/tools/changelog_generator.py +347 -0
- package/tools/commit_splitter.py +749 -0
- package/tools/config_discovery.py +151 -0
- package/tools/config_merger.py +449 -0
- package/tools/dashboard_generator.py +300 -0
- package/tools/git_inspector.py +298 -0
- package/tools/lsp_client.py +275 -0
- package/tools/lsp_discovery.py +231 -0
- package/tools/lsp_operations.py +392 -0
- package/tools/pr_generator.py +404 -0
- package/tools/python_repl.py +712 -0
- package/tools/python_sandbox.py +768 -0
- package/tools/search_providers/__init__.py +77 -0
- package/tools/search_providers/__pycache__/__init__.cpython-313.pyc +0 -0
- package/tools/search_providers/__pycache__/brave.cpython-313.pyc +0 -0
- package/tools/search_providers/__pycache__/exa.cpython-313.pyc +0 -0
- package/tools/search_providers/__pycache__/jina.cpython-313.pyc +0 -0
- package/tools/search_providers/__pycache__/perplexity.cpython-313.pyc +0 -0
- package/tools/search_providers/__pycache__/synthetic.cpython-313.pyc +0 -0
- package/tools/search_providers/brave.py +115 -0
- package/tools/search_providers/exa.py +116 -0
- package/tools/search_providers/jina.py +104 -0
- package/tools/search_providers/perplexity.py +139 -0
- package/tools/search_providers/synthetic.py +74 -0
- package/tools/session_snapshot.py +851 -0
- package/tools/ssh_manager.py +912 -0
- package/tools/theme_engine.py +296 -0
- package/tools/theme_selector.py +137 -0
- package/tools/web_search.py +675 -0
|
@@ -0,0 +1,2222 @@
|
|
|
1
|
+
"""OMG standalone legacy-compat dispatcher.
|
|
2
|
+
|
|
3
|
+
Primary namespace is `compat/*` while legacy `omg/*` aliases remain supported.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
from collections import Counter
|
|
8
|
+
from datetime import datetime, timezone
|
|
9
|
+
from importlib import import_module
|
|
10
|
+
import json
|
|
11
|
+
import os
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
import re
|
|
14
|
+
import sys
|
|
15
|
+
import tempfile
|
|
16
|
+
from typing import Any, Callable, TypedDict
|
|
17
|
+
|
|
18
|
+
from hooks.policy_engine import evaluate_bash_command
|
|
19
|
+
from lab.pipeline import run_pipeline
|
|
20
|
+
from runtime.adoption import CANONICAL_VERSION
|
|
21
|
+
from runtime.canonical_surface import get_canonical_hosts, get_compat_hosts
|
|
22
|
+
from runtime.dispatcher import dispatch_runtime
|
|
23
|
+
from runtime.plugin_diagnostics import run_plugin_diagnostics
|
|
24
|
+
from runtime.security_check import run_security_check
|
|
25
|
+
from runtime.team_router import TeamDispatchRequest, dispatch_team
|
|
26
|
+
|
|
27
|
+
# Schema/version identifiers for persisted snapshot documents.
# NOTE(review): the LEGACY_* schema constants are intentionally equal to the
# canonical ones (post-rename aliases) — confirm before relying on inequality.
CONTRACT_SNAPSHOT_SCHEMA = "OmgCompatContractSnapshot"
LEGACY_CONTRACT_SNAPSHOT_SCHEMA = "OmgCompatContractSnapshot"
CONTRACT_SNAPSHOT_VERSION = CANONICAL_VERSION
LEGACY_SNAPSHOT_VERSION = "0.9.0"
GAP_REPORT_SCHEMA = "OmgCompatGapReport"
LEGACY_GAP_REPORT_SCHEMA = "OmgCompatGapReport"
RESULT_SCHEMA = "OmgCompatResult"
LEGACY_RESULT_SCHEMA = "OmgCompatResult"

# File locations: each canonical path has a legacy twin that is still written
# for backward compatibility (see build_compat_gap_report / _append_audit_event).
DEFAULT_CONTRACT_SNAPSHOT_PATH = "runtime/omg_compat_contract_snapshot.json"
LEGACY_CONTRACT_SNAPSHOT_PATH = "runtime/omg_contract_snapshot.json"
DEFAULT_GAP_REPORT_PATH = ".omg/evidence/omg-compat-gap.json"
LEGACY_GAP_REPORT_PATH = ".omg/evidence/compat-gap.json"
DEFAULT_AUDIT_LEDGER_PATH = ".omg/state/ledger/omg-compat-audit.jsonl"
LEGACY_AUDIT_LEDGER_PATH = ".omg/state/ledger/compat-audit.jsonl"

# Audit event names; canonical events are mirrored under their legacy names.
DEFAULT_EVENT_DISPATCH = "compat_dispatch"
DEFAULT_EVENT_REQUEST = "compat_dispatch_request"
LEGACY_EVENT_ALIASES: dict[str, str] = {
    DEFAULT_EVENT_DISPATCH: "omg_dispatch",
    DEFAULT_EVENT_REQUEST: "omg_dispatch_request",
}

# Request-validation limits enforced by validate_compat_request.
MAX_PROBLEM_CHARS = 4000
MAX_CONTEXT_CHARS = 12000
MAX_EXPECTED_OUTCOME_CHARS = 3000
MAX_FILES_PER_REQUEST = 50
MAX_FILE_PATH_CHARS = 260
# Matches Windows drive-absolute paths such as "C:\..." or "C:/...".
WINDOWS_ABS_PATH_RE = re.compile(r"^[A-Za-z]:[\\/]")
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def _now() -> str:
|
|
57
|
+
return datetime.now(timezone.utc).isoformat()
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def _normalize_verdict_payload(payload: dict[str, Any]) -> dict[str, Any]:
    """Normalize *payload* via the runtime verdict schema (imported lazily)."""
    schema_mod = import_module("runtime.verdict_schema")
    normalized = schema_mod.normalize_verdict(payload)
    return dict(normalized)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def _project_dir(project_dir: str | None) -> str:
|
|
66
|
+
return project_dir or os.environ.get("CLAUDE_PROJECT_DIR", os.getcwd())
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def _is_safe_relative_path(file_path: str) -> bool:
    """Return True when *file_path* is a safe project-relative path.

    Rejects empty strings, POSIX-absolute and home-relative paths ("/", "~"),
    Windows drive-absolute paths ("C:\\..."), and any path containing a ".."
    segment. Backslashes are treated as separators before checking.
    """
    normalized = file_path.replace("\\", "/")
    # Fix: the original also tested startswith("//"), which is dead code —
    # any "//"-prefixed path already starts with "/".
    if not normalized or normalized.startswith(("/", "~")):
        return False
    if WINDOWS_ABS_PATH_RE.match(file_path):
        return False
    # "" and "." segments are harmless noise; only ".." escapes the tree.
    parts = [part for part in normalized.split("/") if part not in {"", "."}]
    return ".." not in parts
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
# Map of every supported legacy/ecosystem skill name to its internal route key.
LEGACY_SKILL_ROUTES: dict[str, str] = {
    "analyze": "maintainer",
    "autopilot": "runtime_ship",
    "beads": "maintainer",
    "build-fix": "runtime_ship",
    "cancel": "cancel",
    "ccg": "ccg",
    "claude-flow": "ccg",
    "claude-mem": "memory",
    "code-review": "review",
    "compound-engineering": "ccg",
    "compounding-engineering": "ccg",
    "configure-notifications": "health",
    "configure-openclaw": "health",
    "deepinit": "init",
    "external-context": "teams",
    "hooks-mastery": "health",
    "hud": "health",
    "learn-about-omg": "learn",
    "learner": "learn",
    "mcp-setup": "health",
    "memsearch": "memory",
    "note": "memory",
    "omg-doctor": "health",
    "omg-help": "help",
    "omg-setup": "init",
    "omg-teams": "teams",
    "pipeline": "pipeline",
    "plan": "plan",
    "planning-with-files": "plan",
    "project-session-manager": "memory",
    "ralph": "runtime_ship",
    "ralph-wiggum": "runtime_ship",
    "ralph-init": "init",
    "ralplan": "plan",
    "release": "runtime_ship",
    "review": "review",
    "sci-omg": "maintainer",
    "security-review": "security_check",
    "skill": "learn",
    "omg-superpowers": "plan",
    "tdd": "plan",
    "team": "teams",
    "trace": "maintainer",
    "ultrapilot": "runtime_ship",
    "ultraqa": "review",
    "ultrawork": "runtime_ship",
    "writer-memory": "memory",
}
# Backward-compatible export
OMG_COMPAT_SKILL_ROUTES = LEGACY_SKILL_ROUTES

# Default maturity per route; every current route is "native".
# _contract_for falls back to "bridge" for routes absent from this table.
ROUTE_MATURITY: dict[str, str] = {
    "teams": "native",
    "ccg": "native",
    "runtime_ship": "native",
    "pipeline": "native",
    "memory": "native",
    "init": "native",
    "health": "native",
    "help": "native",
    "review": "native",
    "plan": "native",
    "secure": "native",
    "security_check": "native",
    "learn": "native",
    "maintainer": "native",
    "cancel": "native",
}

# Per-skill maturity overrides; takes precedence over ROUTE_MATURITY.
SKILL_MATURITY_OVERRIDES: dict[str, str] = {
    # Next-phase native promotion batch
    "autopilot": "native",
    "ralph": "native",
    "ultrapilot": "native",
    "ultrawork": "native",
    "review": "native",
    "code-review": "native",
    "ultraqa": "native",
    "release": "native",
    "tdd": "native",
    "plan": "native",
    "ralplan": "native",
    # Final bridge -> native promotion batch
    "analyze": "native",
    "build-fix": "native",
    "learn-about-omg": "native",
    "learner": "native",
    "note": "native",
    "project-session-manager": "native",
    "sci-omg": "native",
    "skill": "native",
    "trace": "native",
    "writer-memory": "native",
    # Ecosystem imports promoted as first-class native routes
    "omg-superpowers": "native",
    "ralph-wiggum": "native",
    "claude-flow": "native",
    "claude-mem": "native",
    "memsearch": "native",
    "beads": "native",
    "planning-with-files": "native",
    "hooks-mastery": "native",
    "compound-engineering": "native",
    "compounding-engineering": "native",
}

# Required/optional request fields per route (enforced by validate_compat_request).
ROUTE_INPUTS: dict[str, dict[str, Any]] = {
    "teams": {"required": ["problem"], "optional": ["context", "files", "expected_outcome"]},
    "ccg": {"required": ["problem"], "optional": ["context", "files", "expected_outcome"]},
    "runtime_ship": {"required": ["problem"], "optional": ["expected_outcome"]},
    "pipeline": {"required": ["problem"], "optional": ["context"]},
    "memory": {"required": ["problem"], "optional": ["context"]},
    "init": {"required": [], "optional": ["problem"]},
    "health": {"required": [], "optional": ["problem"]},
    "help": {"required": [], "optional": []},
    "review": {"required": ["problem"], "optional": ["context", "files"]},
    "plan": {"required": ["problem"], "optional": ["expected_outcome"]},
    "secure": {"required": ["problem"], "optional": []},
    "security_check": {"required": [], "optional": ["problem"]},
    "learn": {"required": ["problem"], "optional": ["context"]},
    "maintainer": {"required": ["problem"], "optional": ["context"]},
    "cancel": {"required": [], "optional": []},
}

# Result schema name per route; skills may override via SKILL_OUTPUT_SCHEMA_OVERRIDES.
ROUTE_OUTPUTS: dict[str, dict[str, Any]] = {
    "teams": {"schema": "TeamDispatchResult"},
    "ccg": {"schema": "TeamDispatchResult"},
    "runtime_ship": {"schema": "RuntimeDispatchResult"},
    "pipeline": {"schema": "LabPipelineResult"},
    "memory": {"schema": "StateMutationResult"},
    "init": {"schema": "BootstrapResult"},
    "health": {"schema": "HealthSnapshot"},
    "help": {"schema": "CompatibilityHelp"},
    "review": {"schema": "TeamDispatchResult"},
    "plan": {"schema": "PlanningArtifacts"},
    "secure": {"schema": "PolicyDecision"},
    "security_check": {"schema": "SecurityCheckResult"},
    "learn": {"schema": "LearningArtifact"},
    "maintainer": {"schema": "MaintainerCompatArtifact"},
    "cancel": {"schema": "CancelResult"},
}

# Filesystem paths each route may touch (documentation for contract records).
ROUTE_SIDE_EFFECTS: dict[str, list[str]] = {
    "teams": [],
    "ccg": [],
    "runtime_ship": [],
    "pipeline": [],
    "memory": [".omg/state/working-memory.md", ".omg/state/session.json (psm only)"],
    "init": [".omg/state/*", ".omg/idea.yml", ".omg/policy.yaml", ".omg/runtime.yaml"],
    "health": [],
    "help": [],
    "review": [],
    "plan": [".omg/state/_plan.md", ".omg/state/_checklist.md", ".omg/idea.yml"],
    "secure": [],
    "security_check": [],
    "learn": [".omg/state/working-memory.md"],
    "maintainer": [".omg/evidence/compat-*.json"],
    "cancel": [".omg/shadow/active-run (removed when exists)"],
}

# Per-skill output-schema overrides (take precedence over ROUTE_OUTPUTS).
SKILL_OUTPUT_SCHEMA_OVERRIDES: dict[str, str] = {
    "review": "ReviewSynthesis",
    "code-review": "ReviewSynthesis",
    "ultraqa": "ReviewSynthesis",
    "analyze": "AnalysisCompatArtifact",
    "trace": "AnalysisCompatArtifact",
    "sci-omg": "AnalysisCompatArtifact",
    "project-session-manager": "SessionState",
}

# Per-skill side-effect overrides (take precedence over ROUTE_SIDE_EFFECTS).
SKILL_SIDE_EFFECT_OVERRIDES: dict[str, list[str]] = {
    "autopilot": [".omg/state/persistent-mode.json"],
    "ralph": [".omg/state/persistent-mode.json"],
    "ralph-wiggum": [".omg/state/persistent-mode.json"],
    "ultrapilot": [".omg/state/persistent-mode.json"],
    "ultrawork": [".omg/state/persistent-mode.json"],
    "release": [".omg/evidence/release-draft.md"],
    "build-fix": [".omg/state/build-fix.md"],
    "analyze": [".omg/evidence/analysis-analyze.json"],
    "trace": [".omg/evidence/analysis-trace.json"],
    "sci-omg": [".omg/evidence/analysis-sci-omg.json"],
    "project-session-manager": [".omg/state/session.json"],
    "learn-about-omg": [".omg/knowledge/learning/learn-about-omg.md"],
    "learner": [".omg/knowledge/learning/learner.md"],
    "skill": [".omg/knowledge/learning/skill.md"],
    "note": [".omg/knowledge/notes.md"],
    "writer-memory": [".omg/knowledge/writer-memory.md"],
    "omg-superpowers": [".omg/state/_plan.md", ".omg/state/_checklist.md"],
    "planning-with-files": [".omg/state/_plan.md", ".omg/state/_checklist.md"],
    "claude-mem": [".omg/state/working-memory.md"],
    "memsearch": [".omg/state/working-memory.md"],
    "beads": [".omg/evidence/compat-beads.json"],
    "hooks-mastery": [],
    "claude-flow": [],
    "compound-engineering": [],
    "compounding-engineering": [],
}

# Human-readable notes surfaced in each skill's contract record.
SKILL_ROUTE_NOTES: dict[str, str] = {
    "omg-teams": "Legacy tmux worker dispatch replaced by internal Team router.",
    "project-session-manager": "Session metadata maintained in .omg/state/session.json.",
    "omg-setup": "Bootstraps OMG standalone state and baseline config files.",
    "omg-doctor": "Health checks run against OMG standalone layout.",
    "pipeline": "Routes to OMG lab policy+pipeline executor.",
    "release": "Routes to runtime ship and emits release draft artifact.",
    "tdd": "Generates plan/checklist scaffolding for red-green-refactor workflow.",
    "security-review": "Deprecated alias to the canonical OMG security-check engine.",
    "build-fix": "Creates targeted fix checklist and routes execution to runtime.",
    "analyze": "Writes structured analysis evidence artifact.",
    "trace": "Writes trace evidence artifact for debugging chain.",
    "learner": "Writes learning note into .omg/knowledge/learning.",
    "writer-memory": "Writes long-form memory artifact for writing workflows.",
    "omg-superpowers": "Imports TDD-first planning discipline into OMG plan route.",
    "ralph-wiggum": "Persistent iteration loop via runtime persistent-mode state.",
    "claude-flow": "Maps to CCG route for multi-agent orchestration semantics.",
    "claude-mem": "Maps to memory route for durable working-context updates.",
    "memsearch": "Maps to memory route for retrieval-oriented context search workflow.",
    "beads": "Maps to maintainer route for context engineering artifacts.",
    "planning-with-files": "Strengthens file-native planning artifacts in .omg/state.",
    "hooks-mastery": "Maps to health route for hook quality and readiness checks.",
    "compound-engineering": "Maps to CCG route for compounding, iterative orchestration.",
    "compounding-engineering": "Alias to compound-engineering orchestration route.",
}
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
def _contract_for(skill: str, route: str) -> dict[str, Any]:
    """Assemble the public contract record for one (skill, route) pair."""
    output_spec = dict(ROUTE_OUTPUTS.get(route, {"schema": "Unknown"}))
    schema_override = SKILL_OUTPUT_SCHEMA_OVERRIDES.get(skill)
    if schema_override is not None:
        output_spec["schema"] = schema_override
    # Skill-specific side effects win over the route's defaults.
    effects = SKILL_SIDE_EFFECT_OVERRIDES.get(skill, ROUTE_SIDE_EFFECTS.get(route, []))
    maturity = SKILL_MATURITY_OVERRIDES.get(skill, ROUTE_MATURITY.get(route, "bridge"))
    return {
        "skill": skill,
        "route": route,
        "maturity": maturity,
        "inputs": ROUTE_INPUTS.get(route, {"required": [], "optional": []}),
        "outputs": output_spec,
        "side_effects": effects,
        "notes": SKILL_ROUTE_NOTES.get(skill, ""),
    }
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
# Precomputed contract record per skill, derived once at import time.
LEGACY_SKILL_CONTRACTS: dict[str, dict[str, Any]] = {
    skill: _contract_for(skill, route) for skill, route in LEGACY_SKILL_ROUTES.items()
}
# Backward-compatible export
OMG_COMPAT_SKILL_CONTRACTS = LEGACY_SKILL_CONTRACTS
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
def list_compat_skills() -> list[str]:
    """Return every supported compat skill name, alphabetically sorted."""
    return sorted(LEGACY_SKILL_ROUTES)
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
def list_compat_skill_contracts() -> list[dict[str, Any]]:
    """Return the contract record of every skill, ordered by skill name."""
    ordered_names = list_compat_skills()
    return [LEGACY_SKILL_CONTRACTS[skill] for skill in ordered_names]
|
|
334
|
+
|
|
335
|
+
|
|
336
|
+
def get_compat_skill_contract(skill: str) -> dict[str, Any] | None:
    """Return the contract record for *skill*, or None when unknown."""
    try:
        return LEGACY_SKILL_CONTRACTS[skill]
    except KeyError:
        return None
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
def build_contract_snapshot_payload(*, include_generated_at: bool = True) -> dict[str, Any]:
    """Build the full contract-snapshot document for persistence/inspection.

    Set include_generated_at=False to get a reproducible, timestamp-free
    payload (useful for diffing snapshots).
    """
    parity_hosts = get_canonical_hosts()
    compat_only_hosts = get_compat_hosts()
    snapshot: dict[str, Any] = {
        "schema": CONTRACT_SNAPSHOT_SCHEMA,
        "contract_version": CONTRACT_SNAPSHOT_VERSION,
        "count": len(LEGACY_SKILL_CONTRACTS),
        "contracts": list_compat_skill_contracts(),
        "host_surfaces": {
            # Canonical hosts gate releases; compat hosts are informational.
            "canonical_parity_hosts": parity_hosts,
            "release_blocking_hosts": parity_hosts,
            "compatibility_only_hosts": compat_only_hosts,
            "release_non_blocking_hosts": compat_only_hosts,
        },
    }
    if include_generated_at:
        snapshot["generated_at"] = _now()
    return snapshot
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
def migrate_contract_snapshot_payload(payload: dict[str, Any]) -> tuple[dict[str, Any], list[str]]:
    """Upgrade a (possibly legacy) contract-snapshot payload in a copy.

    Returns the migrated payload plus the ordered list of migration tags
    that were applied. The input dict is not mutated (shallow copy).
    """
    migrated = dict(payload)
    migrations: list[str] = []

    # Snapshots that predate the schema field get the legacy schema assigned.
    if "schema" not in migrated:
        migrated["schema"] = LEGACY_CONTRACT_SNAPSHOT_SCHEMA
        migrations.append("assign-missing-schema:legacy-omg")

    # NOTE(review): LEGACY_CONTRACT_SNAPSHOT_SCHEMA currently equals
    # CONTRACT_SNAPSHOT_SCHEMA, so this branch also fires for already-current
    # payloads and appends "migrate-schema-legacy-to-omg" — confirm intended.
    if migrated.get("schema") == LEGACY_CONTRACT_SNAPSHOT_SCHEMA:
        migrated["schema"] = CONTRACT_SNAPSHOT_SCHEMA
        migrations.append("migrate-schema-legacy-to-omg")

    if "contract_version" not in migrated:
        migrated["contract_version"] = LEGACY_SNAPSHOT_VERSION
        migrations.append("assign-missing-contract-version:0.9.0")

    if migrated.get("contract_version") == LEGACY_SNAPSHOT_VERSION:
        # v0.9.0 lacked explicit schema/version constraints.
        migrated["schema"] = CONTRACT_SNAPSHOT_SCHEMA
        migrated["contract_version"] = CONTRACT_SNAPSHOT_VERSION
        migrations.append(f"migrate-0.9.0-to-{CONTRACT_SNAPSHOT_VERSION}")

    return migrated, migrations
|
|
383
|
+
|
|
384
|
+
|
|
385
|
+
def build_compat_gap_report(project_dir: str | None = None) -> dict[str, Any]:
    """Write the native-vs-bridge gap report (canonical + legacy paths).

    Returns the report dict, augmented with the paths it was written to.
    """
    root = _project_dir(project_dir)
    _ensure_state_layout(root)
    contracts = list_compat_skill_contracts()
    maturity_counts = Counter(record["maturity"] for record in contracts)
    route_counts = Counter(record["route"] for record in contracts)
    bridge_skills = [record["skill"] for record in contracts if record["maturity"] != "native"]
    report = {
        "schema": GAP_REPORT_SCHEMA,
        "generated_at": _now(),
        "total_skills": len(contracts),
        "maturity_counts": dict(sorted(maturity_counts.items())),
        "route_counts": dict(sorted(route_counts.items())),
        "native_skills": sorted(record["skill"] for record in contracts if record["maturity"] == "native"),
        "bridge_skills": sorted(bridge_skills),
    }
    primary_path = os.path.join(root, DEFAULT_GAP_REPORT_PATH)
    legacy_path = os.path.join(root, LEGACY_GAP_REPORT_PATH)
    # Dual-write for backward compatibility; skip the second write when the
    # two configured paths coincide.
    targets = [primary_path]
    if legacy_path != primary_path:
        targets.append(legacy_path)
    for target in targets:
        with open(target, "w", encoding="utf-8") as handle:
            json.dump(report, handle, indent=2, ensure_ascii=True)
    # Paths are attached after writing so the files themselves omit them.
    report["report_path"] = primary_path
    report["legacy_report_path"] = legacy_path
    return report
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
def _result(
    *,
    skill: str,
    route: str,
    status: str = "ok",
    routed_to: str = "",
    findings: list[str] | None = None,
    actions: list[str] | None = None,
    artifacts: list[str] | None = None,
    result: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Assemble the standard OmgCompatResult envelope for a dispatch outcome."""
    envelope: dict[str, Any] = {
        "schema": RESULT_SCHEMA,
        "status": status,
        "skill": skill,
        "route": route,
        "routed_to": routed_to,
        "contract": get_compat_skill_contract(skill) or {},
        "findings": findings or [],
        "actions": actions or [],
        "artifacts": artifacts or [],
        "result": result or {},
        "generated_at": _now(),
    }
    return envelope
|
|
437
|
+
|
|
438
|
+
|
|
439
|
+
def _ensure_state_layout(project_dir: str) -> None:
|
|
440
|
+
for rel in ["state", "knowledge", "evidence", "trust", "shadow"]:
|
|
441
|
+
os.makedirs(os.path.join(project_dir, ".omg", rel), exist_ok=True)
|
|
442
|
+
|
|
443
|
+
|
|
444
|
+
def _append_audit_event(project_dir: str, event: dict[str, Any]) -> None:
    """Append *event* to both audit ledgers (JSONL), mirroring legacy names.

    A timestamp is added when missing. Canonical event names listed in
    LEGACY_EVENT_ALIASES are additionally written under their legacy alias.
    """
    _ensure_state_layout(project_dir)
    os.makedirs(os.path.join(project_dir, ".omg", "state", "ledger"), exist_ok=True)

    record = dict(event)
    record.setdefault("ts", _now())
    records = [record]
    name = str(record.get("event", ""))
    alias = LEGACY_EVENT_ALIASES.get(name)
    if alias is not None:
        # Mirror the event under its legacy name, tagged with its origin.
        mirrored = dict(record)
        mirrored["event"] = alias
        mirrored["alias_of"] = name
        records.append(mirrored)

    for rel_path in (DEFAULT_AUDIT_LEDGER_PATH, LEGACY_AUDIT_LEDGER_PATH):
        target = os.path.join(project_dir, rel_path)
        os.makedirs(os.path.dirname(target), exist_ok=True)
        with open(target, "a", encoding="utf-8") as handle:
            for entry in records:
                handle.write(json.dumps(entry, ensure_ascii=True) + "\n")
|
|
463
|
+
|
|
464
|
+
|
|
465
|
+
def validate_compat_request(
    *,
    skill: str,
    problem: str,
    context: str,
    files: list[str] | None,
    expected_outcome: str,
) -> tuple[bool, str]:
    """Validate a compat dispatch request.

    Returns (True, "ok") on success, otherwise (False, reason) for the first
    violated rule: known skill, field size caps, route-required inputs, and
    file-path safety.
    """
    if skill not in LEGACY_SKILL_ROUTES:
        return False, f"Unknown skill: {skill}"

    # Size caps, checked in the documented field order.
    length_caps = (
        ("problem", problem, MAX_PROBLEM_CHARS),
        ("context", context, MAX_CONTEXT_CHARS),
        ("expected_outcome", expected_outcome, MAX_EXPECTED_OUTCOME_CHARS),
    )
    for label, value, cap in length_caps:
        if len(value) > cap:
            return False, f"{label} too long (max {cap})"

    route = LEGACY_SKILL_ROUTES[skill]
    required_inputs = set(ROUTE_INPUTS.get(route, {}).get("required", []))
    if "problem" in required_inputs and not problem.strip():
        return False, "problem is required for this skill"

    candidates = files or []
    if len(candidates) > MAX_FILES_PER_REQUEST:
        return False, f"too many files (max {MAX_FILES_PER_REQUEST})"
    for candidate in candidates:
        if not isinstance(candidate, str) or not candidate:
            return False, "invalid file path: must be non-empty string"
        if "\x00" in candidate:
            return False, "invalid file path: contains null byte"
        if len(candidate) > MAX_FILE_PATH_CHARS:
            return False, f"invalid file path: exceeds {MAX_FILE_PATH_CHARS} chars"
        if candidate != candidate.strip():
            return False, "invalid file path: leading/trailing whitespace is not allowed"
        if not _is_safe_relative_path(candidate):
            return False, "invalid file path: must be a safe relative path"

    return True, "ok"
|
|
504
|
+
|
|
505
|
+
|
|
506
|
+
def _write_if_missing(path: str, content: str) -> None:
|
|
507
|
+
if os.path.exists(path):
|
|
508
|
+
return
|
|
509
|
+
os.makedirs(os.path.dirname(path), exist_ok=True)
|
|
510
|
+
with open(path, "w", encoding="utf-8") as f:
|
|
511
|
+
f.write(content)
|
|
512
|
+
|
|
513
|
+
|
|
514
|
+
def _append_memory(project_dir: str, message: str) -> str:
    """Append a timestamped bullet to working memory; return the file path."""
    _ensure_state_layout(project_dir)
    memory_path = os.path.join(project_dir, ".omg", "state", "working-memory.md")
    stamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%SZ")
    with open(memory_path, "a", encoding="utf-8") as handle:
        handle.write(f"- [{stamp}] {message}\n")
    return memory_path
|
|
522
|
+
|
|
523
|
+
|
|
524
|
+
def _update_session_state(project_dir: str, message: str) -> str:
    """Append *message* to the rolling session log; return the session path.

    Corrupt or unreadable existing state is silently replaced; only the most
    recent 100 entries are retained.
    """
    _ensure_state_layout(project_dir)
    session_path = os.path.join(project_dir, ".omg", "state", "session.json")

    state: dict[str, Any] = {"last_updated": _now(), "entries": []}
    if os.path.exists(session_path):
        try:
            with open(session_path, "r", encoding="utf-8") as handle:
                loaded = json.load(handle)
        except (json.JSONDecodeError, OSError):
            loaded = None
        if isinstance(loaded, dict):
            state = loaded

    # Repair a missing or malformed entries field before appending.
    state.setdefault("entries", [])
    if not isinstance(state["entries"], list):
        state["entries"] = []
    state["entries"].append({"ts": _now(), "message": message})
    state["entries"] = state["entries"][-100:]
    state["last_updated"] = _now()

    with open(session_path, "w", encoding="utf-8") as handle:
        json.dump(state, handle, indent=2, ensure_ascii=True)
    return session_path
|
|
545
|
+
|
|
546
|
+
|
|
547
|
+
def _append_knowledge_note(project_dir: str, rel_path: str, line: str) -> str:
    """Append one line to a knowledge file under .omg/; return its full path."""
    _ensure_state_layout(project_dir)
    note_path = os.path.join(project_dir, ".omg", rel_path)
    os.makedirs(os.path.dirname(note_path), exist_ok=True)
    with open(note_path, "a", encoding="utf-8") as handle:
        handle.write(line.rstrip() + "\n")
    return note_path
|
|
554
|
+
|
|
555
|
+
|
|
556
|
+
def _write_learning_artifact(project_dir: str, skill: str, message: str, context: str) -> str:
    """Append a markdown learning entry for *skill*; return the file path."""
    artifact_path = os.path.join(project_dir, ".omg", "knowledge", "learning", f"{skill}.md")
    os.makedirs(os.path.dirname(artifact_path), exist_ok=True)
    chunk = [f"## {_now()}\n", f"- skill: {skill}\n", f"- message: {message}\n"]
    if context:
        chunk.append(f"- context: {context}\n")
    chunk.append("\n")
    with open(artifact_path, "a", encoding="utf-8") as handle:
        handle.writelines(chunk)
    return artifact_path
|
|
567
|
+
|
|
568
|
+
|
|
569
|
+
def _write_analysis_artifact(project_dir: str, skill: str, message: str, context: str, files: list[str]) -> str:
    """Persist a structured analysis-evidence JSON document; return its path."""
    _ensure_state_layout(project_dir)
    artifact_path = os.path.join(project_dir, ".omg", "evidence", f"analysis-{skill}.json")
    document = {
        "schema": "AnalysisCompatArtifact",
        "skill": skill,
        "generated_at": _now(),
        "problem": message,
        "context": context,
        "files": files,
        "findings": [
            "Structured analysis generated by OMG compat dispatcher.",
            "Use review/teams routes for deeper remediation proposals.",
        ],
    }
    with open(artifact_path, "w", encoding="utf-8") as handle:
        json.dump(document, handle, indent=2, ensure_ascii=True)
    return artifact_path
|
|
587
|
+
|
|
588
|
+
|
|
589
|
+
def _write_maintainer_artifact(project_dir: str, skill: str, problem: str) -> str:
    """Persist a maintainer-route evidence JSON document; return its path."""
    _ensure_state_layout(project_dir)
    artifact_path = os.path.join(project_dir, ".omg", "evidence", f"compat-{skill}.json")
    document = {
        "schema": "MaintainerCompatArtifact",
        "skill": skill,
        "generated_at": _now(),
        # Fall back to a generic summary when no problem text was supplied.
        "summary": problem or f"compat route for {skill}",
        "signals": {
            "triage": "unverified",
            "release_notes": "unverified",
            "review": "unverified",
        },
    }
    with open(artifact_path, "w", encoding="utf-8") as handle:
        json.dump(document, handle, indent=2, ensure_ascii=True)
    return artifact_path
|
|
606
|
+
|
|
607
|
+
|
|
608
|
+
def _init_bootstrap(project_dir: str, reason: str) -> list[str]:
    """Seed the baseline OMG config/state files, never overwriting existing ones.

    Returns the project-relative paths of all files the bootstrap manages.
    NOTE(review): interior indentation of the YAML string literals was
    reconstructed (2-space nesting) — the source rendering stripped leading
    whitespace; confirm against the shipped package.
    """
    _ensure_state_layout(project_dir)
    profile_path = os.path.join(project_dir, ".omg", "state", "profile.yaml")
    idea_path = os.path.join(project_dir, ".omg", "idea.yml")
    policy_path = os.path.join(project_dir, ".omg", "policy.yaml")
    runtime_path = os.path.join(project_dir, ".omg", "runtime.yaml")
    plan_path = os.path.join(project_dir, ".omg", "state", "_plan.md")
    checklist_path = os.path.join(project_dir, ".omg", "state", "_checklist.md")
    qg_path = os.path.join(project_dir, ".omg", "state", "quality-gate.json")

    # Project profile skeleton consumed by the runtime/router.
    _write_if_missing(
        profile_path,
        "name: omg-project\n"
        "description: initialized by OMG standalone compat bootstrap\n"
        "language: unknown\n"
        "framework: unknown\n"
        "stack: []\n"
        "conventions: {}\n"
        "ai_behavior: {}\n"
        "preferences:\n"
        "  architecture_requests: []\n"
        "  constraints: {}\n"
        "  routing:\n"
        "    prefer_clarification: false\n"
        "user_vector:\n"
        "  tags: []\n"
        "  summary: \"\"\n"
        "  confidence: 0.0\n"
        "profile_provenance:\n"
        "  recent_updates: []\n",
    )
    # Idea/acceptance scaffold for planning routes.
    _write_if_missing(
        idea_path,
        "goal: \"\"\n"
        "constraints: []\n"
        "acceptance: []\n"
        "risk:\n"
        "  security: []\n"
        "  performance: []\n"
        "  compatibility: []\n"
        "evidence_required:\n"
        "  tests: []\n"
        "  security_scans: []\n"
        "  reproducibility: []\n"
        "  artifacts: []\n",
    )
    _write_if_missing(
        policy_path,
        "mode: warn_and_run\ncritical_block: true\n",
    )
    _write_if_missing(
        runtime_path,
        "default: claude\navailable:\n - claude\n - gpt\n - local\n",
    )
    # Plan/checklist seeds; the plan goal records why bootstrap was invoked.
    _write_if_missing(
        plan_path,
        "# Compat Plan\n"
        f"goal: {reason or 'bootstrap'}\n"
        "CHANGE_BUDGET=small\n",
    )
    _write_if_missing(
        checklist_path,
        "- [ ] define goal\n- [ ] run verification\n- [ ] ship with evidence\n",
    )
    # Quality gate is JSON, so it is seeded directly rather than via the
    # text-oriented _write_if_missing helper.
    if not os.path.exists(qg_path):
        with open(qg_path, "w", encoding="utf-8") as f:
            json.dump({"lint": "pytest -q", "test": "pytest -q"}, f, indent=2, ensure_ascii=True)
    return [
        os.path.relpath(profile_path, project_dir),
        os.path.relpath(idea_path, project_dir),
        os.path.relpath(policy_path, project_dir),
        os.path.relpath(runtime_path, project_dir),
        os.path.relpath(plan_path, project_dir),
        os.path.relpath(checklist_path, project_dir),
        os.path.relpath(qg_path, project_dir),
    ]
|
|
684
|
+
|
|
685
|
+
|
|
686
|
+
def _health_snapshot(project_dir: str) -> dict[str, Any]:
|
|
687
|
+
p = Path(project_dir)
|
|
688
|
+
omg_root = p / ".omg"
|
|
689
|
+
checks = [
|
|
690
|
+
{"name": "python>=3.8", "ok": sys.version_info >= (3, 8)},
|
|
691
|
+
{"name": ".omg exists", "ok": omg_root.exists()},
|
|
692
|
+
{"name": ".omg/state exists", "ok": (omg_root / "state").exists()},
|
|
693
|
+
{"name": ".omg/idea.yml exists", "ok": (omg_root / "idea.yml").exists()},
|
|
694
|
+
{"name": ".omg/policy.yaml exists", "ok": (omg_root / "policy.yaml").exists()},
|
|
695
|
+
]
|
|
696
|
+
all_ok = all(c["ok"] for c in checks)
|
|
697
|
+
return {
|
|
698
|
+
"project_dir": str(p),
|
|
699
|
+
"status": "pass" if all_ok else "warn",
|
|
700
|
+
"checks": checks,
|
|
701
|
+
"omg_exists": omg_root.exists(),
|
|
702
|
+
"state_exists": (omg_root / "state").exists(),
|
|
703
|
+
"knowledge_exists": (omg_root / "knowledge").exists(),
|
|
704
|
+
"evidence_exists": (omg_root / "evidence").exists(),
|
|
705
|
+
}
|
|
706
|
+
|
|
707
|
+
|
|
708
|
+
def _doctor_check(name: str, *, ok: bool, message: str, required: bool = True) -> dict[str, Any]:
|
|
709
|
+
if ok:
|
|
710
|
+
status = "ok"
|
|
711
|
+
elif required:
|
|
712
|
+
status = "blocker"
|
|
713
|
+
else:
|
|
714
|
+
status = "warning"
|
|
715
|
+
return {"name": name, "status": status, "message": message, "required": required}
|
|
716
|
+
|
|
717
|
+
|
|
718
|
+
def _check_plugin_compat(root_dir: Path) -> dict[str, Any]:
    """Summarise plugin-compatibility diagnostics as an optional doctor check.

    Any exception from the diagnostics run degrades to a non-required
    failure rather than crashing the doctor.
    """
    try:
        result = run_plugin_diagnostics(str(root_dir))
    except Exception as exc:
        return _doctor_check(
            "plugin_compatibility",
            ok=False,
            message=f"plugin diagnostics error: {exc}",
            required=False,
        )

    status = str(result.get("status", "error"))
    raw_summary = result.get("summary", {})
    summary = raw_summary if isinstance(raw_summary, dict) else {}
    records = int(summary.get("total_records", 0))
    conflicts = int(summary.get("total_conflicts", 0))
    blockers = int(summary.get("blockers", 0))
    # Both "ok" and "warn" count as acceptable outcomes; anything else fails.
    return _doctor_check(
        "plugin_compatibility",
        ok=status in {"ok", "warn"},
        message=(
            f"plugin compatibility: {records} records, "
            f"{conflicts} conflicts, {blockers} blockers"
        ),
        required=False,
    )
|
|
744
|
+
|
|
745
|
+
|
|
746
|
+
_ORPHANED_RUNTIME_MARKER = "omg-runtime"
|
|
747
|
+
|
|
748
|
+
|
|
749
|
+
def _collect_orphaned_runtime_refs(claude_dir: str, *, home_dir: str | None = None) -> list[str]:
|
|
750
|
+
refs: list[str] = []
|
|
751
|
+
managed_runtime_dir = os.path.join(claude_dir, "omg-runtime")
|
|
752
|
+
runtime_absent = not os.path.isdir(managed_runtime_dir)
|
|
753
|
+
_home = home_dir if home_dir is not None else os.environ.get("OMG_TEST_HOME_DIR", os.path.expanduser("~"))
|
|
754
|
+
|
|
755
|
+
settings_path = os.path.join(claude_dir, "settings.json")
|
|
756
|
+
if os.path.isfile(settings_path):
|
|
757
|
+
try:
|
|
758
|
+
with open(settings_path, "r", encoding="utf-8") as f:
|
|
759
|
+
settings_data = json.load(f)
|
|
760
|
+
hooks = settings_data.get("hooks", {})
|
|
761
|
+
for _event, hook_list in hooks.items():
|
|
762
|
+
if not isinstance(hook_list, list):
|
|
763
|
+
continue
|
|
764
|
+
for hook_entry in hook_list:
|
|
765
|
+
cmd = ""
|
|
766
|
+
if isinstance(hook_entry, dict):
|
|
767
|
+
cmd = hook_entry.get("command", "")
|
|
768
|
+
for sub in hook_entry.get("hooks", []):
|
|
769
|
+
if isinstance(sub, dict) and _ORPHANED_RUNTIME_MARKER in sub.get("command", ""):
|
|
770
|
+
if runtime_absent or not os.path.isfile(
|
|
771
|
+
os.path.join(managed_runtime_dir, ".venv", "bin", "python")
|
|
772
|
+
):
|
|
773
|
+
refs.append(f"settings.json:hooks:{sub.get('command', '')}")
|
|
774
|
+
if _ORPHANED_RUNTIME_MARKER in cmd and (
|
|
775
|
+
runtime_absent
|
|
776
|
+
or not os.path.isfile(os.path.join(managed_runtime_dir, ".venv", "bin", "python"))
|
|
777
|
+
):
|
|
778
|
+
refs.append(f"settings.json:hooks:{cmd}")
|
|
779
|
+
except (json.JSONDecodeError, OSError):
|
|
780
|
+
pass
|
|
781
|
+
|
|
782
|
+
mcp_json_path = os.path.join(claude_dir, ".mcp.json")
|
|
783
|
+
if os.path.isfile(mcp_json_path):
|
|
784
|
+
try:
|
|
785
|
+
with open(mcp_json_path, "r", encoding="utf-8") as f:
|
|
786
|
+
mcp_data = json.load(f)
|
|
787
|
+
ctrl = mcp_data.get("mcpServers", {}).get("omg-control", {})
|
|
788
|
+
cmd = ctrl.get("command", "")
|
|
789
|
+
if _ORPHANED_RUNTIME_MARKER in cmd and (
|
|
790
|
+
runtime_absent
|
|
791
|
+
or not os.path.isfile(os.path.join(managed_runtime_dir, ".venv", "bin", "python"))
|
|
792
|
+
):
|
|
793
|
+
refs.append(f".mcp.json:mcpServers.omg-control:{cmd}")
|
|
794
|
+
except (json.JSONDecodeError, OSError):
|
|
795
|
+
pass
|
|
796
|
+
|
|
797
|
+
for cfg_path, key_path, mcp_top_key in [
|
|
798
|
+
(os.path.join(_home, ".codex", "config.toml"), "mcp_servers.omg-control", None),
|
|
799
|
+
(os.path.join(_home, ".gemini", "settings.json"), "mcpServers.omg-control", "mcpServers"),
|
|
800
|
+
(os.path.join(_home, ".kimi", "mcp.json"), "mcpServers.omg-control", "mcpServers"),
|
|
801
|
+
(os.path.join(_home, ".config", "opencode", "opencode.json"), "mcp.omg-control", "mcp"),
|
|
802
|
+
]:
|
|
803
|
+
if not os.path.isfile(cfg_path):
|
|
804
|
+
continue
|
|
805
|
+
try:
|
|
806
|
+
if cfg_path.endswith(".toml"):
|
|
807
|
+
import tomlkit
|
|
808
|
+
|
|
809
|
+
with open(cfg_path, "r", encoding="utf-8") as _f:
|
|
810
|
+
content = _f.read()
|
|
811
|
+
doc = tomlkit.parse(content)
|
|
812
|
+
mcp_servers = doc.get("mcp_servers", {})
|
|
813
|
+
ctrl = mcp_servers.get("omg-control", {})
|
|
814
|
+
cmd = ctrl.get("command", "")
|
|
815
|
+
if isinstance(cmd, str) and _ORPHANED_RUNTIME_MARKER in cmd and (
|
|
816
|
+
runtime_absent
|
|
817
|
+
or not os.path.isfile(os.path.join(managed_runtime_dir, ".venv", "bin", "python"))
|
|
818
|
+
):
|
|
819
|
+
refs.append(f"{cfg_path}:{key_path}")
|
|
820
|
+
else:
|
|
821
|
+
with open(cfg_path, "r", encoding="utf-8") as f:
|
|
822
|
+
data = json.load(f)
|
|
823
|
+
ctrl = data.get(mcp_top_key, {}).get("omg-control", {})
|
|
824
|
+
cmd = ctrl.get("command", "")
|
|
825
|
+
if _ORPHANED_RUNTIME_MARKER in cmd and (
|
|
826
|
+
runtime_absent
|
|
827
|
+
or not os.path.isfile(os.path.join(managed_runtime_dir, ".venv", "bin", "python"))
|
|
828
|
+
):
|
|
829
|
+
refs.append(f"{cfg_path}:{key_path}:{cmd}")
|
|
830
|
+
except (json.JSONDecodeError, OSError):
|
|
831
|
+
pass
|
|
832
|
+
|
|
833
|
+
return refs
|
|
834
|
+
|
|
835
|
+
|
|
836
|
+
def _check_orphaned_runtime(claude_dir: str, *, home_dir: str | None = None) -> dict[str, Any]:
    """Doctor check: flag config references to a deleted omg-runtime.

    Non-required either way — an orphaned reference is a warning, not a
    blocker.
    """
    refs = _collect_orphaned_runtime_refs(claude_dir, home_dir=home_dir)
    if not refs:
        return _doctor_check(
            "orphaned_runtime",
            ok=True,
            message="no orphaned omg-runtime references detected",
            required=False,
        )
    preview = "; ".join(refs[:3])
    return _doctor_check(
        "orphaned_runtime",
        ok=False,
        message=f"orphaned omg-runtime references found ({len(refs)}): {preview}",
        required=False,
    )
|
|
851
|
+
|
|
852
|
+
|
|
853
|
+
def run_doctor(*, root_dir: Path | None = None) -> dict[str, Any]:
    """Canonical install/runtime verification engine.

    Runs the full battery of install checks (Python version, fastmcp,
    omg-control MCP config, policy files, version drift, compiled bundles,
    host compatibility, HTTP memory, managed runtime, orphaned runtime
    references, plugin compatibility) and returns a ``DoctorResult`` dict.

    Called by both ``omg doctor`` CLI and the ``omg-doctor`` compat route.
    """
    from runtime.contract_compiler import _check_version_identity_drift

    repo_root = root_dir or Path(__file__).resolve().parent.parent
    checks: list[dict[str, Any]] = []

    # 1. Python version >= 3.10
    py_ok = sys.version_info >= (3, 10)
    checks.append(_doctor_check(
        "python_version",
        ok=py_ok,
        message=f"Python {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
        + ("" if py_ok else " (requires >=3.10)"),
    ))

    # 2. fastmcp availability (required for the MCP server)
    fastmcp_ok = False
    fastmcp_msg = ""
    try:
        import importlib
        importlib.import_module("fastmcp")
        fastmcp_ok = True
        fastmcp_msg = "fastmcp importable"
    except ImportError:
        fastmcp_msg = "fastmcp not installed — required for MCP server"
    checks.append(_doctor_check("fastmcp", ok=fastmcp_ok, message=fastmcp_msg))

    # 3. omg-control reachable (stdio config present in .mcp.json)
    mcp_json_path = repo_root / ".mcp.json"
    omg_control_ok = False
    omg_control_msg = ".mcp.json not found"
    if mcp_json_path.exists():
        try:
            with open(mcp_json_path, "r", encoding="utf-8") as f:
                mcp_data = json.load(f)
            servers = mcp_data.get("mcpServers", {})
            if "omg-control" in servers:
                ctrl = servers["omg-control"]
                if ctrl.get("command"):
                    omg_control_ok = True
                    omg_control_msg = f"omg-control configured (stdio: {ctrl['command']})"
                else:
                    omg_control_ok = True
                    omg_control_msg = "omg-control configured (non-stdio)"
            else:
                omg_control_msg = "omg-control not found in .mcp.json mcpServers"
        except (json.JSONDecodeError, OSError, KeyError) as exc:
            # BUGFIX: OSError was previously uncaught here (run_env_doctor
            # already catches it); an unreadable .mcp.json now degrades to a
            # failed check instead of crashing the doctor.
            omg_control_msg = f".mcp.json parse error: {exc}"
    checks.append(_doctor_check("omg_control_reachable", ok=omg_control_ok, message=omg_control_msg))

    # 4. Policy files present — either policy.yaml or commands/ suffices
    policy_path = repo_root / ".omg" / "policy.yaml"
    commands_dir = repo_root / "commands"
    policy_ok = policy_path.exists() or commands_dir.exists()
    if policy_path.exists() and commands_dir.exists():
        policy_msg = "policy.yaml and commands/ present"
    elif policy_path.exists():
        policy_msg = "policy.yaml present (commands/ missing)"
    elif commands_dir.exists():
        policy_msg = "commands/ present (policy.yaml missing)"
    else:
        policy_msg = "neither policy.yaml nor commands/ found"
    checks.append(_doctor_check("policy_files", ok=policy_ok, message=policy_msg))

    # 5. Metadata drift (release identity surfaces must agree)
    drift_result = _check_version_identity_drift(repo_root)
    drift_ok = drift_result.get("status") == "ok"
    drift_blockers = drift_result.get("blockers", [])
    drift_msg = "all version surfaces aligned" if drift_ok else f"{len(drift_blockers)} drift(s): {'; '.join(drift_blockers[:3])}"
    checks.append(_doctor_check("metadata_drift", ok=drift_ok, message=drift_msg))

    # 6. Compiled bundles exist (optional)
    bundles_dir = repo_root / "dist"
    # Simplified: the original wrapped this in a redundant
    # `... if bundles_dir.exists() else False` conditional.
    bundles_ok = bundles_dir.exists() and any(bundles_dir.iterdir())
    bundles_msg = "dist/ contains compiled bundles" if bundles_ok else "dist/ missing or empty"
    checks.append(_doctor_check("compiled_bundles", ok=bundles_ok, message=bundles_msg, required=False))

    # 7. Host compatibility (optional)
    claude_dir = os.environ.get("CLAUDE_DIR", os.path.expanduser("~/.claude"))
    host_ok = os.path.isdir(claude_dir)
    host_msg = f"host config dir exists ({claude_dir})" if host_ok else f"host config dir not found ({claude_dir})"
    checks.append(_doctor_check("host_compatibility", ok=host_ok, message=host_msg, required=False))

    # 8. HTTP memory — optional, never required; config is reported, not probed
    memory_msg = "HTTP memory not configured (optional)"
    if mcp_json_path.exists():
        try:
            with open(mcp_json_path, "r", encoding="utf-8") as f:
                mcp_data = json.load(f)
            mem_cfg = mcp_data.get("mcpServers", {}).get("omg-memory", {})
            if mem_cfg.get("type") == "http" and mem_cfg.get("url"):
                memory_msg = f"omg-memory configured at {mem_cfg['url']} (optional, not probed)"
        except (json.JSONDecodeError, OSError, KeyError):
            # BUGFIX: OSError added for the same reason as check 3.
            pass
    checks.append(_doctor_check("memory_reachable", ok=True, message=memory_msg, required=False))

    # 9. Managed runtime venv (optional)
    managed_venv_path = Path(claude_dir) / "omg-runtime" / ".venv"
    venv_ok = managed_venv_path.exists()
    venv_msg = f"managed venv at {managed_venv_path}" if venv_ok else f"managed venv not found at {managed_venv_path} (install via OMG-setup.sh)"
    checks.append(_doctor_check("managed_runtime", ok=venv_ok, message=venv_msg, required=False))

    # 10. Orphaned runtime references (optional)
    orphan_check = _check_orphaned_runtime(claude_dir)
    checks.append(orphan_check)

    # 11. Plugin compatibility (optional) — also surfaced at the top level
    plugin_check = _check_plugin_compat(repo_root)
    checks.append(plugin_check)

    # Any required check that failed is a blocker and fails the run.
    has_blocker = any(c["status"] == "blocker" for c in checks)
    verdict_receipt = _normalize_verdict_payload({
        "status": "fail" if has_blocker else "pass",
        "blockers": [
            str(check.get("name", ""))
            for check in checks
            if check.get("status") == "blocker"
        ],
        "planned_actions": [
            "Fix blocker checks before shipping.",
        ],
        "executed_actions": [
            "run_doctor",
        ],
        "provenance": "runtime.compat.run_doctor",
        "evidence_paths": {},
        "next_steps": [
            "Fix any blocker checks before shipping.",
        ],
        "executed": True,
    })
    return {
        "schema": "DoctorResult",
        "status": "fail" if has_blocker else "pass",
        "verdict": "fail" if has_blocker else "pass",
        "checks": checks,
        "plugin_compatibility": plugin_check,
        "version": CANONICAL_VERSION,
        "verdict_receipt": verdict_receipt,
    }
|
|
996
|
+
|
|
997
|
+
|
|
998
|
+
# Host CLIs probed by the environment doctor (PATH lookup + auth checks).
# Claude is intentionally absent: its auth is host-native and never probed.
_ENV_HOST_CLIS = ("codex", "gemini", "kimi", "opencode")

# Home-relative config directory components for each supported host CLI,
# joined onto the (possibly test-overridden) home directory.
_ENV_HOST_CONFIG_DIRS: dict[str, tuple[str, ...]] = {
    "codex": (".codex",),
    "gemini": (".gemini",),
    "kimi": (".kimi",),
    "claude": (".claude",),
    "opencode": (".config/opencode",),
}
|
|
1007
|
+
|
|
1008
|
+
|
|
1009
|
+
def _env_check(
|
|
1010
|
+
name: str, *, ok: bool, message: str, required: bool = False, remediation: str = "",
|
|
1011
|
+
) -> dict[str, Any]:
|
|
1012
|
+
if ok:
|
|
1013
|
+
status = "ok"
|
|
1014
|
+
elif required:
|
|
1015
|
+
status = "blocker"
|
|
1016
|
+
else:
|
|
1017
|
+
status = "warning"
|
|
1018
|
+
return {
|
|
1019
|
+
"name": name,
|
|
1020
|
+
"status": status,
|
|
1021
|
+
"message": message,
|
|
1022
|
+
"required": required,
|
|
1023
|
+
"remediation": remediation,
|
|
1024
|
+
}
|
|
1025
|
+
|
|
1026
|
+
|
|
1027
|
+
def _check_node_version() -> dict[str, Any]:
    """Verify that a Node.js >= 18 executable is available on PATH."""
    import shutil
    import subprocess as _sp

    if shutil.which("node") is None:
        return _env_check(
            "node_version", ok=False,
            message="node not found on PATH",
            remediation="Install Node.js >= 18 from https://nodejs.org",
        )
    try:
        completed = _sp.run(
            ["node", "--version"], capture_output=True, text=True, timeout=10,
        )
        # Output looks like "v20.1.0"; strip the leading "v" and keep major.
        version = completed.stdout.strip().lstrip("v")
        new_enough = int(version.split(".")[0]) >= 18
        suffix = "" if new_enough else " (requires >= 18)"
        return _env_check(
            "node_version", ok=new_enough,
            message=f"node v{version}{suffix}",
            remediation="" if new_enough else "Upgrade Node.js to >= 18",
        )
    except Exception as exc:
        # Broad on purpose: timeout, decode failure, or unparseable output
        # all degrade to the same failed check.
        return _env_check(
            "node_version", ok=False,
            message=f"node version check failed: {exc}",
            remediation="Install Node.js >= 18 from https://nodejs.org",
        )
|
|
1055
|
+
|
|
1056
|
+
|
|
1057
|
+
def _check_python3_available() -> dict[str, Any]:
    """Check that a ``python3`` executable is resolvable on PATH."""
    import shutil

    location = shutil.which("python3")
    if location is None:
        return _env_check(
            "python3_available", ok=False,
            message="python3 not found on PATH",
            remediation="Install Python 3 from https://python.org",
        )
    return _env_check("python3_available", ok=True, message=f"python3 at {location}")
|
|
1067
|
+
|
|
1068
|
+
|
|
1069
|
+
def _check_cli_path(cli_name: str) -> dict[str, Any]:
    """Report whether *cli_name* resolves to an executable on PATH."""
    import shutil

    location = shutil.which(cli_name)
    if location is None:
        return _env_check(
            f"{cli_name}_path", ok=False,
            message=f"{cli_name} not found on PATH",
            remediation=f"Install {cli_name} CLI and ensure it is on PATH",
        )
    return _env_check(f"{cli_name}_path", ok=True, message=f"{cli_name} at {location}")
|
|
1079
|
+
|
|
1080
|
+
|
|
1081
|
+
def _check_cli_auth(cli_name: str) -> dict[str, Any] | None:
    """Probe authentication status for *cli_name* via its registered provider.

    Returns ``None`` (meaning: skip this check) when the CLI is not on PATH,
    the provider modules cannot be imported, or no provider is registered
    for the CLI.
    """
    import shutil

    if shutil.which(cli_name) is None:
        return None

    try:
        from runtime.cli_provider import get_provider
        # Importing the provider modules registers them with get_provider.
        import runtime.providers.codex_provider  # noqa: F401
        import runtime.providers.gemini_provider  # noqa: F401
        import runtime.providers.kimi_provider  # noqa: F401
        import runtime.providers.opencode_provider  # noqa: F401
    except ImportError:
        return None

    provider = get_provider(cli_name)
    if provider is None:
        return None

    try:
        auth_ok, auth_msg = provider.check_auth()
    except Exception as exc:
        return _env_check(
            f"{cli_name}_auth", ok=False,
            message=f"{cli_name} auth check error: {exc}",
            remediation=f"Run '{cli_name} auth login' to authenticate",
        )

    if auth_ok is True:
        return _env_check(f"{cli_name}_auth", ok=True, message=auth_msg)
    if auth_ok is False:
        return _env_check(
            f"{cli_name}_auth", ok=False, message=auth_msg,
            remediation=f"Run '{cli_name} auth login' to authenticate",
        )
    # auth_ok was neither True nor False: indeterminate provider response.
    return _env_check(
        f"{cli_name}_auth", ok=False, message=auth_msg,
        remediation=f"Check {cli_name} authentication configuration",
    )
|
|
1119
|
+
|
|
1120
|
+
|
|
1121
|
+
def _check_writable_config_dir(host: str, *, home_dir: str | None = None) -> dict[str, Any]:
    """Report whether *host*'s config directory (or its parent) is writable.

    When the directory does not exist yet, a writable parent still counts as
    OK — the installer can create it.
    """
    _home = home_dir or os.environ.get("OMG_TEST_HOME_DIR", os.path.expanduser("~"))
    rel_parts = _ENV_HOST_CONFIG_DIRS.get(host)
    if not rel_parts:
        return _env_check(
            f"writable_{host}_config", ok=False,
            message=f"unknown host config path for {host}",
        )
    target = os.path.join(_home, *rel_parts)
    if os.path.isdir(target):
        can_write = os.access(target, os.W_OK)
        return _env_check(
            f"writable_{host}_config", ok=can_write,
            message=f"{target} {'writable' if can_write else 'not writable'}",
            remediation="" if can_write else f"Check permissions on {target}",
        )
    # Directory absent: fall back to probing its parent.
    parent = os.path.dirname(target)
    parent_ok = os.path.isdir(parent) and os.access(parent, os.W_OK)
    return _env_check(
        f"writable_{host}_config",
        ok=parent_ok,
        message=f"{target} not present; parent {'writable' if parent_ok else 'not writable'}",
        remediation="" if parent_ok else f"Ensure parent directory {parent} exists and is writable",
    )
|
|
1145
|
+
|
|
1146
|
+
|
|
1147
|
+
def run_env_doctor(*, root_dir: Path | None = None) -> dict[str, Any]:
    """Environment-focused doctor: host tooling, CLIs, auth, and config dirs.

    Unlike ``run_doctor`` (install/runtime verification), this audits the
    surrounding environment: Node/Python availability, omg-control MCP
    config, the managed runtime venv, each host CLI's presence and auth
    state, and config-directory writability. Returns a ``DoctorResult``
    dict; the run fails only when a required check produced a "blocker".
    """
    repo_root = root_dir or Path(__file__).resolve().parent.parent
    # OMG_TEST_HOME_DIR lets tests redirect home-relative lookups.
    home_dir = os.environ.get("OMG_TEST_HOME_DIR", os.path.expanduser("~"))
    claude_dir = (
        os.environ.get("CLAUDE_CONFIG_DIR")
        or os.environ.get("CLAUDE_DIR")
        or os.path.join(home_dir, ".claude")
    )
    checks: list[dict[str, Any]] = []

    # Node.js availability and version.
    checks.append(_check_node_version())

    # Running interpreter must be >= 3.10.
    py_ok = sys.version_info >= (3, 10)
    checks.append(_env_check(
        "python_version",
        ok=py_ok,
        message=(
            f"Python {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
            + ("" if py_ok else " (requires >=3.10)")
        ),
    ))
    checks.append(_check_python3_available())

    # omg-control MCP server configured in the repo's .mcp.json?
    mcp_json_path = repo_root / ".mcp.json"
    omg_control_ok = False
    omg_control_msg = ".mcp.json not found"
    if mcp_json_path.exists():
        try:
            with open(mcp_json_path, "r", encoding="utf-8") as f:
                mcp_data = json.load(f)
            servers = mcp_data.get("mcpServers", {})
            ctrl = servers.get("omg-control", {})
            command = ctrl.get("command", "")
            if command:
                omg_control_ok = True
                omg_control_msg = f"omg-control configured (stdio: {command})"
            elif ctrl:
                # Entry exists but has no "command": some non-stdio transport.
                omg_control_ok = True
                omg_control_msg = "omg-control configured (non-stdio)"
            else:
                omg_control_msg = "omg-control not found in .mcp.json mcpServers"
        except (json.JSONDecodeError, OSError, KeyError) as exc:
            omg_control_msg = f".mcp.json parse error: {exc}"
    checks.append(_env_check("omg_control_reachable", ok=omg_control_ok, message=omg_control_msg))

    # Managed runtime venv installed under the Claude config dir?
    managed_venv_path = Path(claude_dir) / "omg-runtime" / ".venv"
    managed_runtime_ok = managed_venv_path.exists()
    managed_runtime_msg = (
        f"managed venv at {managed_venv_path}"
        if managed_runtime_ok
        else f"managed venv not found at {managed_venv_path} (install via OMG-setup.sh)"
    )
    checks.append(_env_check("managed_runtime", ok=managed_runtime_ok, message=managed_runtime_msg))

    # Each host CLI on PATH...
    for cli_name in _ENV_HOST_CLIS:
        checks.append(_check_cli_path(cli_name))

    # ...and its auth state (skipped when the probe returns None).
    for cli_name in _ENV_HOST_CLIS:
        auth_check = _check_cli_auth(cli_name)
        if auth_check is not None:
            checks.append(auth_check)

    # Claude auth is host-native and deliberately never probed.
    checks.append(_env_check(
        "claude_auth", ok=True,
        message="host-native/non-probed",
    ))

    # Writability of each host's config directory.
    # NOTE(review): "opencode" is absent from this tuple although it appears
    # in _ENV_HOST_CONFIG_DIRS — presumably intentional; confirm.
    for host in ("claude", "codex", "gemini", "kimi"):
        checks.append(_check_writable_config_dir(host, home_dir=home_dir))

    # Aggregate verdict: any "blocker" status fails the run.
    has_blocker = any(c["status"] == "blocker" for c in checks)
    verdict_receipt = _normalize_verdict_payload({
        "status": "fail" if has_blocker else "pass",
        "blockers": [c["name"] for c in checks if c["status"] == "blocker"],
        "planned_actions": [],
        "executed_actions": ["run_env_doctor"],
        "provenance": "runtime.compat.run_env_doctor",
        "evidence_paths": {},
        "next_steps": ["Fix any blocker checks."] if has_blocker else [],
        "executed": True,
    })
    return {
        "schema": "DoctorResult",
        "status": "fail" if has_blocker else "pass",
        "verdict": "fail" if has_blocker else "pass",
        "checks": checks,
        "version": CANONICAL_VERSION,
        "verdict_receipt": verdict_receipt,
    }
|
|
1236
|
+
|
|
1237
|
+
|
|
1238
|
+
class DoctorFixSpec(TypedDict):
    """Static description of whether and how a doctor check can be auto-fixed."""

    fixable: bool  # True when an automated fix handler exists for the check
    fix_handler: Callable[[Path, dict[str, Any]], dict[str, Any]] | None  # planner invoked by run_doctor_fix; takes (repo_root, check)
    fixable_in_context: bool  # fix can be applied from the current process context
    suggestion: str  # manual remediation hint shown when not fixable
|
|
1243
|
+
|
|
1244
|
+
|
|
1245
|
+
def _fix_omg_control_reachable(root_dir: Path, _check: dict[str, Any]) -> dict[str, Any]:
|
|
1246
|
+
from runtime.config_transaction import ConfigTransaction
|
|
1247
|
+
|
|
1248
|
+
mcp_json_path = root_dir / ".mcp.json"
|
|
1249
|
+
mcp_data: dict[str, Any] = {}
|
|
1250
|
+
if mcp_json_path.exists():
|
|
1251
|
+
try:
|
|
1252
|
+
with open(mcp_json_path, "r", encoding="utf-8") as f:
|
|
1253
|
+
mcp_data = json.load(f)
|
|
1254
|
+
except (json.JSONDecodeError, OSError):
|
|
1255
|
+
pass
|
|
1256
|
+
servers = mcp_data.setdefault("mcpServers", {})
|
|
1257
|
+
servers["omg-control"] = {
|
|
1258
|
+
"command": "python3",
|
|
1259
|
+
"args": ["-m", "runtime.omg_mcp_server"],
|
|
1260
|
+
}
|
|
1261
|
+
content = json.dumps(mcp_data, indent=2, ensure_ascii=True) + "\n"
|
|
1262
|
+
return {"planned_path": str(mcp_json_path), "content": content, "mode": 0o644}
|
|
1263
|
+
|
|
1264
|
+
|
|
1265
|
+
def _fix_policy_files(root_dir: Path, _check: dict[str, Any]) -> dict[str, Any]:
|
|
1266
|
+
policy_path = root_dir / ".omg" / "policy.yaml"
|
|
1267
|
+
if policy_path.exists():
|
|
1268
|
+
return {}
|
|
1269
|
+
content = "mode: warn_and_run\ncritical_block: true\n"
|
|
1270
|
+
return {"planned_path": str(policy_path), "content": content, "mode": 0o644}
|
|
1271
|
+
|
|
1272
|
+
|
|
1273
|
+
def _fix_metadata_drift(root_dir: Path, check: dict[str, Any]) -> dict[str, Any]:
    """Plan a one-file patch replacing the first drifted version string.

    Re-runs the drift scan, takes the first drifted surface, and plans a
    replacement of its stale version with ``CANONICAL_VERSION``. Returns an
    empty dict when there is no drift, the target file is missing, the
    sentinel ``"<not found>"`` was reported, or the replacement is a no-op.
    """
    from runtime.adoption import CANONICAL_VERSION
    from runtime.contract_compiler import _check_version_identity_drift

    drift_result = _check_version_identity_drift(root_dir)
    drift_details = drift_result.get("drift_details", {})
    if not drift_details:
        return {}

    first_label = next(iter(drift_details))
    old_version = drift_details[first_label]
    # Labels look like "<rel_path>:<detail>". str.split always returns a
    # non-empty list, so the original `parts[0] if parts else first_label`
    # guard was dead code.
    rel_path = first_label.split(":", 1)[0]
    target = root_dir / rel_path
    if not target.exists():
        return {}
    if old_version == "<not found>":
        # Sentinel: the surface carried no version string; nothing to replace.
        return {}
    original = target.read_text(encoding="utf-8")
    patched = original.replace(old_version, CANONICAL_VERSION)
    if patched == original:
        return {}
    return {"planned_path": str(target), "content": patched, "mode": 0o644}
|
|
1294
|
+
|
|
1295
|
+
|
|
1296
|
+
def _atomic_write_json_str(path: str, content: str) -> None:
|
|
1297
|
+
tmp = path + ".tmp"
|
|
1298
|
+
with open(tmp, "w", encoding="utf-8") as f:
|
|
1299
|
+
f.write(content)
|
|
1300
|
+
f.flush()
|
|
1301
|
+
os.fsync(f.fileno())
|
|
1302
|
+
os.replace(tmp, path)
|
|
1303
|
+
|
|
1304
|
+
|
|
1305
|
+
def _fix_orphaned_runtime(root_dir: Path, _check: dict[str, Any]) -> dict[str, Any]:
    """Remove orphaned omg-runtime references from host CLI config files.

    Honors ``_check["_dry_run"]`` (defaults to True): in dry-run mode
    nothing is written and the collected references are reported as the
    would-be removals. Mutations go through ``_atomic_write_json_str`` for
    JSON files and a direct rewrite for the codex TOML. Returns a direct
    receipt dict (``_direct_receipt`` flag) consumed by ``run_doctor_fix``.
    """
    claude_dir = os.environ.get("CLAUDE_DIR", os.path.expanduser("~/.claude"))
    _home = os.environ.get("OMG_TEST_HOME_DIR", os.path.expanduser("~"))
    dry_run = bool(_check.get("_dry_run", True))
    refs = _collect_orphaned_runtime_refs(claude_dir, home_dir=_home)
    removed_paths: list[str] = []

    # --- Claude settings.json: drop hook entries whose command mentions the
    # marker, and filter nested "hooks" sub-entries in place. ---
    settings_path = os.path.join(claude_dir, "settings.json")
    if not dry_run and os.path.isfile(settings_path) and any("settings.json" in r for r in refs):
        try:
            with open(settings_path, "r", encoding="utf-8") as f:
                settings_data = json.load(f)
            hooks = settings_data.get("hooks", {})
            changed = False
            for event in list(hooks.keys()):
                hook_list = hooks[event]
                if not isinstance(hook_list, list):
                    continue
                filtered = []
                for entry in hook_list:
                    if isinstance(entry, dict):
                        cmd = entry.get("command", "")
                        sub_hooks = entry.get("hooks", [])
                        sub_filtered = [
                            s for s in sub_hooks
                            if not (isinstance(s, dict) and _ORPHANED_RUNTIME_MARKER in s.get("command", ""))
                        ]
                        if len(sub_filtered) != len(sub_hooks):
                            # Copy before mutating so the original entry
                            # object is left untouched.
                            entry = dict(entry)
                            entry["hooks"] = sub_filtered
                            changed = True
                        if _ORPHANED_RUNTIME_MARKER not in cmd:
                            filtered.append(entry)
                        else:
                            changed = True
                            removed_paths.append(f"settings.json:hooks:{event}:{cmd}")
                    else:
                        # Non-dict entries are preserved verbatim.
                        filtered.append(entry)
                hooks[event] = filtered
            if changed:
                settings_data["hooks"] = hooks
                content = json.dumps(settings_data, indent=2, ensure_ascii=True) + "\n"
                _atomic_write_json_str(settings_path, content)
        except (json.JSONDecodeError, OSError):
            pass

    # --- Claude .mcp.json: delete the omg-control server entry. ---
    mcp_json_path = os.path.join(claude_dir, ".mcp.json")
    if not dry_run and os.path.isfile(mcp_json_path) and any(".mcp.json" in r for r in refs):
        try:
            with open(mcp_json_path, "r", encoding="utf-8") as f:
                mcp_data = json.load(f)
            ctrl = mcp_data.get("mcpServers", {}).get("omg-control", {})
            if _ORPHANED_RUNTIME_MARKER in ctrl.get("command", ""):
                del mcp_data["mcpServers"]["omg-control"]
                content = json.dumps(mcp_data, indent=2, ensure_ascii=True) + "\n"
                _atomic_write_json_str(mcp_json_path, content)
                removed_paths.append(f"{mcp_json_path}:mcpServers.omg-control")
        except (json.JSONDecodeError, OSError):
            pass

    # --- Codex config.toml: TOML path, requires tomlkit (skipped if absent). ---
    codex_cfg = os.path.join(_home, ".codex", "config.toml")
    if not dry_run and os.path.isfile(codex_cfg) and any(codex_cfg in r for r in refs):
        try:
            import tomlkit
            import tomlkit.exceptions
        except ImportError:
            tomlkit = None  # type: ignore[assignment]
        if tomlkit is not None:
            try:
                with open(codex_cfg, "r", encoding="utf-8") as f:
                    doc = tomlkit.parse(f.read())
                mcp_servers = doc.get("mcp_servers", {})
                ctrl = mcp_servers.get("omg-control", {})
                cmd = ctrl.get("command", "")
                if isinstance(cmd, str) and _ORPHANED_RUNTIME_MARKER in cmd:
                    del mcp_servers["omg-control"]
                    # tomlkit round-trips comments/formatting of the rest.
                    with open(codex_cfg, "w", encoding="utf-8") as f:
                        f.write(tomlkit.dumps(doc))
                    removed_paths.append(f"{codex_cfg}:mcp_servers.omg-control")
            except (OSError, KeyError, tomlkit.exceptions.ParseError):
                pass

    # --- Remaining JSON-based host configs (gemini, kimi, opencode). ---
    if not dry_run:
        for cfg_path, key_path, mcp_top_key in [
            (os.path.join(_home, ".gemini", "settings.json"), "mcpServers.omg-control", "mcpServers"),
            (os.path.join(_home, ".kimi", "mcp.json"), "mcpServers.omg-control", "mcpServers"),
            (os.path.join(_home, ".config", "opencode", "opencode.json"), "mcp.omg-control", "mcp"),
        ]:
            if not os.path.isfile(cfg_path) or not any(cfg_path in r for r in refs):
                continue
            try:
                with open(cfg_path, "r", encoding="utf-8") as f:
                    data = json.load(f)
                ctrl = data.get(mcp_top_key, {}).get("omg-control", {})
                if _ORPHANED_RUNTIME_MARKER in ctrl.get("command", ""):
                    del data[mcp_top_key]["omg-control"]
                    content = json.dumps(data, indent=2, ensure_ascii=True) + "\n"
                    _atomic_write_json_str(cfg_path, content)
                    removed_paths.append(f"{cfg_path}:{key_path}")
            except (json.JSONDecodeError, OSError):
                pass

    # Direct receipt: run_doctor_fix records this verbatim instead of
    # routing it through a ConfigTransaction.
    return {
        "planned_path": settings_path,
        "content": None,
        "action": "remove_orphaned_runtime",
        "removed_paths": removed_paths if not dry_run else [r for r in refs],
        "backup_path": "",
        "executed": not dry_run,
        "rollback": None,
        "_direct_receipt": True,
    }
|
|
1417
|
+
|
|
1418
|
+
|
|
1419
|
+
# Registry mapping doctor check names to their auto-fix capability.
# Checks without a handler carry a manual remediation suggestion instead;
# checks absent from this dict fall back to _DEFAULT_FIX_SPEC.
DOCTOR_FIX_SPECS: dict[str, DoctorFixSpec] = {
    # Interpreter upgrades cannot be automated from inside the process.
    "python_version": {
        "fixable": False,
        "fix_handler": None,
        "fixable_in_context": False,
        "suggestion": "Install Python >= 3.10 from python.org or via your package manager",
    },
    # Installing packages is left to the user's environment manager.
    "fastmcp": {
        "fixable": False,
        "fix_handler": None,
        "fixable_in_context": False,
        "suggestion": "Run: pip install fastmcp",
    },
    "omg_control_reachable": {
        "fixable": True,
        "fix_handler": _fix_omg_control_reachable,
        "fixable_in_context": True,
        "suggestion": "",
    },
    "policy_files": {
        "fixable": True,
        "fix_handler": _fix_policy_files,
        "fixable_in_context": True,
        "suggestion": "",
    },
    "metadata_drift": {
        "fixable": True,
        "fix_handler": _fix_metadata_drift,
        "fixable_in_context": True,
        "suggestion": "",
    },
    "orphaned_runtime": {
        "fixable": True,
        "fix_handler": _fix_orphaned_runtime,
        "fixable_in_context": True,
        "suggestion": "",
    },
}
|
|
1457
|
+
|
|
1458
|
+
# Fallback spec for checks without a DOCTOR_FIX_SPECS entry: never auto-fixed.
_DEFAULT_FIX_SPEC: DoctorFixSpec = {
    "fixable": False,
    "fix_handler": None,
    "fixable_in_context": False,
    "suggestion": "Manual intervention required",
}
|
|
1464
|
+
|
|
1465
|
+
|
|
1466
|
+
def run_doctor_fix(*, root_dir: Path | None = None, dry_run: bool = True) -> dict[str, Any]:
    """Run doctor checks and attempt (or plan) automated fixes for failures.

    Every check from ``run_doctor`` is annotated with its fixability from
    ``DOCTOR_FIX_SPECS``. For each non-ok, fixable check the registered
    handler produces a plan; the plan is either a direct receipt or is
    applied through a ConfigTransaction (planned only when ``dry_run``).

    Args:
        root_dir: Project root; defaults to two levels above this module.
        dry_run: When True, transactions are only planned, never executed.

    Returns:
        A ``DoctorFixResult`` dict with enriched checks and fix receipts.
    """
    from runtime.config_transaction import ConfigTransaction, ConfigTransactionError

    diagnosis = run_doctor(root_dir=root_dir)
    repo_root = root_dir or Path(__file__).resolve().parent.parent

    annotated: list[dict[str, Any]] = []
    receipts: list[dict[str, Any]] = []

    for check in diagnosis["checks"]:
        name = check["name"]
        fix_spec = DOCTOR_FIX_SPECS.get(name, _DEFAULT_FIX_SPEC)

        # Annotate the check copy; only unfixable checks get a manual hint.
        entry = dict(check)
        entry["fixable"] = fix_spec["fixable"]
        if not fix_spec["fixable"]:
            entry["suggestion"] = fix_spec["suggestion"]
        annotated.append(entry)

        handler = fix_spec["fix_handler"]
        if check["status"] == "ok" or not fix_spec["fixable"] or handler is None:
            continue

        # Handlers receive a copy of the check tagged with the run mode.
        request = dict(check)
        request["_dry_run"] = dry_run
        plan = handler(repo_root, request)
        if not plan or "planned_path" not in plan:
            continue

        # Some handlers perform their own work and hand back a finished
        # receipt instead of a transaction plan.
        if plan.get("_direct_receipt"):
            receipts.append(
                {
                    "check": name,
                    "action": plan.get("action", f"fix_{name}"),
                    "backup_path": plan.get("backup_path", ""),
                    "verification": {"removed_paths": plan.get("removed_paths", [])},
                    "executed": bool(plan.get("executed", False)),
                    "rollback": plan.get("rollback"),
                }
            )
            continue

        # Each fix runs in its own scratch directory for lock and backups.
        scratch = Path(tempfile.mkdtemp(prefix="doctor-fix-"))
        tx = ConfigTransaction(
            lock_path=scratch / "doctor-fix.lock",
            backup_root=scratch / "backups",
        )
        tx.plan(
            plan["planned_path"],
            plan["content"],
            mode=plan.get("mode", 0o644),
        )

        try:
            tx_receipt = tx.dry_run() if dry_run else tx.execute()
        except ConfigTransactionError as exc:
            # Prefer the receipt attached to the error; otherwise report an
            # empty, non-executed receipt so the fix is still accounted for.
            tx_receipt = exc.receipt or {
                "planned_writes": [],
                "executed_writes": [],
                "backup_path": "",
                "verification": {},
                "executed": False,
                "rollback": None,
            }

        receipts.append(
            {
                "check": name,
                "action": plan.get("action", f"fix_{name}"),
                "backup_path": tx_receipt.get("backup_path", ""),
                "verification": tx_receipt.get("verification", {}),
                "executed": tx_receipt.get("executed", False),
                "rollback": tx_receipt.get("rollback"),
            }
        )

    # In dry-run mode a blocker check cannot have been repaired yet, so the
    # overall status is forced to "fail".
    blockers_remain = dry_run and any(c["status"] == "blocker" for c in annotated)

    return {
        "schema": "DoctorFixResult",
        "mode": "dry_run" if dry_run else "fix",
        "status": "fail" if blockers_remain else diagnosis["status"],
        "checks": annotated,
        "fix_receipts": receipts,
        "version": CANONICAL_VERSION,
    }
|
|
1547
|
+
|
|
1548
|
+
|
|
1549
|
+
def _write_release_artifact(project_dir: str, message: str) -> str:
    """Append a timestamped entry to the release-draft evidence log.

    Creates ``.omg/evidence/release-draft.md`` with a header on first use,
    then appends one bullet line per call.

    Args:
        project_dir: Project root containing the ``.omg`` layout.
        message: Free-form text recorded alongside the timestamp.

    Returns:
        Absolute/joined path of the log file written.
    """
    _ensure_state_layout(project_dir)
    out = os.path.join(project_dir, ".omg", "evidence", "release-draft.md")
    # Fix: the original opened the file twice (once in "w" to seed the header,
    # then again in "a"), leaving a needless second open and a race window
    # between the existence check and the write. Append mode creates the file
    # if missing, so one open suffices; the header is written only when the
    # file did not exist beforehand.
    is_new = not os.path.exists(out)
    with open(out, "a", encoding="utf-8") as f:
        if is_new:
            f.write("# Release Draft\n\n")
        f.write(f"- {_now()}: {message}\n")
    return out
|
|
1558
|
+
|
|
1559
|
+
|
|
1560
|
+
def _write_build_fix_artifact(project_dir: str, message: str) -> str:
    """Append a timestamped build-fix checklist section to the build-fix log.

    Args:
        project_dir: Project root containing the ``.omg`` layout.
        message: Target description recorded under the section header.

    Returns:
        Path of ``.omg/state/build-fix.md``.
    """
    _ensure_state_layout(project_dir)
    out = os.path.join(project_dir, ".omg", "state", "build-fix.md")
    # Assemble the whole section up front and write it in one call.
    section = (
        f"## {_now()}\n"
        f"- target: {message}\n"
        "- checklist:\n"
        " - reproduce failure\n"
        " - implement minimal fix\n"
        " - run focused tests\n"
        " - run full regression\n\n"
    )
    with open(out, "a", encoding="utf-8") as f:
        f.write(section)
    return out
|
|
1572
|
+
|
|
1573
|
+
|
|
1574
|
+
def _write_persistent_state(
    project_dir: str,
    *,
    mode: str,
    goal: str,
    context: str,
    expected_outcome: str,
    runtime_result: dict[str, Any],
) -> str:
    """Create or refresh the persistent-mode state file and log this dispatch.

    Existing state on disk is merged in so ``started_at`` and ``history``
    survive, then the per-dispatch fields are force-overwritten. History is
    capped at the most recent 200 events.

    Returns:
        Path of ``.omg/state/persistent-mode.json``.
    """
    _ensure_state_layout(project_dir)
    path = os.path.join(project_dir, ".omg", "state", "persistent-mode.json")
    runtime_status = runtime_result.get("status", "unknown")

    # Defaults for a brand-new state file.
    state: dict[str, Any] = {
        "schema": "PersistentModeState",
        "mode": mode,
        "status": "active",
        "goal": goal,
        "context": context,
        "expected_outcome": expected_outcome,
        "started_at": _now(),
        "last_updated": _now(),
        "last_runtime_status": runtime_status,
        "history": [],
    }

    # Layer any existing file on top of the defaults; a corrupt or
    # unreadable file silently falls back to the fresh defaults.
    if os.path.exists(path):
        try:
            with open(path, "r", encoding="utf-8") as f:
                previous = json.load(f)
            if isinstance(previous, dict):
                state.update(previous)
        except (json.JSONDecodeError, OSError):
            pass

    # Guard against a missing or malformed history field from disk.
    state.setdefault("history", [])
    if not isinstance(state["history"], list):
        state["history"] = []

    # The current dispatch always wins over stale values loaded above.
    state["mode"] = mode
    state["status"] = "active"
    state["goal"] = goal
    state["context"] = context
    state["expected_outcome"] = expected_outcome
    state["last_updated"] = _now()
    state["last_runtime_status"] = runtime_status

    state["history"].append(
        {
            "ts": _now(),
            "event": "dispatch",
            "goal": goal,
            "runtime_status": runtime_status,
        }
    )
    state["history"] = state["history"][-200:]

    with open(path, "w", encoding="utf-8") as f:
        json.dump(state, f, indent=2, ensure_ascii=True)
    return path
|
|
1627
|
+
|
|
1628
|
+
|
|
1629
|
+
def _run_dual_review(
    problem: str,
    context: str,
    files: list[str],
    expected_outcome: str,
) -> dict[str, Any]:
    """Dispatch the same review to codex and ccg, then merge their actions.

    Returns:
        A ``ReviewSynthesis`` dict holding both track results and the
        order-preserving, de-duplicated union of their action lists.
    """

    def _track(target: str, prefix: str) -> dict[str, Any]:
        # One dispatch per track; only the target and problem prefix differ.
        request = TeamDispatchRequest(
            target=target,
            problem=f"{prefix}: {problem}",
            context=context,
            files=files,
            expected_outcome=expected_outcome,
        )
        return dispatch_team(request).to_dict()

    codex = _track("codex", "review")
    ccg = _track("ccg", "cross-check")

    # dict.fromkeys de-duplicates while keeping first-seen order.
    merged_actions = list(
        dict.fromkeys([*codex.get("actions", []), *ccg.get("actions", [])])
    )

    return {
        "schema": "ReviewSynthesis",
        "status": "ok",
        "tracks": {"codex": codex, "ccg": ccg},
        "summary": [
            "Dual-track review executed (codex + ccg).",
            f"Merged action count: {len(merged_actions)}",
        ],
        "actions": merged_actions,
    }
|
|
1669
|
+
|
|
1670
|
+
|
|
1671
|
+
def _ensure_plan_artifacts(project_dir: str, goal: str) -> list[str]:
    """Seed the deep-plan, checklist, and idea artifacts if they are missing.

    Existing files are never overwritten (``_write_if_missing``).

    Returns:
        Paths of the three artifacts, relative to ``project_dir``.
    """
    _ensure_state_layout(project_dir)
    state_dir = os.path.join(project_dir, ".omg", "state")
    plan_path = os.path.join(state_dir, "_plan.md")
    checklist_path = os.path.join(state_dir, "_checklist.md")
    idea_path = os.path.join(project_dir, ".omg", "idea.yml")

    plan_body = (
        "# Deep Plan\n"
        f"goal: {goal or 'compat planning'}\n"
        "CHANGE_BUDGET=small\n"
        "phases:\n"
        "- foundation\n- implementation\n- verification\n"
    )
    checklist_body = (
        "- [ ] write failing test\n- [ ] implement minimal fix\n- [ ] run tests\n"
    )
    idea_body = (
        "goal: \"compat-plan\"\n"
        "constraints: []\n"
        "acceptance: []\n"
        "risk:\n"
        " security: []\n"
        " performance: []\n"
        " compatibility: []\n"
        "evidence_required:\n"
        " tests: []\n"
        " security_scans: []\n"
        " reproducibility: []\n"
        " artifacts: []\n"
    )

    for path, body in (
        (plan_path, plan_body),
        (checklist_path, checklist_body),
        (idea_path, idea_body),
    ):
        _write_if_missing(path, body)

    return [
        os.path.relpath(p, project_dir)
        for p in (plan_path, checklist_path, idea_path)
    ]
|
|
1708
|
+
|
|
1709
|
+
|
|
1710
|
+
def _ensure_tdd_artifacts(project_dir: str, goal: str) -> list[str]:
    """Write fresh TDD plan/checklist artifacts and seed the idea file.

    Unlike :func:`_ensure_plan_artifacts`, the plan and checklist are
    rewritten on every call; only ``idea.yml`` is created if missing.

    Returns:
        Paths of the three artifacts, relative to ``project_dir``.
    """
    _ensure_state_layout(project_dir)
    state_dir = os.path.join(project_dir, ".omg", "state")
    plan_path = os.path.join(state_dir, "_plan.md")
    checklist_path = os.path.join(state_dir, "_checklist.md")
    idea_path = os.path.join(project_dir, ".omg", "idea.yml")

    plan_body = (
        "# TDD Plan\n"
        f"goal: {goal or 'tdd workflow'}\n"
        "CHANGE_BUDGET=small\n"
        "workflow:\n"
        "- red: write failing test\n"
        "- green: minimal implementation\n"
        "- refactor: clean while tests stay green\n"
    )
    checklist_body = (
        "- [ ] red: create failing test for target behavior\n"
        "- [ ] red: run targeted test and confirm failure reason\n"
        "- [ ] green: write minimal code to pass test\n"
        "- [ ] green: re-run targeted test and confirm pass\n"
        "- [ ] refactor: clean implementation without behavior change\n"
        "- [ ] verify: run full test suite\n"
    )
    idea_body = (
        "goal: \"tdd\"\n"
        "constraints: []\n"
        "acceptance: []\n"
        "risk:\n"
        " security: []\n"
        " performance: []\n"
        " compatibility: []\n"
        "evidence_required:\n"
        " tests: []\n"
        " security_scans: []\n"
        " reproducibility: []\n"
        " artifacts: []\n"
    )

    with open(plan_path, "w", encoding="utf-8") as f:
        f.write(plan_body)
    with open(checklist_path, "w", encoding="utf-8") as f:
        f.write(checklist_body)
    _write_if_missing(idea_path, idea_body)

    return [
        os.path.relpath(p, project_dir)
        for p in (plan_path, checklist_path, idea_path)
    ]
|
|
1754
|
+
|
|
1755
|
+
|
|
1756
|
+
def dispatch_compat_skill(
    *,
    skill: str,
    problem: str = "",
    context: str = "",
    files: list[str] | None = None,
    expected_outcome: str = "",
    project_dir: str | None = None,
) -> dict[str, Any]:
    """Route a legacy compat skill name to its OMG handler and run it.

    The skill name is looked up in ``LEGACY_SKILL_ROUTES``; the resulting
    route selects one branch below (teams/ccg/runtime_ship/pipeline/memory/
    init/health/help/review/plan/secure/security_check/learn/maintainer/
    cancel). Every response is emitted through ``_res``, which appends a
    dispatch audit event before returning; a request audit event is appended
    once the request validates.

    Returns:
        The result dict produced by ``_result`` for the chosen route, with
        ``status="error"`` for an empty/unknown skill, an invalid request,
        or a route with no handler.
    """
    normalized = skill.strip()
    root = _project_dir(project_dir)

    def _emit(payload: dict[str, Any]) -> dict[str, Any]:
        # Append a dispatch audit event describing the outgoing payload,
        # then pass the payload through unchanged.
        _append_audit_event(
            root,
            {
                "event": DEFAULT_EVENT_DISPATCH,
                "skill": normalized,
                "route": payload.get("route", ""),
                "status": payload.get("status", "unknown"),
                "routed_to": payload.get("routed_to", ""),
                "problem_chars": len(problem),
                "context_chars": len(context),
                "file_count": len(files or []),
            },
        )
        return payload

    def _res(**kwargs: Any) -> dict[str, Any]:
        # Build a result via _result and audit it on the way out.
        return _emit(_result(**kwargs))

    if not normalized:
        return _res(
            skill=skill,
            route="unknown",
            status="error",
            findings=["Missing skill name."],
            actions=["Provide --skill value."],
        )

    route = LEGACY_SKILL_ROUTES.get(normalized)
    if route is None:
        return _res(
            skill=normalized,
            route="unknown",
            status="error",
            findings=[f"Unsupported skill: {normalized}"],
            actions=["Use `omg compat list` to see supported skill names."],
        )

    is_valid, reason = validate_compat_request(
        skill=normalized,
        problem=problem,
        context=context,
        files=files,
        expected_outcome=expected_outcome,
    )
    if not is_valid:
        return _res(
            skill=normalized,
            route=route,
            status="error",
            findings=[f"Invalid request: {reason}"],
            actions=["Adjust inputs and retry."],
        )

    # Request accepted: record the incoming request before executing the route.
    _append_audit_event(
        root,
        {
            "event": DEFAULT_EVENT_REQUEST,
            "skill": normalized,
            "route": route,
            "problem_chars": len(problem),
            "context_chars": len(context),
            "file_count": len(files or []),
        },
    )

    # Fall back to a synthetic problem statement so downstream handlers
    # always receive non-empty text.
    msg = problem or f"compat dispatch via {normalized}"
    file_list = files or []

    # --- route: teams — auto-targeted team dispatch ---
    if route == "teams":
        req = TeamDispatchRequest(
            target="auto",
            problem=msg,
            context=context,
            files=file_list,
            expected_outcome=expected_outcome,
        )
        team = dispatch_team(req).to_dict()
        return _res(
            skill=normalized,
            route=route,
            routed_to=str(team.get("evidence", {}).get("target", "")),
            findings=["Team route dispatched."],
            actions=["Review findings and apply selected actions."],
            result=team,
        )

    # --- route: ccg — fixed-target team dispatch ---
    if route == "ccg":
        req = TeamDispatchRequest(
            target="ccg",
            problem=msg,
            context=context,
            files=file_list,
            expected_outcome=expected_outcome,
        )
        ccg = dispatch_team(req).to_dict()
        return _res(
            skill=normalized,
            route=route,
            routed_to="ccg",
            findings=["CCG route dispatched."],
            actions=["Review merged action plan."],
            result=ccg,
        )

    # --- route: runtime_ship — dispatch to the claude runtime, with
    # persistent-mode / release / build-fix side artifacts for some skills ---
    if route == "runtime_ship":
        runtime = dispatch_runtime(
            "claude",
            {"goal": msg, "constraints": [], "acceptance": [expected_outcome] if expected_outcome else []},
        )
        status = "ok" if runtime.get("status") == "ok" else "error"
        artifacts: list[str] = []
        # Long-running skills persist their state regardless of outcome.
        if normalized in {"autopilot", "ralph", "ultrapilot", "ultrawork"}:
            persistent = _write_persistent_state(
                root,
                mode=normalized,
                goal=msg,
                context=context,
                expected_outcome=expected_outcome,
                runtime_result=runtime,
            )
            artifacts.append(os.path.relpath(persistent, root))
        # Release/build-fix artifacts are only written on success.
        if normalized == "release" and status == "ok":
            rel = _write_release_artifact(root, msg)
            artifacts.append(os.path.relpath(rel, root))
        if normalized == "build-fix" and status == "ok":
            build_fix = _write_build_fix_artifact(root, msg)
            artifacts.append(os.path.relpath(build_fix, root))
        return _res(
            skill=normalized,
            route=route,
            status=status,
            routed_to="claude",
            findings=["Runtime dispatch completed." if status == "ok" else "Runtime dispatch failed."],
            actions=[
                "Inspect runtime response and continue.",
                "If persistent mode is active, keep iterating until checklist completion.",
            ],
            result=runtime,
            artifacts=artifacts,
        )

    # --- route: pipeline — run the lab pipeline with fixed demo inputs ---
    if route == "pipeline":
        pipeline = run_pipeline(
            {
                "dataset": {"source": "clean-source", "license": "mit"},
                "base_model": {"source": "open-model", "allow_distill": True},
                "target_metric": 0.7,
                "simulated_metric": 0.8,
                "evaluation_notes": f"compat:{normalized}",
            }
        )
        status = "ok" if pipeline.get("status") in {"ready", "published"} else "error"
        return _res(
            skill=normalized,
            route=route,
            status=status,
            findings=["Pipeline route executed."],
            actions=["Use `omg lab eval` when evaluation is ready."],
            result=pipeline,
        )

    # --- route: memory — per-skill state/knowledge files, with working
    # memory as the default destination ---
    if route == "memory":
        if normalized == "project-session-manager":
            session = _update_session_state(root, msg)
            return _res(
                skill=normalized,
                route=route,
                findings=["Session state updated."],
                actions=["Use session state to continue long-running work."],
                artifacts=[os.path.relpath(session, root)],
            )
        if normalized == "writer-memory":
            writer = _append_knowledge_note(
                root,
                "knowledge/writer-memory.md",
                f"- [{_now()}] {msg}",
            )
            return _res(
                skill=normalized,
                route=route,
                findings=["Writer memory updated."],
                actions=["Reuse writer-memory notes for long-form drafting."],
                artifacts=[os.path.relpath(writer, root)],
            )
        if normalized == "note":
            note = _append_knowledge_note(
                root,
                "knowledge/notes.md",
                f"- [{_now()}] {msg}",
            )
            return _res(
                skill=normalized,
                route=route,
                findings=["Note appended to knowledge log."],
                actions=["Review notes during planning and handoff."],
                artifacts=[os.path.relpath(note, root)],
            )
        wm_path = _append_memory(root, msg)
        return _res(
            skill=normalized,
            route=route,
            findings=["Working memory updated."],
            actions=["Continue work with refreshed context."],
            artifacts=[os.path.relpath(wm_path, root)],
        )

    # --- route: init — bootstrap the .omg layout ---
    if route == "init":
        artifacts = _init_bootstrap(root, msg)
        return _res(
            skill=normalized,
            route=route,
            findings=["OMG layout initialized."],
            actions=["Run `omg compat run --skill omg-doctor` to verify health."],
            artifacts=artifacts,
        )

    # --- route: health — full doctor run for omg-doctor, otherwise a
    # lightweight snapshot ---
    if route == "health":
        if normalized == "omg-doctor":
            doctor_result = run_doctor(root_dir=Path(root))
            # Normalize the doctor output into a verdict receipt, pulling
            # nested fields defensively with .get chains.
            verdict_receipt = _normalize_verdict_payload({
                "status": doctor_result.get("status", "pending"),
                "verdict": doctor_result.get("verdict", doctor_result.get("status", "pending")),
                "blockers": doctor_result.get("verdict_receipt", {}).get("blockers", []),
                "planned_actions": doctor_result.get("verdict_receipt", {}).get("planned_actions", []),
                "executed_actions": doctor_result.get("verdict_receipt", {}).get("executed_actions", []),
                "provenance": "runtime.compat.dispatch_compat_skill",
                "evidence_paths": doctor_result.get("verdict_receipt", {}).get("evidence_paths", {}),
                "next_steps": doctor_result.get("verdict_receipt", {}).get("next_steps", []),
                "executed": True,
                "metadata": {
                    "checks": doctor_result.get("checks", []),
                },
            })
            snapshot = {
                "project_dir": root,
                "status": doctor_result["status"],
                "verdict": doctor_result.get("verdict", doctor_result["status"]),
                "checks": doctor_result["checks"],
                "verdict_receipt": verdict_receipt,
            }
            return _res(
                skill=normalized,
                route=route,
                findings=["Doctor verification completed."],
                actions=["Fix any blocker checks before shipping."],
                result=snapshot,
            )
        snapshot = _health_snapshot(root)
        return _res(
            skill=normalized,
            route=route,
            findings=["Health snapshot generated."],
            actions=["Create missing .omg folders if any field is false."],
            result=snapshot,
        )

    # --- route: help — list supported skills and the gap-report hint ---
    if route == "help":
        return _res(
            skill=normalized,
            route=route,
            findings=["Compatibility help generated."],
            actions=["Run `omg compat list`, `omg compat contract --all`, then `omg compat run --skill <name>`."],
            result={
                "supported_skills": list_compat_skills(),
                "gap_report_hint": DEFAULT_GAP_REPORT_PATH,
            },
        )

    # --- route: review — dual-track (codex + ccg) for review-class skills,
    # single codex track otherwise ---
    if route == "review":
        if normalized in {"review", "code-review", "ultraqa"}:
            review = _run_dual_review(msg, context, file_list, expected_outcome)
            routed_to = "codex+ccg"
            findings = ["Dual-track review route dispatched."]
        else:
            req = TeamDispatchRequest(
                target="codex",
                problem=f"review: {msg}",
                context=context,
                files=file_list,
                expected_outcome=expected_outcome,
            )
            review = dispatch_team(req).to_dict()
            routed_to = "codex"
            findings = ["Review route dispatched."]
        return _res(
            skill=normalized,
            route=route,
            routed_to=routed_to,
            findings=findings,
            actions=["Address high-risk findings first."],
            result=review,
        )

    # --- route: plan — TDD artifacts for the tdd skill, deep-plan otherwise ---
    if route == "plan":
        if normalized == "tdd":
            artifacts = _ensure_tdd_artifacts(root, msg)
            findings = ["TDD artifacts are ready (red-green-refactor)."]
            actions = ["Execute checklist in strict red -> green -> refactor order."]
        else:
            artifacts = _ensure_plan_artifacts(root, msg)
            findings = ["Plan artifacts are ready."]
            actions = ["Refine _plan/_checklist then execute with evidence."]
        return _res(
            skill=normalized,
            route=route,
            findings=findings,
            actions=actions,
            artifacts=artifacts,
        )

    # --- route: secure — evaluate the raw problem text as a shell command;
    # note this uses `problem` (not `msg`) so an empty input evaluates a
    # known-safe command instead of the synthetic dispatch text ---
    if route == "secure":
        decision = evaluate_bash_command(problem or "echo safe")
        return _res(
            skill=normalized,
            route=route,
            findings=["Security policy evaluation completed."],
            actions=["If action is deny, revise command and retry."],
            result=decision.to_dict(),
        )

    # --- route: security_check — canonical project security scan ---
    if route == "security_check":
        check = run_security_check(
            project_dir=root,
            scope=msg or ".",
            include_live_enrichment=False,
        )
        return _res(
            skill=normalized,
            route=route,
            findings=["Canonical OMG security check completed."],
            actions=["Review high-severity findings before ship/release."],
            result=check,
        )

    # --- route: learn — structured learning artifact for learning skills,
    # otherwise a working-memory note ---
    if route == "learn":
        if normalized in {"learn-about-omg", "learner", "skill"}:
            learn_path = _write_learning_artifact(root, normalized, msg, context)
            return _res(
                skill=normalized,
                route=route,
                findings=["Learning artifact recorded."],
                actions=["Promote stable patterns from learning artifacts into reusable commands/skills."],
                artifacts=[os.path.relpath(learn_path, root)],
            )
        note_path = _append_memory(root, f"learn: {msg}")
        return _res(
            skill=normalized,
            route=route,
            findings=["Learning note recorded."],
            actions=["Review .omg/state/working-memory.md for accumulated insights."],
            artifacts=[os.path.relpath(note_path, root)],
        )

    # --- route: maintainer — analysis artifact for analysis skills,
    # maintainer artifact otherwise ---
    if route == "maintainer":
        if normalized in {"analyze", "trace", "sci-omg"}:
            artifact = _write_analysis_artifact(root, normalized, msg, context, file_list)
            return _res(
                skill=normalized,
                route=route,
                findings=["Analysis artifact generated."],
                actions=["Use findings to create targeted fix/review tasks."],
                artifacts=[os.path.relpath(artifact, root)],
            )
        artifact = _write_maintainer_artifact(root, normalized, msg)
        return _res(
            skill=normalized,
            route=route,
            findings=["Maintainer artifact generated."],
            actions=["Attach artifact to maintainer workflow or release notes."],
            artifacts=[os.path.relpath(artifact, root)],
        )

    # --- route: cancel — remove the active-run marker and mark any
    # persistent-mode state as cancelled (best-effort on corrupt state) ---
    if route == "cancel":
        active_path = os.path.join(root, ".omg", "shadow", "active-run")
        if os.path.exists(active_path):
            os.remove(active_path)
        persistent_path = os.path.join(root, ".omg", "state", "persistent-mode.json")
        if os.path.exists(persistent_path):
            try:
                with open(persistent_path, "r", encoding="utf-8") as f:
                    persistent = json.load(f)
                if isinstance(persistent, dict):
                    persistent["status"] = "cancelled"
                    persistent["last_updated"] = _now()
                    with open(persistent_path, "w", encoding="utf-8") as f:
                        json.dump(persistent, f, indent=2, ensure_ascii=True)
            except (json.JSONDecodeError, OSError):
                pass
        return _res(
            skill=normalized,
            route=route,
            findings=["Active run state cleared."],
            actions=["Start a new run when ready."],
        )

    # Known route with no branch above: report as an unimplemented handler.
    return _res(
        skill=normalized,
        route=route,
        status="error",
        findings=["Route exists but has no handler."],
        actions=["Implement missing handler."],
    )
|
|
2171
|
+
|
|
2172
|
+
|
|
2173
|
+
def list_omg_skills() -> list[str]:
    """Alias for :func:`list_compat_skills` under the OMG-prefixed API."""
    return list_compat_skills()
|
|
2175
|
+
|
|
2176
|
+
|
|
2177
|
+
def list_omg_skill_contracts() -> list[dict[str, Any]]:
    """Alias for :func:`list_compat_skill_contracts` under the OMG-prefixed API."""
    return list_compat_skill_contracts()
|
|
2179
|
+
|
|
2180
|
+
|
|
2181
|
+
def get_omg_skill_contract(skill: str) -> dict[str, Any] | None:
    """Alias for :func:`get_compat_skill_contract` under the OMG-prefixed API."""
    return get_compat_skill_contract(skill)
|
|
2183
|
+
|
|
2184
|
+
|
|
2185
|
+
def build_omg_gap_report(project_dir: str | None = None) -> dict[str, Any]:
    """Alias for :func:`build_compat_gap_report` under the OMG-prefixed API."""
    return build_compat_gap_report(project_dir)
|
|
2187
|
+
|
|
2188
|
+
|
|
2189
|
+
def validate_omg_request(
    *,
    skill: str,
    problem: str,
    context: str,
    files: list[str] | None,
    expected_outcome: str,
) -> tuple[bool, str]:
    """Alias for :func:`validate_compat_request` under the OMG-prefixed API.

    Returns:
        ``(is_valid, reason)`` exactly as produced by the compat validator.
    """
    return validate_compat_request(
        skill=skill,
        problem=problem,
        context=context,
        files=files,
        expected_outcome=expected_outcome,
    )
|
|
2204
|
+
|
|
2205
|
+
|
|
2206
|
+
def dispatch_omg_skill(
    *,
    skill: str,
    problem: str = "",
    context: str = "",
    files: list[str] | None = None,
    expected_outcome: str = "",
    project_dir: str | None = None,
) -> dict[str, Any]:
    """Alias for :func:`dispatch_compat_skill` under the OMG-prefixed API.

    All arguments are forwarded unchanged.
    """
    return dispatch_compat_skill(
        skill=skill,
        problem=problem,
        context=context,
        files=files,
        expected_outcome=expected_outcome,
        project_dir=project_dir,
    )
|