@trac3er/oh-my-god 2.0.3 → 2.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.agents/skills/omg/AGENTS.fragment.md +5 -0
- package/.agents/skills/omg/codex-mcp.toml +4 -0
- package/.agents/skills/omg/control-plane/SKILL.md +11 -0
- package/.agents/skills/omg/control-plane/openai.yaml +14 -0
- package/.agents/skills/omg/hook-governor/SKILL.md +11 -0
- package/.agents/skills/omg/hook-governor/openai.yaml +11 -0
- package/.agents/skills/omg/lsp-pack/SKILL.md +11 -0
- package/.agents/skills/omg/lsp-pack/openai.yaml +11 -0
- package/.agents/skills/omg/mcp-fabric/SKILL.md +11 -0
- package/.agents/skills/omg/mcp-fabric/openai.yaml +13 -0
- package/.agents/skills/omg/secure-worktree-pipeline/SKILL.md +11 -0
- package/.agents/skills/omg/secure-worktree-pipeline/openai.yaml +12 -0
- package/.claude-plugin/marketplace.json +3 -3
- package/.claude-plugin/plugin.json +1 -1
- package/.mcp.json +20 -4
- package/CHANGELOG.md +10 -0
- package/OMG-setup.sh +9 -3
- package/OMG_COMPAT_CONTRACT.md +92 -0
- package/README.md +24 -4
- package/SECURITY.md +6 -0
- package/commands/OMG:api-twin.md +22 -0
- package/commands/OMG:preflight.md +26 -0
- package/commands/OMG:security-check.md +28 -0
- package/dist/enterprise/bundle/.agents/skills/omg/AGENTS.fragment.md +5 -0
- package/dist/enterprise/bundle/.agents/skills/omg/codex-mcp.toml +4 -0
- package/dist/enterprise/bundle/.agents/skills/omg/control-plane/SKILL.md +11 -0
- package/dist/enterprise/bundle/.agents/skills/omg/control-plane/openai.yaml +14 -0
- package/dist/enterprise/bundle/.agents/skills/omg/hook-governor/SKILL.md +11 -0
- package/dist/enterprise/bundle/.agents/skills/omg/hook-governor/openai.yaml +11 -0
- package/dist/enterprise/bundle/.agents/skills/omg/lsp-pack/SKILL.md +11 -0
- package/dist/enterprise/bundle/.agents/skills/omg/lsp-pack/openai.yaml +11 -0
- package/dist/enterprise/bundle/.agents/skills/omg/mcp-fabric/SKILL.md +11 -0
- package/dist/enterprise/bundle/.agents/skills/omg/mcp-fabric/openai.yaml +13 -0
- package/dist/enterprise/bundle/.agents/skills/omg/secure-worktree-pipeline/SKILL.md +11 -0
- package/dist/enterprise/bundle/.agents/skills/omg/secure-worktree-pipeline/openai.yaml +12 -0
- package/dist/enterprise/bundle/.claude-plugin/marketplace.json +36 -0
- package/dist/enterprise/bundle/.claude-plugin/plugin.json +23 -0
- package/dist/enterprise/bundle/.mcp.json +40 -0
- package/dist/enterprise/bundle/OMG_COMPAT_CONTRACT.md +92 -0
- package/dist/enterprise/bundle/settings.json +366 -0
- package/dist/enterprise/manifest.json +99 -0
- package/dist/public/bundle/.agents/skills/omg/AGENTS.fragment.md +5 -0
- package/dist/public/bundle/.agents/skills/omg/codex-mcp.toml +4 -0
- package/dist/public/bundle/.agents/skills/omg/control-plane/SKILL.md +11 -0
- package/dist/public/bundle/.agents/skills/omg/control-plane/openai.yaml +14 -0
- package/dist/public/bundle/.agents/skills/omg/hook-governor/SKILL.md +11 -0
- package/dist/public/bundle/.agents/skills/omg/hook-governor/openai.yaml +11 -0
- package/dist/public/bundle/.agents/skills/omg/lsp-pack/SKILL.md +11 -0
- package/dist/public/bundle/.agents/skills/omg/lsp-pack/openai.yaml +11 -0
- package/dist/public/bundle/.agents/skills/omg/mcp-fabric/SKILL.md +11 -0
- package/dist/public/bundle/.agents/skills/omg/mcp-fabric/openai.yaml +13 -0
- package/dist/public/bundle/.agents/skills/omg/secure-worktree-pipeline/SKILL.md +11 -0
- package/dist/public/bundle/.agents/skills/omg/secure-worktree-pipeline/openai.yaml +12 -0
- package/dist/public/bundle/.claude-plugin/marketplace.json +36 -0
- package/dist/public/bundle/.claude-plugin/plugin.json +23 -0
- package/dist/public/bundle/.mcp.json +40 -0
- package/dist/public/bundle/OMG_COMPAT_CONTRACT.md +92 -0
- package/dist/public/bundle/settings.json +366 -0
- package/dist/public/manifest.json +99 -0
- package/hooks/policy_engine.py +38 -7
- package/hooks/post-write.py +1 -1
- package/hooks/prompt-enhancer.py +1 -1
- package/hooks/security_validators.py +75 -0
- package/hooks/setup_wizard.py +43 -8
- package/hooks/shadow_manager.py +22 -2
- package/package.json +1 -1
- package/plugins/README.md +3 -1
- package/plugins/advanced/commands/OMG:deep-plan.md +1 -1
- package/plugins/advanced/commands/OMG:security-review.md +10 -113
- package/plugins/advanced/commands/OMG:ship.md +1 -1
- package/plugins/advanced/plugin.json +1 -10
- package/plugins/core/plugin.json +25 -2
- package/pyproject.toml +1 -1
- package/runtime/adoption.py +1 -1
- package/runtime/api_twin.py +130 -0
- package/runtime/compat.py +21 -1
- package/runtime/contract_compiler.py +698 -0
- package/runtime/domain_packs.py +34 -0
- package/runtime/guide_assert.py +45 -0
- package/runtime/mcp_config_writers.py +147 -30
- package/runtime/omg_compat_contract_snapshot.json +8 -7
- package/runtime/omg_contract_snapshot.json +8 -7
- package/runtime/omg_mcp_server.py +205 -0
- package/runtime/preflight.py +52 -0
- package/runtime/providers/codex_provider.py +2 -12
- package/runtime/providers/gemini_provider.py +2 -21
- package/runtime/providers/kimi_provider.py +2 -21
- package/runtime/runtime_profile.py +61 -0
- package/runtime/security_check.py +347 -0
- package/runtime/subagent_dispatcher.py +117 -10
- package/runtime/team_router.py +3 -1
- package/runtime/untrusted_content.py +102 -0
- package/scripts/omg.py +174 -1
- package/settings.json +66 -18
- package/tools/python_repl.py +33 -3
|
@@ -2,7 +2,6 @@
|
|
|
2
2
|
|
|
3
3
|
from __future__ import annotations
|
|
4
4
|
|
|
5
|
-
import json
|
|
6
5
|
import logging
|
|
7
6
|
import os
|
|
8
7
|
import shlex
|
|
@@ -12,6 +11,7 @@ import uuid
|
|
|
12
11
|
from typing import Any
|
|
13
12
|
|
|
14
13
|
from runtime.cli_provider import CLIProvider, register_provider
|
|
14
|
+
from runtime.mcp_config_writers import write_gemini_mcp_config
|
|
15
15
|
from runtime.tmux_session_manager import TmuxSessionManager
|
|
16
16
|
|
|
17
17
|
_logger = logging.getLogger(__name__)
|
|
@@ -102,26 +102,7 @@ class GeminiProvider(CLIProvider):
|
|
|
102
102
|
Uses JSON format with ``mcpServers`` key and ``httpUrl`` field,
|
|
103
103
|
merging into any existing configuration.
|
|
104
104
|
"""
|
|
105
|
-
config_path
|
|
106
|
-
os.makedirs(os.path.dirname(config_path), exist_ok=True)
|
|
107
|
-
|
|
108
|
-
# Load existing config or start fresh
|
|
109
|
-
existing: dict[str, Any] = {} # pyright: ignore[reportExplicitAny]
|
|
110
|
-
if os.path.exists(config_path):
|
|
111
|
-
with open(config_path) as fh:
|
|
112
|
-
try:
|
|
113
|
-
existing = json.load(fh)
|
|
114
|
-
except (json.JSONDecodeError, ValueError):
|
|
115
|
-
existing = {}
|
|
116
|
-
|
|
117
|
-
# Ensure mcpServers dict exists
|
|
118
|
-
if "mcpServers" not in existing:
|
|
119
|
-
existing["mcpServers"] = {}
|
|
120
|
-
|
|
121
|
-
existing["mcpServers"][server_name] = {"httpUrl": server_url}
|
|
122
|
-
|
|
123
|
-
with open(config_path, "w") as fh:
|
|
124
|
-
json.dump(existing, fh, indent=2)
|
|
105
|
+
write_gemini_mcp_config(server_url, server_name, config_path=self.get_config_path())
|
|
125
106
|
|
|
126
107
|
|
|
127
108
|
# -- auto-register on import -----------------------------------------------
|
|
@@ -2,7 +2,6 @@
|
|
|
2
2
|
|
|
3
3
|
from __future__ import annotations
|
|
4
4
|
|
|
5
|
-
import json
|
|
6
5
|
import logging
|
|
7
6
|
import os
|
|
8
7
|
import shlex
|
|
@@ -12,6 +11,7 @@ import uuid
|
|
|
12
11
|
from typing import Any
|
|
13
12
|
|
|
14
13
|
from runtime.cli_provider import CLIProvider, register_provider
|
|
14
|
+
from runtime.mcp_config_writers import write_kimi_mcp_config
|
|
15
15
|
from runtime.tmux_session_manager import TmuxSessionManager
|
|
16
16
|
|
|
17
17
|
_logger = logging.getLogger(__name__)
|
|
@@ -125,26 +125,7 @@ class KimiCodeProvider(CLIProvider):
|
|
|
125
125
|
Uses standard ``mcpServers`` JSON format with ``type: "http"`` and ``url`` field,
|
|
126
126
|
merging into any existing configuration.
|
|
127
127
|
"""
|
|
128
|
-
config_path
|
|
129
|
-
os.makedirs(os.path.dirname(config_path), exist_ok=True)
|
|
130
|
-
|
|
131
|
-
# Load existing config or start fresh
|
|
132
|
-
existing: dict[str, Any] = {} # pyright: ignore[reportExplicitAny]
|
|
133
|
-
if os.path.exists(config_path):
|
|
134
|
-
with open(config_path) as fh:
|
|
135
|
-
try:
|
|
136
|
-
existing = json.load(fh)
|
|
137
|
-
except (json.JSONDecodeError, ValueError):
|
|
138
|
-
existing = {}
|
|
139
|
-
|
|
140
|
-
# Ensure mcpServers dict exists
|
|
141
|
-
if "mcpServers" not in existing:
|
|
142
|
-
existing["mcpServers"] = {}
|
|
143
|
-
|
|
144
|
-
existing["mcpServers"][server_name] = {"type": "http", "url": server_url}
|
|
145
|
-
|
|
146
|
-
with open(config_path, "w") as fh:
|
|
147
|
-
json.dump(existing, fh, indent=2)
|
|
128
|
+
write_kimi_mcp_config(server_url, server_name, config_path=self.get_config_path())
|
|
148
129
|
|
|
149
130
|
|
|
150
131
|
# -- auto-register on import -----------------------------------------------
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
"""Runtime profile loading and parallelism budgets."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
import yaml
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
PROFILE_PRESETS: dict[str, dict[str, Any]] = {
|
|
11
|
+
"eco": {"profile": "eco", "max_workers": 2, "background_polling": False},
|
|
12
|
+
"balanced": {"profile": "balanced", "max_workers": 3, "background_polling": False},
|
|
13
|
+
"turbo": {"profile": "turbo", "max_workers": 5, "background_polling": True},
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def load_runtime_profile(project_dir: str) -> dict[str, Any]:
|
|
18
|
+
runtime_path = Path(project_dir) / ".omg" / "runtime.yaml"
|
|
19
|
+
profile_name = "balanced"
|
|
20
|
+
if runtime_path.exists():
|
|
21
|
+
try:
|
|
22
|
+
payload = yaml.safe_load(runtime_path.read_text(encoding="utf-8")) or {}
|
|
23
|
+
except Exception:
|
|
24
|
+
payload = {}
|
|
25
|
+
if isinstance(payload, dict):
|
|
26
|
+
candidate = str(payload.get("profile", profile_name)).strip()
|
|
27
|
+
if candidate in PROFILE_PRESETS:
|
|
28
|
+
profile_name = candidate
|
|
29
|
+
return dict(PROFILE_PRESETS[profile_name])
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def resolve_parallel_workers(project_dir: str, *, requested_workers: int) -> int:
|
|
33
|
+
profile = load_runtime_profile(project_dir)
|
|
34
|
+
max_workers = int(profile["max_workers"])
|
|
35
|
+
cli_cap = _load_cli_parallel_cap(project_dir)
|
|
36
|
+
if cli_cap is not None:
|
|
37
|
+
max_workers = min(max_workers, cli_cap)
|
|
38
|
+
return max(1, min(requested_workers, max_workers))
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _load_cli_parallel_cap(project_dir: str) -> int | None:
|
|
42
|
+
config_path = Path(project_dir) / ".omg" / "state" / "cli-config.yaml"
|
|
43
|
+
if not config_path.exists():
|
|
44
|
+
return None
|
|
45
|
+
try:
|
|
46
|
+
payload = yaml.safe_load(config_path.read_text(encoding="utf-8")) or {}
|
|
47
|
+
except Exception:
|
|
48
|
+
return None
|
|
49
|
+
if not isinstance(payload, dict):
|
|
50
|
+
return None
|
|
51
|
+
cli_configs = payload.get("cli_configs", {})
|
|
52
|
+
if not isinstance(cli_configs, dict):
|
|
53
|
+
return None
|
|
54
|
+
caps = []
|
|
55
|
+
for config in cli_configs.values():
|
|
56
|
+
if not isinstance(config, dict):
|
|
57
|
+
continue
|
|
58
|
+
value = config.get("max_parallel_agents")
|
|
59
|
+
if isinstance(value, int) and value > 0:
|
|
60
|
+
caps.append(value)
|
|
61
|
+
return min(caps) if caps else None
|
|
@@ -0,0 +1,347 @@
|
|
|
1
|
+
"""Canonical OMG security check engine."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import ast
|
|
5
|
+
from collections import Counter
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
import subprocess
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from hooks.security_validators import ensure_path_within_dir
|
|
11
|
+
from plugins.dephealth.cve_scanner import scan_for_cves
|
|
12
|
+
from plugins.dephealth.manifest_detector import detect_manifests
|
|
13
|
+
from plugins.dephealth.vuln_analyzer import analyze_reachability
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
SEVERITY_ORDER = {
|
|
17
|
+
"critical": 0,
|
|
18
|
+
"high": 1,
|
|
19
|
+
"medium": 2,
|
|
20
|
+
"low": 3,
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
_PYTHON_AST_RULES: tuple[tuple[str, str, str, str], ...] = (
|
|
24
|
+
("B602", "subprocess-shell-true", "high", "Avoid shell=True in subprocess calls."),
|
|
25
|
+
("B307", "eval-use", "high", "Replace eval with explicit parsing."),
|
|
26
|
+
("B102", "exec-use", "high", "Replace exec with explicit control flow."),
|
|
27
|
+
("B301", "pickle-load", "high", "Avoid unsafe deserialization of pickle payloads."),
|
|
28
|
+
)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def run_security_check(
    *,
    project_dir: str,
    scope: str = ".",
    include_live_enrichment: bool = False,
) -> dict[str, Any]:
    """Run the canonical security scan over *scope* and return a result payload.

    Combines the lightweight Python AST scan with dependency-health findings,
    sorts them by severity rank then rule id, and wraps them in a
    ``SecurityCheckResult`` document with per-severity / per-source tallies.
    """
    target = _resolve_scope(project_dir, scope)

    collected: list[dict[str, Any]] = []
    collected += _scan_python_ast(target)
    collected += _scan_dependency_health(target, include_live_enrichment)
    # Unknown severities sort last (rank 99); id breaks ties deterministically.
    collected.sort(key=lambda item: (SEVERITY_ORDER.get(item["severity"], 99), item["id"]))

    by_severity = Counter(item["severity"] for item in collected)
    by_source = Counter(item["source"] for item in collected)
    return {
        "schema": "SecurityCheckResult",
        "status": "ok",
        "scope": _display_scope(project_dir, target),
        "findings": collected,
        "summary": {
            "finding_count": len(collected),
            "by_severity": dict(sorted(by_severity.items())),
            "by_source": dict(sorted(by_source.items())),
            "live_enrichment": include_live_enrichment,
        },
        "provenance": [],
        "trust_scores": {},
    }
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _resolve_scope(project_dir: str, scope: str) -> Path:
    """Resolve *scope* to an absolute path, confined to the project when relative."""
    base = Path(project_dir).resolve()
    if not scope:
        return base
    candidate = Path(scope)
    if candidate.is_absolute():
        return candidate.resolve()
    # Relative scopes must stay inside the project tree (path-traversal guard).
    return Path(ensure_path_within_dir(base, base / candidate))
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _display_scope(project_dir: str, scope_path: Path) -> str:
|
|
75
|
+
base = Path(project_dir).resolve()
|
|
76
|
+
try:
|
|
77
|
+
return scope_path.relative_to(base).as_posix() or "."
|
|
78
|
+
except ValueError:
|
|
79
|
+
return str(scope_path)
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def _scan_python_ast(scope_path: Path) -> list[dict[str, Any]]:
    """Scan Python sources under *scope_path* with the AST rules plus Bandit."""
    results: list[dict[str, Any]] = []
    for candidate in _iter_python_files(scope_path):
        try:
            text = candidate.read_text(encoding="utf-8")
        except OSError:
            # Unreadable file: skip it rather than abort the whole scan.
            continue
        results.extend(_scan_python_file(candidate, text))
    results.extend(_run_bandit_if_available(scope_path))
    return results
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def _iter_python_files(scope_path: Path) -> list[Path]:
|
|
95
|
+
if scope_path.is_file():
|
|
96
|
+
return [scope_path] if scope_path.suffix == ".py" else []
|
|
97
|
+
if not scope_path.exists():
|
|
98
|
+
return []
|
|
99
|
+
return sorted(path for path in scope_path.rglob("*.py") if path.is_file())
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def _scan_python_file(path: Path, source: str) -> list[dict[str, Any]]:
    """Apply the call-site rules to one Python source; [] if it fails to parse."""
    try:
        module = ast.parse(source)
    except SyntaxError:
        return []
    # Only call expressions carry findings; everything else is ignored.
    return [
        finding
        for node in ast.walk(module)
        if isinstance(node, ast.Call)
        for finding in _call_findings(path, node, source)
    ]
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def _call_findings(path: Path, node: ast.Call, source: str) -> list[dict[str, Any]]:
|
|
116
|
+
findings: list[dict[str, Any]] = []
|
|
117
|
+
callee = _call_name(node.func)
|
|
118
|
+
if callee in {"subprocess.run", "subprocess.Popen", "os.system"}:
|
|
119
|
+
if any(keyword.arg == "shell" and isinstance(keyword.value, ast.Constant) and keyword.value.value is True for keyword in node.keywords):
|
|
120
|
+
findings.append(
|
|
121
|
+
_finding(
|
|
122
|
+
rule_id="B602",
|
|
123
|
+
source_name="bandit-lite",
|
|
124
|
+
category="python_ast",
|
|
125
|
+
severity="high",
|
|
126
|
+
path=path,
|
|
127
|
+
line=getattr(node, "lineno", 1),
|
|
128
|
+
message="subprocess call uses shell=True",
|
|
129
|
+
recommendation="Avoid shell=True in subprocess calls.",
|
|
130
|
+
snippet=_source_line(source, getattr(node, "lineno", 1)),
|
|
131
|
+
)
|
|
132
|
+
)
|
|
133
|
+
if callee == "eval":
|
|
134
|
+
findings.append(
|
|
135
|
+
_finding(
|
|
136
|
+
rule_id="B307",
|
|
137
|
+
source_name="bandit-lite",
|
|
138
|
+
category="python_ast",
|
|
139
|
+
severity="high",
|
|
140
|
+
path=path,
|
|
141
|
+
line=getattr(node, "lineno", 1),
|
|
142
|
+
message="eval() detected",
|
|
143
|
+
recommendation="Replace eval with explicit parsing.",
|
|
144
|
+
snippet=_source_line(source, getattr(node, "lineno", 1)),
|
|
145
|
+
)
|
|
146
|
+
)
|
|
147
|
+
if callee == "exec":
|
|
148
|
+
findings.append(
|
|
149
|
+
_finding(
|
|
150
|
+
rule_id="B102",
|
|
151
|
+
source_name="bandit-lite",
|
|
152
|
+
category="python_ast",
|
|
153
|
+
severity="high",
|
|
154
|
+
path=path,
|
|
155
|
+
line=getattr(node, "lineno", 1),
|
|
156
|
+
message="exec() detected",
|
|
157
|
+
recommendation="Replace exec with explicit control flow.",
|
|
158
|
+
snippet=_source_line(source, getattr(node, "lineno", 1)),
|
|
159
|
+
)
|
|
160
|
+
)
|
|
161
|
+
if callee in {"pickle.load", "pickle.loads"}:
|
|
162
|
+
findings.append(
|
|
163
|
+
_finding(
|
|
164
|
+
rule_id="B301",
|
|
165
|
+
source_name="bandit-lite",
|
|
166
|
+
category="python_ast",
|
|
167
|
+
severity="high",
|
|
168
|
+
path=path,
|
|
169
|
+
line=getattr(node, "lineno", 1),
|
|
170
|
+
message="pickle deserialization detected",
|
|
171
|
+
recommendation="Avoid unsafe deserialization of pickle payloads.",
|
|
172
|
+
snippet=_source_line(source, getattr(node, "lineno", 1)),
|
|
173
|
+
)
|
|
174
|
+
)
|
|
175
|
+
return findings
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def _call_name(func: ast.AST) -> str:
|
|
179
|
+
if isinstance(func, ast.Name):
|
|
180
|
+
return func.id
|
|
181
|
+
if isinstance(func, ast.Attribute):
|
|
182
|
+
prefix = _call_name(func.value)
|
|
183
|
+
return f"{prefix}.{func.attr}" if prefix else func.attr
|
|
184
|
+
return ""
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
def _source_line(source: str, line: int) -> str:
|
|
188
|
+
lines = source.splitlines()
|
|
189
|
+
if 1 <= line <= len(lines):
|
|
190
|
+
return lines[line - 1].strip()
|
|
191
|
+
return ""
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def _run_bandit_if_available(scope_path: Path) -> list[dict[str, Any]]:
|
|
195
|
+
if not _command_exists("bandit"):
|
|
196
|
+
return []
|
|
197
|
+
|
|
198
|
+
cmd = ["bandit", "-r", str(scope_path), "-f", "json"]
|
|
199
|
+
proc = subprocess.run(cmd, capture_output=True, text=True, check=False, timeout=30)
|
|
200
|
+
if proc.returncode not in {0, 1}:
|
|
201
|
+
return []
|
|
202
|
+
try:
|
|
203
|
+
import json
|
|
204
|
+
|
|
205
|
+
payload = json.loads(proc.stdout or "{}")
|
|
206
|
+
except Exception:
|
|
207
|
+
return []
|
|
208
|
+
|
|
209
|
+
findings: list[dict[str, Any]] = []
|
|
210
|
+
for item in payload.get("results", []):
|
|
211
|
+
issue_severity = str(item.get("issue_severity", "LOW")).lower()
|
|
212
|
+
findings.append(
|
|
213
|
+
{
|
|
214
|
+
"id": str(item.get("test_id", "bandit")),
|
|
215
|
+
"source": "bandit",
|
|
216
|
+
"category": "python_ast",
|
|
217
|
+
"severity": "medium" if issue_severity == "medium" else ("critical" if issue_severity == "critical" else issue_severity),
|
|
218
|
+
"exploitability": "unknown",
|
|
219
|
+
"reachability": "unknown",
|
|
220
|
+
"evidence": {
|
|
221
|
+
"path": str(item.get("filename", "")),
|
|
222
|
+
"line": int(item.get("line_number", 1)),
|
|
223
|
+
"snippet": str(item.get("code", "")).strip(),
|
|
224
|
+
},
|
|
225
|
+
"recommendation": str(item.get("more_info", "")) or "Review Bandit finding and remediate.",
|
|
226
|
+
"message": str(item.get("issue_text", "Bandit finding")),
|
|
227
|
+
}
|
|
228
|
+
)
|
|
229
|
+
return findings
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def _command_exists(command: str) -> bool:
|
|
233
|
+
from shutil import which
|
|
234
|
+
|
|
235
|
+
return which(command) is not None
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
def _scan_dependency_health(scope_path: Path, include_live_enrichment: bool) -> list[dict[str, Any]]:
    """Translate OSV vulnerability results for the project's manifests into findings.

    Live enrichment is opt-in; without it (or without any declared
    dependencies) no lookup happens and the result is empty.
    """
    root = str(scope_path)
    dependencies: list[dict[str, str]] = []
    for package in detect_manifests(root).packages:
        if not package.name:
            continue
        dependencies.append(
            {
                "name": package.name,
                "version": _normalize_version(package.version),
                "ecosystem": _ecosystem_from_manifest(package.source_manifest),
            }
        )
    if not dependencies or not include_live_enrichment:
        return []

    raw_results = scan_for_cves(dependencies, root).get("results", {})
    findings: list[dict[str, Any]] = []
    for dependency in dependencies:
        name = dependency["name"]
        for vuln in raw_results.get(name, []):
            # Reachability analysis decides whether the vuln matters here.
            reachability = analyze_reachability(
                {
                    "package": name,
                    "id": vuln.get("id", ""),
                    "summary": vuln.get("summary", ""),
                    "fixed_version": vuln.get("fixed_version", ""),
                },
                root,
            )
            findings.append(
                {
                    "id": str(vuln.get("id", "")),
                    "source": "osv",
                    "category": "dependency",
                    "severity": _normalize_severity(str(vuln.get("severity", "unknown"))),
                    "exploitability": "unknown",
                    "reachability": str(reachability.get("reachability", "unknown")).lower(),
                    "evidence": {
                        "package": name,
                        "version": dependency["version"],
                        "fixed_version": str(vuln.get("fixed_version", "")),
                        "summary": str(vuln.get("summary", "")),
                    },
                    "recommendation": reachability.get("recommendation", "Upgrade the dependency to a fixed version."),
                    "message": str(vuln.get("summary", "")) or f"Known vulnerability in {name}",
                }
            )
    return findings
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
def _normalize_version(version: str) -> str:
|
|
289
|
+
normalized = (version or "").strip()
|
|
290
|
+
for prefix in ("==", ">=", "<=", "~=", "^", ">"):
|
|
291
|
+
if normalized.startswith(prefix):
|
|
292
|
+
return normalized[len(prefix):].strip()
|
|
293
|
+
return normalized
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
def _ecosystem_from_manifest(manifest_path: str) -> str:
|
|
297
|
+
suffix = Path(manifest_path).name
|
|
298
|
+
return {
|
|
299
|
+
"package.json": "npm",
|
|
300
|
+
"requirements.txt": "PyPI",
|
|
301
|
+
"pyproject.toml": "PyPI",
|
|
302
|
+
"Cargo.toml": "crates.io",
|
|
303
|
+
"go.mod": "Go",
|
|
304
|
+
"Gemfile": "RubyGems",
|
|
305
|
+
}.get(suffix, "npm")
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
def _normalize_severity(raw: str) -> str:
|
|
309
|
+
lowered = raw.lower()
|
|
310
|
+
if "critical" in lowered:
|
|
311
|
+
return "critical"
|
|
312
|
+
if "high" in lowered:
|
|
313
|
+
return "high"
|
|
314
|
+
if "medium" in lowered or "moderate" in lowered:
|
|
315
|
+
return "medium"
|
|
316
|
+
if "low" in lowered:
|
|
317
|
+
return "low"
|
|
318
|
+
return "medium"
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
def _finding(
|
|
322
|
+
*,
|
|
323
|
+
rule_id: str,
|
|
324
|
+
source_name: str,
|
|
325
|
+
category: str,
|
|
326
|
+
severity: str,
|
|
327
|
+
path: Path,
|
|
328
|
+
line: int,
|
|
329
|
+
message: str,
|
|
330
|
+
recommendation: str,
|
|
331
|
+
snippet: str,
|
|
332
|
+
) -> dict[str, Any]:
|
|
333
|
+
return {
|
|
334
|
+
"id": rule_id,
|
|
335
|
+
"source": source_name,
|
|
336
|
+
"category": category,
|
|
337
|
+
"severity": severity,
|
|
338
|
+
"exploitability": "unknown",
|
|
339
|
+
"reachability": "reachable",
|
|
340
|
+
"evidence": {
|
|
341
|
+
"path": str(path),
|
|
342
|
+
"line": line,
|
|
343
|
+
"snippet": snippet,
|
|
344
|
+
},
|
|
345
|
+
"recommendation": recommendation,
|
|
346
|
+
"message": message,
|
|
347
|
+
}
|
|
@@ -8,8 +8,10 @@ Feature flag: OMG_PARALLEL_SUBAGENTS_ENABLED (default: False)
|
|
|
8
8
|
from __future__ import annotations
|
|
9
9
|
|
|
10
10
|
import os
|
|
11
|
+
import json
|
|
12
|
+
import shlex
|
|
13
|
+
import subprocess
|
|
11
14
|
import sys
|
|
12
|
-
import time
|
|
13
15
|
import uuid
|
|
14
16
|
import threading
|
|
15
17
|
from concurrent.futures import ThreadPoolExecutor
|
|
@@ -100,7 +102,6 @@ def _load_job_from_disk(job_id: str) -> dict[str, Any] | None:
|
|
|
100
102
|
if not os.path.exists(path):
|
|
101
103
|
return None
|
|
102
104
|
try:
|
|
103
|
-
import json
|
|
104
105
|
with open(path, "r", encoding="utf-8") as f:
|
|
105
106
|
return json.load(f)
|
|
106
107
|
except (OSError, ValueError):
|
|
@@ -175,6 +176,97 @@ def _check_git_available() -> bool:
|
|
|
175
176
|
return shutil.which("git") is not None
|
|
176
177
|
|
|
177
178
|
|
|
179
|
+
def _write_job_evidence(job_id: str, payload: dict[str, Any], *, project_dir: str) -> str:
|
|
180
|
+
evidence_dir = os.path.join(project_dir, ".omg", "evidence", "subagents")
|
|
181
|
+
os.makedirs(evidence_dir, exist_ok=True)
|
|
182
|
+
out_path = os.path.join(evidence_dir, f"{job_id}.json")
|
|
183
|
+
with open(out_path, "w", encoding="utf-8") as f:
|
|
184
|
+
json.dump(payload, f, indent=2, ensure_ascii=True)
|
|
185
|
+
return out_path
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def _run_configured_worker(command_text: str, prompt: str, *, project_dir: str, worker: str) -> dict[str, Any]:
|
|
189
|
+
command_text = command_text.strip()
|
|
190
|
+
if not command_text:
|
|
191
|
+
return {"status": "error", "worker": worker, "message": "worker command not configured"}
|
|
192
|
+
|
|
193
|
+
if "{prompt}" in command_text or "{project_dir}" in command_text:
|
|
194
|
+
try:
|
|
195
|
+
cmd = [
|
|
196
|
+
token.format(prompt=prompt, project_dir=project_dir)
|
|
197
|
+
for token in shlex.split(command_text)
|
|
198
|
+
]
|
|
199
|
+
except (ValueError, KeyError) as exc:
|
|
200
|
+
return {"status": "error", "worker": worker, "message": f"invalid worker command template: {exc}"}
|
|
201
|
+
else:
|
|
202
|
+
cmd = shlex.split(command_text) + [prompt]
|
|
203
|
+
try:
|
|
204
|
+
result = subprocess.run(
|
|
205
|
+
cmd,
|
|
206
|
+
cwd=project_dir,
|
|
207
|
+
capture_output=True,
|
|
208
|
+
text=True,
|
|
209
|
+
check=False,
|
|
210
|
+
timeout=120,
|
|
211
|
+
)
|
|
212
|
+
return {
|
|
213
|
+
"status": "ok" if result.returncode == 0 else "error",
|
|
214
|
+
"worker": worker,
|
|
215
|
+
"output": result.stdout,
|
|
216
|
+
"stderr": result.stderr,
|
|
217
|
+
"exit_code": result.returncode,
|
|
218
|
+
}
|
|
219
|
+
except subprocess.TimeoutExpired:
|
|
220
|
+
return {"status": "error", "worker": worker, "message": f"{worker} worker timed out"}
|
|
221
|
+
except OSError as exc:
|
|
222
|
+
return {"status": "error", "worker": worker, "message": str(exc)}
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def _dispatch_job_task(record: dict[str, Any], *, project_dir: str) -> dict[str, Any]:
|
|
226
|
+
runner_mode = os.environ.get("OMG_SUBAGENT_RUNNER", "").strip().lower()
|
|
227
|
+
if runner_mode == "stub":
|
|
228
|
+
return {
|
|
229
|
+
"status": "ok",
|
|
230
|
+
"worker": os.environ.get("OMG_SUBAGENT_STUB_WORKER", "stub"),
|
|
231
|
+
"output": os.environ.get("OMG_SUBAGENT_STUB_OUTPUT", record["task_text"]),
|
|
232
|
+
"exit_code": 0,
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
if runner_mode == "claude":
|
|
236
|
+
return _run_configured_worker(
|
|
237
|
+
os.environ.get("OMG_CLAUDE_WORKER_CMD", ""),
|
|
238
|
+
record["task_text"],
|
|
239
|
+
project_dir=project_dir,
|
|
240
|
+
worker="claude",
|
|
241
|
+
)
|
|
242
|
+
|
|
243
|
+
from runtime.team_router import dispatch_to_model
|
|
244
|
+
|
|
245
|
+
dispatched = dispatch_to_model(str(record["agent_name"]), str(record["task_text"]), project_dir)
|
|
246
|
+
if "output" in dispatched or "exit_code" in dispatched:
|
|
247
|
+
worker = str(dispatched.get("model", "unknown")).replace("-cli", "")
|
|
248
|
+
return {
|
|
249
|
+
"status": "ok" if int(dispatched.get("exit_code", 0) or 0) == 0 else "error",
|
|
250
|
+
"worker": worker,
|
|
251
|
+
"output": str(dispatched.get("output", "")),
|
|
252
|
+
"exit_code": int(dispatched.get("exit_code", 0) or 0),
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
if dispatched.get("fallback") == "claude":
|
|
256
|
+
return _run_configured_worker(
|
|
257
|
+
os.environ.get("OMG_CLAUDE_WORKER_CMD", ""),
|
|
258
|
+
record["task_text"],
|
|
259
|
+
project_dir=project_dir,
|
|
260
|
+
worker="claude",
|
|
261
|
+
)
|
|
262
|
+
|
|
263
|
+
return {
|
|
264
|
+
"status": "error",
|
|
265
|
+
"worker": str(dispatched.get("fallback", "unknown")),
|
|
266
|
+
"message": str(dispatched.get("error", "worker unavailable")),
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
|
|
178
270
|
def _setup_worktree(job_id: str) -> str | None:
|
|
179
271
|
"""Attempt to create a git worktree for job isolation.
|
|
180
272
|
|
|
@@ -232,8 +324,7 @@ def _cleanup_worktree(worktree_dir: str) -> None:
|
|
|
232
324
|
def _run_job(job_id: str) -> None:
|
|
233
325
|
"""Execute a subagent job in the thread pool.
|
|
234
326
|
|
|
235
|
-
Updates job status
|
|
236
|
-
NOTE: Does NOT actually spawn Claude — simulates execution for now.
|
|
327
|
+
Updates job status, dispatches to a local worker, and persists artifacts/evidence.
|
|
237
328
|
"""
|
|
238
329
|
with _lock:
|
|
239
330
|
record = _jobs.get(job_id)
|
|
@@ -247,6 +338,7 @@ def _run_job(job_id: str) -> None:
|
|
|
247
338
|
_persist_job(job_id, record)
|
|
248
339
|
|
|
249
340
|
worktree_dir: str | None = None
|
|
341
|
+
project_dir = _get_project_dir()
|
|
250
342
|
try:
|
|
251
343
|
# Setup isolation if requested
|
|
252
344
|
if record.get("isolation") == "worktree":
|
|
@@ -256,17 +348,32 @@ def _run_job(job_id: str) -> None:
|
|
|
256
348
|
record["worktree"] = worktree_dir
|
|
257
349
|
_persist_job(job_id, record)
|
|
258
350
|
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
351
|
+
active_project_dir = worktree_dir or project_dir
|
|
352
|
+
dispatch_result = _dispatch_job_task(record, project_dir=active_project_dir)
|
|
353
|
+
if dispatch_result.get("status") != "ok":
|
|
354
|
+
raise RuntimeError(str(dispatch_result.get("message", "worker dispatch failed")))
|
|
263
355
|
|
|
264
356
|
artifact = {
|
|
265
|
-
"type": "result",
|
|
357
|
+
"type": "worker-result",
|
|
266
358
|
"agent": record["agent_name"],
|
|
267
|
-
"
|
|
359
|
+
"worker": dispatch_result.get("worker", "unknown"),
|
|
360
|
+
"exit_code": dispatch_result.get("exit_code", 0),
|
|
361
|
+
"output": dispatch_result.get("output", ""),
|
|
268
362
|
"produced_at": datetime.now(timezone.utc).isoformat(),
|
|
269
363
|
}
|
|
364
|
+
evidence_payload = {
|
|
365
|
+
"schema": "OmgSubagentEvidence",
|
|
366
|
+
"job_id": job_id,
|
|
367
|
+
"agent_name": record["agent_name"],
|
|
368
|
+
"task_text": record["task_text"],
|
|
369
|
+
"worker": dispatch_result.get("worker", "unknown"),
|
|
370
|
+
"exit_code": dispatch_result.get("exit_code", 0),
|
|
371
|
+
"output": dispatch_result.get("output", ""),
|
|
372
|
+
"worktree": worktree_dir,
|
|
373
|
+
"project_dir": active_project_dir,
|
|
374
|
+
}
|
|
375
|
+
evidence_path = _write_job_evidence(job_id, evidence_payload, project_dir=project_dir)
|
|
376
|
+
artifact["evidence_path"] = os.path.relpath(evidence_path, project_dir)
|
|
270
377
|
|
|
271
378
|
with _lock:
|
|
272
379
|
# Check for cancellation mid-execution
|
package/runtime/team_router.py
CHANGED
|
@@ -30,6 +30,8 @@ try:
|
|
|
30
30
|
except ImportError:
|
|
31
31
|
pass
|
|
32
32
|
|
|
33
|
+
from runtime.runtime_profile import resolve_parallel_workers
|
|
34
|
+
|
|
33
35
|
@dataclass
|
|
34
36
|
class TeamDispatchRequest:
|
|
35
37
|
target: str # codex | gemini | ccg | auto
|
|
@@ -659,7 +661,7 @@ def execute_agents_parallel(
|
|
|
659
661
|
if not sorted_tasks:
|
|
660
662
|
return []
|
|
661
663
|
|
|
662
|
-
max_workers = min(len(sorted_tasks), 5)
|
|
664
|
+
max_workers = resolve_parallel_workers(project_dir, requested_workers=min(len(sorted_tasks), 5))
|
|
663
665
|
results_by_index: dict[int, dict[str, Any]] = {}
|
|
664
666
|
|
|
665
667
|
with ThreadPoolExecutor(max_workers=max_workers) as pool:
|