@tt-a1i/mco 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +190 -0
- package/bin/mco.js +19 -0
- package/mco +16 -0
- package/package.json +21 -0
- package/runtime/__init__.py +8 -0
- package/runtime/adapters/__init__.py +7 -0
- package/runtime/adapters/claude.py +60 -0
- package/runtime/adapters/codex.py +84 -0
- package/runtime/adapters/gemini.py +48 -0
- package/runtime/adapters/opencode.py +38 -0
- package/runtime/adapters/parsing.py +305 -0
- package/runtime/adapters/qwen.py +38 -0
- package/runtime/adapters/shim.py +251 -0
- package/runtime/artifacts.py +40 -0
- package/runtime/cli.py +341 -0
- package/runtime/config.py +189 -0
- package/runtime/contracts.py +127 -0
- package/runtime/errors.py +43 -0
- package/runtime/orchestrator.py +241 -0
- package/runtime/retry.py +15 -0
- package/runtime/review_engine.py +806 -0
- package/runtime/schemas/review_findings.schema.json +94 -0
- package/runtime/types.py +71 -0
package/runtime/cli.py
ADDED
|
@@ -0,0 +1,341 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import json
|
|
5
|
+
import sys
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Dict, List
|
|
8
|
+
|
|
9
|
+
from .config import ReviewConfig, ReviewPolicy, load_review_config
|
|
10
|
+
from .review_engine import ReviewRequest, run_review
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _render_user_readable_report(
|
|
14
|
+
command: str,
|
|
15
|
+
result_mode: str,
|
|
16
|
+
providers: List[str],
|
|
17
|
+
payload: Dict[str, object],
|
|
18
|
+
provider_results: Dict[str, Dict[str, object]],
|
|
19
|
+
) -> str:
|
|
20
|
+
lines: List[str] = []
|
|
21
|
+
title = "Review" if command == "review" else "Run"
|
|
22
|
+
lines.append(f"{title} Result")
|
|
23
|
+
lines.append("")
|
|
24
|
+
lines.append("Execution Summary")
|
|
25
|
+
lines.append(f"- task_id: {payload['task_id']}")
|
|
26
|
+
lines.append(f"- decision: {payload['decision']}")
|
|
27
|
+
lines.append(f"- terminal_state: {payload['terminal_state']}")
|
|
28
|
+
lines.append(f"- providers: {', '.join(providers)}")
|
|
29
|
+
lines.append(
|
|
30
|
+
f"- provider_success/failure: {payload['provider_success_count']}/{payload['provider_failure_count']}"
|
|
31
|
+
)
|
|
32
|
+
lines.append(f"- findings_count: {payload['findings_count']}")
|
|
33
|
+
lines.append(f"- parse_success/failure: {payload['parse_success_count']}/{payload['parse_failure_count']}")
|
|
34
|
+
lines.append(f"- schema_valid_count: {payload['schema_valid_count']}")
|
|
35
|
+
lines.append("")
|
|
36
|
+
lines.append("Provider Details")
|
|
37
|
+
for provider in sorted(provider_results.keys()):
|
|
38
|
+
details = provider_results.get(provider, {})
|
|
39
|
+
success = bool(details.get("success"))
|
|
40
|
+
attempts = details.get("attempts")
|
|
41
|
+
final_error = details.get("final_error")
|
|
42
|
+
parse_reason = details.get("parse_reason")
|
|
43
|
+
findings_count = details.get("findings_count")
|
|
44
|
+
lines.append(
|
|
45
|
+
f"- {provider}: success={success}, attempts={attempts}, final_error={final_error}, parse_reason={parse_reason}, findings={findings_count}"
|
|
46
|
+
)
|
|
47
|
+
excerpt = str(details.get("output_excerpt", "")).strip()
|
|
48
|
+
if excerpt:
|
|
49
|
+
lines.append(f" excerpt: {excerpt}")
|
|
50
|
+
lines.append("")
|
|
51
|
+
if result_mode in ("artifact", "both"):
|
|
52
|
+
lines.append("Artifacts")
|
|
53
|
+
lines.append(f"- artifact_root: {payload['artifact_root']}")
|
|
54
|
+
else:
|
|
55
|
+
lines.append("Artifacts")
|
|
56
|
+
lines.append("- artifact files are skipped in stdout mode")
|
|
57
|
+
return "\n".join(lines)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def _parse_providers(raw: str) -> List[str]:
|
|
61
|
+
seen = set()
|
|
62
|
+
providers: List[str] = []
|
|
63
|
+
for item in raw.split(","):
|
|
64
|
+
value = item.strip()
|
|
65
|
+
if not value or value in seen:
|
|
66
|
+
continue
|
|
67
|
+
seen.add(value)
|
|
68
|
+
providers.append(value)
|
|
69
|
+
return providers
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _parse_provider_timeouts(raw: str) -> Dict[str, int]:
|
|
73
|
+
result: Dict[str, int] = {}
|
|
74
|
+
if not raw.strip():
|
|
75
|
+
return result
|
|
76
|
+
for chunk in raw.split(","):
|
|
77
|
+
pair = chunk.strip()
|
|
78
|
+
if not pair or "=" not in pair:
|
|
79
|
+
continue
|
|
80
|
+
provider, timeout_text = pair.split("=", 1)
|
|
81
|
+
provider_name = provider.strip()
|
|
82
|
+
if not provider_name:
|
|
83
|
+
continue
|
|
84
|
+
try:
|
|
85
|
+
timeout = int(timeout_text.strip())
|
|
86
|
+
except Exception:
|
|
87
|
+
continue
|
|
88
|
+
if timeout <= 0:
|
|
89
|
+
continue
|
|
90
|
+
result[provider_name] = timeout
|
|
91
|
+
return result
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def _parse_paths(raw: str) -> List[str]:
|
|
95
|
+
paths = [item.strip() for item in raw.split(",") if item.strip()]
|
|
96
|
+
return paths if paths else ["."]
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def _parse_provider_permissions_json(raw: str) -> Dict[str, Dict[str, str]]:
|
|
100
|
+
if not raw.strip():
|
|
101
|
+
return {}
|
|
102
|
+
try:
|
|
103
|
+
payload = json.loads(raw)
|
|
104
|
+
except Exception:
|
|
105
|
+
return {}
|
|
106
|
+
if not isinstance(payload, dict):
|
|
107
|
+
return {}
|
|
108
|
+
|
|
109
|
+
result: Dict[str, Dict[str, str]] = {}
|
|
110
|
+
for provider, permissions in payload.items():
|
|
111
|
+
provider_name = str(provider).strip()
|
|
112
|
+
if not provider_name or not isinstance(permissions, dict):
|
|
113
|
+
continue
|
|
114
|
+
normalized: Dict[str, str] = {}
|
|
115
|
+
for key, value in permissions.items():
|
|
116
|
+
key_name = str(key).strip()
|
|
117
|
+
if not key_name:
|
|
118
|
+
continue
|
|
119
|
+
normalized[key_name] = str(value)
|
|
120
|
+
if normalized:
|
|
121
|
+
result[provider_name] = normalized
|
|
122
|
+
return result
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def _merge_provider_permissions(
|
|
126
|
+
base: Dict[str, Dict[str, str]],
|
|
127
|
+
override: Dict[str, Dict[str, str]],
|
|
128
|
+
) -> Dict[str, Dict[str, str]]:
|
|
129
|
+
merged: Dict[str, Dict[str, str]] = {provider: dict(values) for provider, values in base.items()}
|
|
130
|
+
for provider, permissions in override.items():
|
|
131
|
+
current = merged.get(provider, {})
|
|
132
|
+
current.update(permissions)
|
|
133
|
+
merged[provider] = current
|
|
134
|
+
return merged
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def _add_common_execution_args(parser: argparse.ArgumentParser) -> None:
|
|
138
|
+
parser.add_argument("--repo", default=".", help="Repository root path")
|
|
139
|
+
parser.add_argument("--prompt", required=True, help="Task prompt")
|
|
140
|
+
parser.add_argument("--providers", default="", help="Comma-separated providers, e.g. claude,codex")
|
|
141
|
+
parser.add_argument("--config", default="", help="Config file path (.json or .yaml/.yml)")
|
|
142
|
+
parser.add_argument("--artifact-base", default="", help="Artifact base directory override")
|
|
143
|
+
parser.add_argument("--state-file", default="", help="Runtime state file override")
|
|
144
|
+
parser.add_argument("--task-id", default="", help="Optional stable task id")
|
|
145
|
+
parser.add_argument("--idempotency-key", default="", help="Optional stable idempotency key")
|
|
146
|
+
parser.add_argument("--target-paths", default=".", help="Comma-separated task scope paths")
|
|
147
|
+
parser.add_argument("--allow-paths", default="", help="Comma-separated allowed paths (default: .)")
|
|
148
|
+
parser.add_argument(
|
|
149
|
+
"--enforcement-mode",
|
|
150
|
+
choices=("strict", "best_effort"),
|
|
151
|
+
default="",
|
|
152
|
+
help="Permission enforcement mode (default: strict)",
|
|
153
|
+
)
|
|
154
|
+
parser.add_argument(
|
|
155
|
+
"--provider-permissions-json",
|
|
156
|
+
default="",
|
|
157
|
+
help="Provider permission mapping as JSON, e.g. '{\"codex\":{\"sandbox\":\"workspace-write\"}}'",
|
|
158
|
+
)
|
|
159
|
+
parser.add_argument(
|
|
160
|
+
"--strict-contract",
|
|
161
|
+
action="store_true",
|
|
162
|
+
help="Enforce strict findings JSON contract in review mode",
|
|
163
|
+
)
|
|
164
|
+
parser.add_argument(
|
|
165
|
+
"--max-provider-parallelism",
|
|
166
|
+
type=int,
|
|
167
|
+
default=None,
|
|
168
|
+
help="Override provider fan-out concurrency (0 means full parallelism)",
|
|
169
|
+
)
|
|
170
|
+
parser.add_argument(
|
|
171
|
+
"--provider-timeouts",
|
|
172
|
+
default="",
|
|
173
|
+
help="Comma-separated provider stall-timeout overrides, e.g. claude=120,codex=90",
|
|
174
|
+
)
|
|
175
|
+
parser.add_argument(
|
|
176
|
+
"--stall-timeout",
|
|
177
|
+
type=int,
|
|
178
|
+
default=None,
|
|
179
|
+
help="Override default stall timeout seconds (no output progress => cancel)",
|
|
180
|
+
)
|
|
181
|
+
parser.add_argument(
|
|
182
|
+
"--poll-interval",
|
|
183
|
+
type=float,
|
|
184
|
+
default=None,
|
|
185
|
+
help="Override poll interval seconds for provider status checks",
|
|
186
|
+
)
|
|
187
|
+
parser.add_argument(
|
|
188
|
+
"--review-hard-timeout",
|
|
189
|
+
type=int,
|
|
190
|
+
default=None,
|
|
191
|
+
help="Override review-mode hard deadline seconds (0 disables hard deadline)",
|
|
192
|
+
)
|
|
193
|
+
parser.add_argument(
|
|
194
|
+
"--result-mode",
|
|
195
|
+
choices=("artifact", "stdout", "both"),
|
|
196
|
+
default="artifact",
|
|
197
|
+
help="Result delivery mode: artifact files, stdout payload, or both",
|
|
198
|
+
)
|
|
199
|
+
parser.add_argument("--json", action="store_true", help="Print machine-readable result JSON")
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
def build_parser() -> argparse.ArgumentParser:
    """Create the top-level `mco` argument parser with its two subcommands."""
    parser = argparse.ArgumentParser(prog="mco", description="MCO")
    commands = parser.add_subparsers(dest="command", required=True)
    for name, help_text in (
        ("run", "Run general multi-provider task execution"),
        ("review", "Run multi-provider review"),
    ):
        _add_common_execution_args(commands.add_parser(name, help=help_text))
    return parser
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def _resolve_config(args: argparse.Namespace) -> ReviewConfig:
    """Merge CLI arguments over the file/default config into an effective ReviewConfig.

    Precedence for every field: explicit CLI flag > config file > built-in default.
    """
    cfg = load_review_config(args.config or None)
    providers = _parse_providers(args.providers) if args.providers else cfg.providers
    artifact_base = args.artifact_base or cfg.artifact_base
    state_file = args.state_file or cfg.state_file
    # CLI per-provider timeout overrides are layered on top of config entries.
    provider_timeouts = dict(cfg.policy.provider_timeouts)
    provider_timeouts.update(_parse_provider_timeouts(args.provider_timeouts))
    allow_paths = _parse_paths(args.allow_paths) if args.allow_paths else list(cfg.policy.allow_paths)
    provider_permissions = _merge_provider_permissions(
        cfg.policy.provider_permissions,
        _parse_provider_permissions_json(args.provider_permissions_json),
    )
    max_provider_parallelism = cfg.policy.max_provider_parallelism
    if args.max_provider_parallelism is not None:
        max_provider_parallelism = args.max_provider_parallelism
    enforcement_mode = args.enforcement_mode or cfg.policy.enforcement_mode
    stall_timeout_seconds = cfg.policy.stall_timeout_seconds
    if args.stall_timeout is not None and args.stall_timeout > 0:
        stall_timeout_seconds = args.stall_timeout
    poll_interval_seconds = cfg.policy.poll_interval_seconds
    if args.poll_interval is not None and args.poll_interval > 0:
        poll_interval_seconds = args.poll_interval
    review_hard_timeout_seconds = cfg.policy.review_hard_timeout_seconds
    # Unlike the other timeouts, 0 is accepted here: it disables the review
    # hard deadline (see the --review-hard-timeout help text).
    if args.review_hard_timeout is not None and args.review_hard_timeout >= 0:
        review_hard_timeout_seconds = args.review_hard_timeout
    # --strict-contract can only turn enforcement on, never off.
    enforce_findings_contract = cfg.policy.enforce_findings_contract or bool(args.strict_contract)

    policy = ReviewPolicy(
        timeout_seconds=cfg.policy.timeout_seconds,
        stall_timeout_seconds=stall_timeout_seconds,
        poll_interval_seconds=poll_interval_seconds,
        review_hard_timeout_seconds=review_hard_timeout_seconds,
        enforce_findings_contract=enforce_findings_contract,
        max_retries=cfg.policy.max_retries,
        high_escalation_threshold=cfg.policy.high_escalation_threshold,
        require_non_empty_findings=cfg.policy.require_non_empty_findings,
        max_provider_parallelism=max_provider_parallelism,
        provider_timeouts=provider_timeouts,
        allow_paths=allow_paths,
        provider_permissions=provider_permissions,
        enforcement_mode=enforcement_mode,
    )
    return ReviewConfig(providers=providers, artifact_base=artifact_base, state_file=state_file, policy=policy)
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def main(argv: List[str] | None = None) -> int:
    """CLI entry point for `mco run` / `mco review`.

    Exit codes: 0 on success, 2 for invalid usage / no valid providers / a
    FAIL decision, 3 for an INCONCLUSIVE decision in review mode.
    """
    parser = build_parser()
    args = parser.parse_args(argv)
    if args.command not in ("run", "review"):
        # Defensive guard; with required subparsers argparse rejects unknown
        # commands before we get here. parser.error() raises SystemExit, so
        # nothing may follow it (the previous `return 2` was unreachable).
        parser.error("unsupported command")

    cfg = _resolve_config(args)
    repo_root = str(Path(args.repo).resolve())
    # Silently drop unknown provider names; fail only if none remain.
    providers = [item for item in cfg.providers if item in ("claude", "codex", "gemini", "opencode", "qwen")]
    if not providers:
        print("No valid providers selected.", file=sys.stderr)
        return 2

    req = ReviewRequest(
        repo_root=repo_root,
        prompt=args.prompt,
        providers=providers,  # type: ignore[arg-type]
        artifact_base=str(Path(cfg.artifact_base).resolve()),
        state_file=str(Path(cfg.state_file).resolve()),
        policy=cfg.policy,
        task_id=args.task_id or None,
        idempotency_key=args.idempotency_key or None,
        target_paths=[item.strip() for item in args.target_paths.split(",") if item.strip()],
    )
    review_mode = args.command == "review"
    write_artifacts = args.result_mode in ("artifact", "both")
    result = run_review(req, review_mode=review_mode, write_artifacts=write_artifacts)

    payload = {
        "command": args.command,
        "task_id": result.task_id,
        "artifact_root": result.artifact_root,
        "decision": result.decision,
        "terminal_state": result.terminal_state,
        "provider_success_count": sum(1 for item in result.provider_results.values() if bool(item.get("success"))),
        "provider_failure_count": sum(1 for item in result.provider_results.values() if not bool(item.get("success"))),
        "findings_count": result.findings_count,
        "parse_success_count": result.parse_success_count,
        "parse_failure_count": result.parse_failure_count,
        "schema_valid_count": result.schema_valid_count,
        "dropped_findings_count": result.dropped_findings_count,
        "created_new_task": result.created_new_task,
    }
    if args.json:
        json_payload = payload
        if args.result_mode != "artifact":
            # stdout/both modes embed full per-provider detail in the JSON;
            # artifact mode keeps the JSON summary-only.
            json_payload = dict(payload)
            json_payload["result_mode"] = args.result_mode
            json_payload["provider_results"] = result.provider_results
        print(json.dumps(json_payload, ensure_ascii=True))
    else:
        # The human-readable report is identical in every result mode, so it
        # is rendered once here instead of in per-mode duplicated branches.
        print(
            _render_user_readable_report(
                args.command,
                args.result_mode,
                providers,
                payload,
                result.provider_results,
            )
        )

    if result.decision == "FAIL":
        return 2
    if review_mode and result.decision == "INCONCLUSIVE":
        return 3
    return 0
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
if __name__ == "__main__":
    # Direct execution entry point; the process exit code comes from main().
    raise SystemExit(main())
|
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from dataclasses import dataclass, field
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Any, Dict, List, Optional
|
|
7
|
+
|
|
8
|
+
# Per-provider stall-timeout overrides (seconds). Empty by default: providers
# fall back to ReviewPolicy.stall_timeout_seconds unless a config file or CLI
# flag adds an entry.
DEFAULT_PROVIDER_TIMEOUTS: Dict[str, int] = {
}
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass(frozen=True)
class ReviewPolicy:
    """Immutable policy knobs for provider execution, timeouts and gating."""

    timeout_seconds: int = 180  # base per-task timeout (seconds)
    stall_timeout_seconds: int = 900  # cancel a provider after this long with no output progress
    poll_interval_seconds: float = 1.0  # delay between provider status polls
    review_hard_timeout_seconds: int = 1800  # review-mode hard deadline; 0 disables it
    enforce_findings_contract: bool = False  # require strict findings JSON contract in review mode
    max_retries: int = 1
    high_escalation_threshold: int = 1  # NOTE(review): threshold semantics live in the orchestrator — confirm there
    require_non_empty_findings: bool = True
    max_provider_parallelism: int = 0  # 0 means full (unbounded) fan-out
    provider_timeouts: Dict[str, int] = field(default_factory=lambda: dict(DEFAULT_PROVIDER_TIMEOUTS))  # per-provider stall overrides
    allow_paths: List[str] = field(default_factory=lambda: ["."])  # paths providers may touch
    provider_permissions: Dict[str, Dict[str, str]] = field(default_factory=dict)  # provider -> permission key/value map
    enforcement_mode: str = "strict"  # "strict" or "best_effort"
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass(frozen=True)
class ReviewConfig:
    """Top-level effective configuration: providers, output locations, policy."""

    providers: List[str] = field(default_factory=lambda: ["claude", "codex"])  # default provider fan-out
    artifact_base: str = "reports/review"  # base directory for result artifacts
    state_file: str = ".mco/state.json"  # runtime state file location
    policy: ReviewPolicy = field(default_factory=ReviewPolicy)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _as_bool(value: Any, default: bool) -> bool:
|
|
38
|
+
if isinstance(value, bool):
|
|
39
|
+
return value
|
|
40
|
+
if isinstance(value, str):
|
|
41
|
+
lowered = value.strip().lower()
|
|
42
|
+
if lowered in ("true", "1", "yes", "y", "on"):
|
|
43
|
+
return True
|
|
44
|
+
if lowered in ("false", "0", "no", "n", "off"):
|
|
45
|
+
return False
|
|
46
|
+
return default
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def _coerce_int(value: Any, default: int, minimum: Optional[int] = None) -> int:
    """Best-effort int() coercion.

    Returns `default` when `value` cannot be converted, or when `minimum`
    is given and the converted value falls below it.
    """
    try:
        number = int(value)
    except Exception:
        return default
    if minimum is not None and number < minimum:
        return default
    return number


def _coerce_float(value: Any, default: float) -> float:
    """Best-effort float() coercion; unparsable or non-positive values fall back."""
    try:
        number = float(value)
    except Exception:
        return default
    return number if number > 0 else default


def _to_policy(payload: Dict[str, Any]) -> ReviewPolicy:
    """Build a ReviewPolicy from a raw config mapping, tolerating bad values.

    All numeric fields are coerced defensively: malformed or out-of-range
    values fall back to their defaults instead of raising. (Previously
    `timeout_seconds`, `max_retries` and `high_escalation_threshold` used
    bare int() and raised on malformed config while every other field was
    tolerant.)
    """
    # Per-provider stall-timeout overrides: only positive ints are kept.
    raw_provider_timeouts = payload.get("provider_timeouts", {})
    provider_timeouts: Dict[str, int] = dict(DEFAULT_PROVIDER_TIMEOUTS)
    if isinstance(raw_provider_timeouts, dict):
        for key, value in raw_provider_timeouts.items():
            provider = str(key).strip()
            if not provider:
                continue
            timeout = _coerce_int(value, 0, minimum=1)
            if timeout > 0:
                provider_timeouts[provider] = timeout

    # 0 means full parallelism; negative/bad values normalize to 0.
    max_parallel = _coerce_int(payload.get("max_provider_parallelism", 0), 0, minimum=0)

    # allow_paths may be a comma-separated string or a list; empty -> ["."].
    raw_allow_paths = payload.get("allow_paths", ["."])
    allow_paths: List[str]
    if isinstance(raw_allow_paths, str):
        allow_paths = [item.strip() for item in raw_allow_paths.split(",") if item.strip()]
    elif isinstance(raw_allow_paths, list):
        allow_paths = [str(item).strip() for item in raw_allow_paths if str(item).strip()]
    else:
        allow_paths = ["."]
    if not allow_paths:
        allow_paths = ["."]

    # provider -> {permission key: value}; blank names and non-dicts dropped.
    raw_provider_permissions = payload.get("provider_permissions", {})
    provider_permissions: Dict[str, Dict[str, str]] = {}
    if isinstance(raw_provider_permissions, dict):
        for provider, permissions in raw_provider_permissions.items():
            provider_name = str(provider).strip()
            if not provider_name or not isinstance(permissions, dict):
                continue
            normalized = {
                str(key).strip(): str(value)
                for key, value in permissions.items()
                if str(key).strip()
            }
            if normalized:
                provider_permissions[provider_name] = normalized

    enforcement_mode = str(payload.get("enforcement_mode", "strict")).strip().lower()
    if enforcement_mode not in ("strict", "best_effort"):
        enforcement_mode = "strict"

    return ReviewPolicy(
        timeout_seconds=_coerce_int(payload.get("timeout_seconds", 180), 180),
        stall_timeout_seconds=_coerce_int(payload.get("stall_timeout_seconds", 900), 900, minimum=1),
        poll_interval_seconds=_coerce_float(payload.get("poll_interval_seconds", 1.0), 1.0),
        # minimum=0: a hard timeout of 0 is valid (disables the deadline).
        review_hard_timeout_seconds=_coerce_int(payload.get("review_hard_timeout_seconds", 1800), 1800, minimum=0),
        enforce_findings_contract=_as_bool(payload.get("enforce_findings_contract", False), False),
        max_retries=_coerce_int(payload.get("max_retries", 1), 1),
        high_escalation_threshold=_coerce_int(payload.get("high_escalation_threshold", 1), 1),
        require_non_empty_findings=_as_bool(payload.get("require_non_empty_findings", True), True),
        max_provider_parallelism=max_parallel,
        provider_timeouts=provider_timeouts,
        allow_paths=allow_paths,
        provider_permissions=provider_permissions,
        enforcement_mode=enforcement_mode,
    )
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _normalize_payload(payload: Dict[str, Any]) -> ReviewConfig:
    """Normalize a parsed config mapping into a ReviewConfig with sane defaults."""
    policy_payload = payload.get("policy", {})
    if not isinstance(policy_payload, dict):
        policy_payload = {}

    raw_providers = payload.get("providers", ["claude", "codex"])
    if isinstance(raw_providers, str):
        raw_providers = [part.strip() for part in raw_providers.split(",") if part.strip()]
    if not isinstance(raw_providers, list):
        raw_providers = ["claude", "codex"]
    providers = [text for text in (str(entry).strip() for entry in raw_providers) if text]
    if not providers:
        providers = ["claude", "codex"]

    return ReviewConfig(
        providers=providers,
        artifact_base=str(payload.get("artifact_base", "reports/review")),
        state_file=str(payload.get("state_file", ".mco/state.json")),
        policy=_to_policy(policy_payload),
    )
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def load_review_config(config_path: Optional[str]) -> ReviewConfig:
    """Load a ReviewConfig from a .json or .yaml/.yml file.

    With no path, returns the built-in defaults. Raises FileNotFoundError for
    a missing file, RuntimeError when YAML is requested without pyyaml
    installed, and ValueError for a non-mapping root or unsupported extension.
    """
    if not config_path:
        return ReviewConfig()
    path = Path(config_path)
    if not path.exists():
        raise FileNotFoundError(f"config file not found: {config_path}")

    suffix = path.suffix.lower()
    raw_text = path.read_text(encoding="utf-8")
    if suffix == ".json":
        payload = json.loads(raw_text)
        if not isinstance(payload, dict):
            raise ValueError("config root must be an object")
        return _normalize_payload(payload)

    if suffix in (".yaml", ".yml"):
        try:
            # Optional dependency: imported lazily so JSON-only users do not
            # need pyyaml installed.
            import yaml  # type: ignore
        except Exception as exc:
            raise RuntimeError(
                "YAML config requires pyyaml. Install with: pip install pyyaml, or use a .json config."
            ) from exc
        payload = yaml.safe_load(raw_text)
        if not isinstance(payload, dict):
            raise ValueError("config root must be a map")
        return _normalize_payload(payload)

    raise ValueError(f"unsupported config format: {config_path}")
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from typing import Any, Dict, List, Literal, Optional, Protocol, Sequence, runtime_checkable
|
|
5
|
+
|
|
6
|
+
from .types import ErrorKind
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
# Closed set of supported provider CLIs.
ProviderId = Literal["claude", "codex", "gemini", "opencode", "qwen"]
# Closed set of capability tier labels (C0–C6); tier semantics are defined by
# the adapters/orchestrator, not here.
CapabilityTier = Literal["C0", "C1", "C2", "C3", "C4", "C5", "C6"]
# Lifecycle states of a single provider attempt.
TaskAttemptState = Literal["PENDING", "STARTED", "SUCCEEDED", "FAILED", "CANCELLED", "EXPIRED"]

# Runtime-iterable mirrors of the Literal aliases above.
PROVIDER_IDS: Sequence[ProviderId] = ("claude", "codex", "gemini", "opencode", "qwen")
CAPABILITY_TIERS: Sequence[CapabilityTier] = ("C0", "C1", "C2", "C3", "C4", "C5", "C6")
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass(frozen=True)
class CapabilitySet:
    """Static capability description advertised by a provider adapter."""

    tiers: List[CapabilityTier]  # capability tiers this adapter advertises
    supports_native_async: bool
    supports_poll_endpoint: bool
    supports_resume_after_restart: bool
    supports_schema_enforcement: bool
    min_supported_version: str  # lowest provider version the adapter claims to support
    tested_os: List[Literal["macos", "linux", "windows"]]
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@dataclass(frozen=True)
class ProviderPresence:
    """Result of probing whether a provider CLI is installed and usable."""

    provider: ProviderId
    detected: bool  # whether the provider binary was found
    binary_path: Optional[str]
    version: Optional[str]
    auth_ok: bool
    reason: str = ""  # free-form detail; presumably set on detection/auth failure — confirm in adapters
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@dataclass(frozen=True)
class TaskInput:
    """Provider-agnostic description of one task to execute."""

    task_id: str
    prompt: str
    repo_root: str
    target_paths: List[str]  # paths the task is scoped to
    required_capabilities: List[CapabilityTier] = field(default_factory=lambda: ["C1", "C2"])
    optional_capabilities: List[CapabilityTier] = field(default_factory=list)
    timeout_seconds: int = 600
    metadata: Dict[str, Any] = field(default_factory=dict)  # adapter-specific extras
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@dataclass(frozen=True)
class TaskRunRef:
    """Handle to one started provider attempt, used for poll()/cancel()."""

    task_id: str
    provider: ProviderId
    run_id: str
    artifact_path: str  # where this attempt's outputs live
    started_at: str  # timestamp string; exact format set by the adapter — confirm there
    pid: Optional[int] = None  # OS process id, when the adapter runs a subprocess
    session_id: Optional[str] = None  # provider-side session handle, when available
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@dataclass(frozen=True)
class TaskStatus:
    """Point-in-time snapshot of a provider attempt."""

    task_id: str
    provider: ProviderId
    run_id: str
    attempt_state: TaskAttemptState
    completed: bool  # True once the attempt is terminal
    heartbeat_at: Optional[str]  # last observed progress timestamp, if any
    output_path: Optional[str]
    error_kind: Optional[ErrorKind] = None  # failure classification, when failed
    exit_code: Optional[int] = None
    message: str = ""  # free-form status/error detail
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
@dataclass(frozen=True)
class NormalizeContext:
    """Context handed to an adapter when normalizing raw provider output."""

    task_id: str
    provider: ProviderId
    repo_root: str
    raw_ref: str  # reference to the raw output being normalized
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
@dataclass(frozen=True)
class Evidence:
    """Code location and snippet backing a finding."""

    file: str
    line: Optional[int]  # line number, when the provider reported one
    snippet: str
    symbol: Optional[str] = None  # enclosing symbol name, when known
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@dataclass(frozen=True)
class NormalizedFinding:
    """A provider finding converted to the common cross-provider shape."""

    task_id: str
    provider: ProviderId
    finding_id: str
    severity: Literal["critical", "high", "medium", "low"]
    category: Literal["bug", "security", "performance", "maintainability", "test-gap"]
    title: str
    evidence: Evidence
    recommendation: str
    confidence: float  # confidence score; range not enforced here
    fingerprint: str  # NOTE(review): presumably a stable identity for de-dup — confirm in review_engine
    raw_ref: str  # back-reference to the raw provider output
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
@runtime_checkable
class ProviderAdapter(Protocol):
    """Structural interface every provider CLI adapter implements."""

    # Stable identifier of the provider this adapter wraps.
    id: ProviderId

    def detect(self) -> ProviderPresence:
        """Probe for the provider binary and report availability/auth state."""
        ...

    def capabilities(self) -> CapabilitySet:
        """Return the static capability description for this provider."""
        ...

    def run(self, input_task: TaskInput) -> TaskRunRef:
        """Start one attempt for the task and return a pollable handle."""
        ...

    def poll(self, ref: TaskRunRef) -> TaskStatus:
        """Return the current status snapshot for a started attempt."""
        ...

    def cancel(self, ref: TaskRunRef) -> None:
        """Cancel a started attempt."""
        ...

    def normalize(self, raw: Any, ctx: NormalizeContext) -> List[NormalizedFinding]:
        """Convert raw provider output into normalized findings."""
        ...
|
|
127
|
+
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from typing import List
|
|
5
|
+
|
|
6
|
+
from .types import ErrorKind, WarningKind
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def detect_warnings(stderr: str) -> List[WarningKind]:
    """Scan provider stderr for known non-fatal warning signatures."""
    lowered = stderr.lower()
    mcp_startup_issue = "mcp" in lowered and (
        "failed to start" in lowered or "auth required" in lowered
    )
    return [WarningKind.PROVIDER_WARNING_MCP_STARTUP] if mcp_startup_issue else []
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def classify_error(exit_code: int, stderr: str) -> ErrorKind:
    """Map a provider CLI failure to an ErrorKind bucket.

    Checks are ordered from most to least specific and the first match wins.
    Anything unrecognized — including parse/deserialize failures reported
    after a nominally successful command — falls through to
    NORMALIZATION_ERROR, the catch-all bucket.
    """
    text = stderr.lower()

    # 124/142 are the conventional timeout exit codes (GNU timeout / SIGALRM).
    if exit_code in (124, 142) or "timeout" in text or "timed out" in text:
        return ErrorKind.RETRYABLE_TIMEOUT

    if "rate limit" in text or "429" in text:
        return ErrorKind.RETRYABLE_RATE_LIMIT

    if any(token in text for token in ("connection reset", "temporary failure", "network", "econnreset", "ehostunreach")):
        return ErrorKind.RETRYABLE_TRANSIENT_NETWORK

    if any(token in text for token in ("auth", "invalid api key", "401", "oauth", "unauthorized")):
        return ErrorKind.NON_RETRYABLE_AUTH

    if any(token in text for token in ("unsupported capability", "not supported", "unknown arguments")):
        return ErrorKind.NON_RETRYABLE_UNSUPPORTED_CAPABILITY

    if any(token in text for token in ("invalid input", "schema", "missing required", "validation failed", "invalid type")):
        return ErrorKind.NON_RETRYABLE_INVALID_INPUT

    # Fallback bucket. The previous dedicated regex check for
    # "(parse|deserialize|json).*fail" was dead weight: it returned the same
    # NORMALIZATION_ERROR value as this fallback, so it is folded in here.
    return ErrorKind.NORMALIZATION_ERROR
|
|
43
|
+
|