@jahanxu/code-flow 0.1.2 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/adapters/claude/CLAUDE.md +11 -1
- package/src/adapters/claude/commands/cf-init.md +269 -0
- package/src/adapters/claude/commands/cf-inject.md +41 -0
- package/src/adapters/claude/commands/cf-learn.md +120 -0
- package/src/adapters/claude/commands/cf-scan.md +56 -0
- package/src/adapters/claude/commands/cf-stats.md +65 -0
- package/src/adapters/claude/commands/cf-validate.md +71 -0
- package/src/cli.js +96 -14
- package/src/adapters/claude/skills/cf-init.md +0 -13
- package/src/adapters/claude/skills/cf-inject.md +0 -12
- package/src/adapters/claude/skills/cf-learn.md +0 -11
- package/src/adapters/claude/skills/cf-scan.md +0 -12
- package/src/adapters/claude/skills/cf-stats.md +0 -11
- package/src/adapters/claude/skills/cf-validate.md +0 -12
- package/src/core/code-flow/scripts/cf_init.py +0 -829
- package/src/core/code-flow/scripts/cf_inject.py +0 -150
- package/src/core/code-flow/scripts/cf_learn.py +0 -202
- package/src/core/code-flow/scripts/cf_validate.py +0 -340
|
@@ -1,150 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
import json
|
|
3
|
-
import os
|
|
4
|
-
import sys
|
|
5
|
-
import fnmatch
|
|
6
|
-
|
|
7
|
-
from cf_core import (
|
|
8
|
-
assemble_context,
|
|
9
|
-
load_config,
|
|
10
|
-
load_inject_state,
|
|
11
|
-
match_domains,
|
|
12
|
-
read_specs,
|
|
13
|
-
save_inject_state,
|
|
14
|
-
select_specs,
|
|
15
|
-
)
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
def match_details(rel_path: str, mapping: dict) -> dict:
    """Map each domain to the first of its glob patterns matching *rel_path*.

    Domains whose patterns do not match are omitted; a None/empty mapping
    yields an empty dict.
    """
    hits = {}
    for domain, domain_cfg in (mapping or {}).items():
        matched = next(
            (
                pattern
                for pattern in (domain_cfg.get("patterns") or [])
                if fnmatch.fnmatch(rel_path, pattern)
            ),
            None,
        )
        if matched is not None:
            hits[domain] = matched
    return hits
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
def main() -> None:
    """CLI entry point: resolve a domain name or file path to a set of spec
    files, select them within the L1 token budget, and print the assembled
    context as one JSON object on stdout.

    All outcomes (including errors) are reported as a single JSON line so the
    calling agent can branch on the "error" key instead of catching exceptions.
    """
    project_root = os.getcwd()
    config = load_config(project_root)
    if not config:
        print(json.dumps({"error": "config_missing"}, ensure_ascii=False))
        return

    mapping = config.get("path_mapping") or {}
    available_domains = sorted(mapping.keys())
    args = sys.argv[1:]
    # --list-specs [--domain=X]: report the specs configured for one domain
    # and exit without injecting anything.
    list_specs = "--list-specs" in args
    list_domain = ""
    if list_specs:
        for arg in args:
            if arg.startswith("--domain="):
                list_domain = arg.split("=", 1)[1]
        if list_domain and list_domain in mapping:
            specs = (mapping.get(list_domain) or {}).get("specs") or []
            print(json.dumps({"domain": list_domain, "specs": specs}, ensure_ascii=False))
            return
        print(json.dumps({"error": "domain_not_found", "available_domains": available_domains}, ensure_ascii=False))
        return
    if not args:
        # No target given: best-effort fallback to the last injected file
        # recorded in .code-flow/.inject-state (any read/parse error → none).
        state_path = os.path.join(project_root, ".code-flow", ".inject-state")
        recent_target = ""
        try:
            with open(state_path, "r", encoding="utf-8") as file:
                data = json.load(file)
                recent_target = data.get("last_file", "")
        except Exception:
            recent_target = ""

        if not recent_target:
            print(
                json.dumps(
                    {
                        "error": "missing_target",
                        "available_domains": available_domains,
                        "usage": "cf-inject <domain|file_path>",
                    },
                    ensure_ascii=False,
                )
            )
            return
        target = recent_target
    else:
        target = args[0]
    match_info = {}
    if target in mapping:
        # Target names a configured domain directly.
        domains = [target]
    else:
        # Target is a file path: make it project-relative, then match it
        # against each domain's glob patterns.
        abs_path = target
        if not os.path.isabs(abs_path):
            abs_path = os.path.join(project_root, target)
        rel_path = os.path.relpath(abs_path, project_root)
        domains = match_domains(rel_path, mapping)
        match_info = match_details(rel_path, mapping)

    if not domains:
        print(
            json.dumps(
                {
                    "error": "domain_not_found",
                    "target": target,
                    "available_domains": available_domains,
                },
                ensure_ascii=False,
            )
        )
        return

    # Collect candidate specs and per-domain priority hints from every
    # matched domain (later domains' priorities overwrite earlier ones).
    specs = []
    priorities = {}
    for domain in domains:
        domain_cfg = mapping.get(domain) or {}
        priorities.update(domain_cfg.get("spec_priority") or {})
        specs.extend(read_specs(project_root, domain, domain_cfg))

    if not specs:
        print(json.dumps({"error": "specs_empty", "domains": domains}, ensure_ascii=False))
        return

    # Token budget for the L1 context; non-numeric config values fall back
    # to the 1700 default.
    budget_cfg = config.get("budget") or {}
    budget = budget_cfg.get("l1_max", 1700)
    try:
        budget = int(budget)
    except Exception:
        budget = 1700

    selected = select_specs(specs, budget, priorities)
    if not selected:
        print(json.dumps({"error": "budget_exceeded", "budget": budget}, ensure_ascii=False))
        return

    # Persist which domains have been injected (union with any prior state,
    # discarding malformed entries) plus the last target for the no-arg case.
    included_domains = {spec.get("domain") for spec in selected if spec.get("domain")}
    state = load_inject_state(project_root)
    injected_domains = state.get("injected_domains") or []
    if not isinstance(injected_domains, list):
        injected_domains = []
    injected_domains = {d for d in injected_domains if isinstance(d, str)}
    state_payload = {"injected_domains": sorted(injected_domains | included_domains)}
    state_payload["last_file"] = target
    save_inject_state(project_root, state_payload)

    total_tokens = sum(spec.get("tokens", 0) for spec in selected)
    output = {
        "domains": sorted(included_domains),
        "selected_specs": [
            {"path": spec["path"], "tokens": spec["tokens"]} for spec in selected
        ],
        "total_tokens": total_tokens,
        "budget": budget,
        "context": assemble_context(selected, "## Active Specs"),
    }
    # CF_DEBUG=1 adds the per-domain pattern matches and the resolved target.
    if os.environ.get("CF_DEBUG") == "1":
        output["match_info"] = match_info
        output["target"] = target
    print(json.dumps(output, ensure_ascii=False))


if __name__ == "__main__":
    main()
|
|
@@ -1,202 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
import argparse
|
|
3
|
-
import datetime
|
|
4
|
-
import json
|
|
5
|
-
import os
|
|
6
|
-
import sys
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
def load_config(project_root: str) -> dict:
    """Load .code-flow/config.yml under *project_root*.

    Returns {} when the file is absent, PyYAML is unavailable, or the file
    cannot be read/parsed — callers never see an exception.
    """
    config_path = os.path.join(project_root, ".code-flow", "config.yml")
    if not os.path.exists(config_path):
        return {}
    try:
        import yaml

        with open(config_path, "r", encoding="utf-8") as handle:
            parsed = yaml.safe_load(handle)
    except Exception:
        return {}
    return parsed or {}
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
def read_text(path: str) -> str:
    """Return the UTF-8 contents of *path*, or "" if it cannot be read."""
    try:
        with open(path, "r", encoding="utf-8") as handle:
            content = handle.read()
    except Exception:
        content = ""
    return content
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
def write_text(path: str, content: str) -> bool:
    """Write *content* to *path* as UTF-8; True on success, False on any error."""
    try:
        with open(path, "w", encoding="utf-8") as handle:
            handle.write(content)
    except Exception:
        return False
    return True
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
def estimate_tokens(text: str) -> int:
    """Rough token estimate: roughly four characters per token."""
    return divmod(len(text), 4)[0]
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
def find_target_spec(domain: str, config: dict, file_arg: str) -> tuple:
    """Pick the spec file to edit for *domain*.

    Returns (selected, all_specs): *selected* is *file_arg* when it names a
    configured spec, the sole spec when exactly one exists and no file was
    requested, and "" otherwise.
    """
    domain_cfg = (config.get("path_mapping") or {}).get(domain) or {}
    specs = domain_cfg.get("specs") or []
    selected = ""
    if file_arg:
        if file_arg in specs:
            selected = file_arg
    elif len(specs) == 1:
        selected = specs[0]
    return selected, specs
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
def insert_learning(content: str, entry: str) -> str:
    """Insert *entry* at the end of the "## Learnings" section of *content*.

    Creates the section (at the end of the document) when it is missing;
    always returns text ending in exactly one newline.
    """
    stripped = content.rstrip()
    if not stripped:
        return f"## Learnings\n{entry}\n"

    rows = stripped.splitlines()
    heading = next(
        (i for i, row in enumerate(rows) if row.strip() == "## Learnings"),
        None,
    )
    if heading is None:
        return f"{stripped}\n\n## Learnings\n{entry}\n"

    # Insert just before the next "## " heading, or at EOF if none follows.
    position = next(
        (i for i in range(heading + 1, len(rows)) if rows[i].startswith("## ")),
        len(rows),
    )
    rows.insert(position, entry)
    return "\n".join(rows).rstrip() + "\n"
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
def main() -> None:
    """CLI entry point: append a dated learning entry to either the global
    CLAUDE.md (--scope global) or a domain spec file under .code-flow/specs.

    Prints exactly one JSON line describing the outcome; --dry-run previews
    the result (including token-budget warnings) without writing.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): scope choices are hardcoded here; presumably they should
    # track the domains configured in config.yml — confirm with maintainers.
    parser.add_argument("--scope", required=True, choices=["global", "frontend", "backend"])
    parser.add_argument("--content", required=True)
    parser.add_argument("--file", default="")
    parser.add_argument("--dry-run", action="store_true")
    args = parser.parse_args()

    project_root = os.getcwd()
    config = load_config(project_root)

    # Entries are dated bullet points, e.g. "- [2024-01-31] lesson text".
    date_str = datetime.date.today().isoformat()
    entry = f"- [{date_str}] {args.content.strip()}"

    if args.scope == "global":
        # Global learnings live in the project-root CLAUDE.md (L0 context);
        # a missing file is treated as empty and created on write.
        target_path = os.path.join(project_root, "CLAUDE.md")
        content = read_text(target_path)
        updated = insert_learning(content, entry)
        if args.dry_run:
            tokens = estimate_tokens(updated)
            print(
                json.dumps(
                    {
                        "status": "dry_run",
                        "target": "CLAUDE.md",
                        "entry": entry,
                        "tokens": tokens,
                        "warning": "L0 超出预算" if tokens > 800 else "",
                    },
                    ensure_ascii=False,
                )
            )
            return
        if not write_text(target_path, updated):
            print(json.dumps({"error": "write_failed", "target": "CLAUDE.md"}, ensure_ascii=False))
            return
        tokens = estimate_tokens(updated)
        print(
            json.dumps(
                {
                    "status": "ok",
                    "target": "CLAUDE.md",
                    "entry": entry,
                    "tokens": tokens,
                    "warning": "L0 超出预算" if tokens > 800 else "",
                },
                ensure_ascii=False,
            )
        )
        return

    # Domain-scoped learnings require a config to resolve the spec file.
    if not config:
        print(json.dumps({"error": "config_missing"}, ensure_ascii=False))
        return

    spec_rel, specs = find_target_spec(args.scope, config, args.file)
    if not spec_rel:
        # Ambiguous (multiple specs, no --file) or unknown spec requested.
        print(
            json.dumps(
                {
                    "error": "spec_not_selected",
                    "available_specs": specs,
                },
                ensure_ascii=False,
            )
        )
        return

    target_path = os.path.join(project_root, ".code-flow", "specs", spec_rel)
    content = read_text(target_path)
    if not content:
        # Unlike the global path, a missing/empty spec file is an error —
        # specs are expected to pre-exist.
        print(
            json.dumps(
                {"error": "spec_missing_or_empty", "target": spec_rel},
                ensure_ascii=False,
            )
        )
        return

    updated = insert_learning(content, entry)
    tokens = estimate_tokens(updated)
    warning = ""
    # Per-spec budget: warn above 500 tokens (~2000 characters).
    if tokens > 500:
        warning = "单文件超过 500 tokens,建议精简"
    if args.dry_run:
        print(
            json.dumps(
                {
                    "status": "dry_run",
                    "target": spec_rel,
                    "entry": entry,
                    "tokens": tokens,
                    "warning": warning,
                },
                ensure_ascii=False,
            )
        )
        return
    if not write_text(target_path, updated):
        print(json.dumps({"error": "write_failed", "target": spec_rel}, ensure_ascii=False))
        return
    print(
        json.dumps(
            {
                "status": "ok",
                "target": spec_rel,
                "entry": entry,
                "tokens": tokens,
                "warning": warning,
            },
            ensure_ascii=False,
        )
    )


if __name__ == "__main__":
    main()
|
|
@@ -1,340 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
import fnmatch
|
|
3
|
-
import json
|
|
4
|
-
import os
|
|
5
|
-
import subprocess
|
|
6
|
-
import sys
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
def load_validation(project_root: str) -> dict:
    """Load the validator configuration for *project_root*.

    Prefers .code-flow/validation.yml; when that file is absent, synthesizes
    default npm validators from package.json "scripts". Returns {} when
    nothing usable is found or any read/parse step fails.
    """
    yml_path = os.path.join(project_root, ".code-flow", "validation.yml")
    if os.path.exists(yml_path):
        try:
            import yaml
        except Exception:
            return {}
        try:
            with open(yml_path, "r", encoding="utf-8") as handle:
                loaded = yaml.safe_load(handle)
                return loaded or {}
        except Exception:
            return {}

    pkg_path = os.path.join(project_root, "package.json")
    if not os.path.exists(pkg_path):
        return {}
    try:
        with open(pkg_path, "r", encoding="utf-8") as handle:
            package = json.load(handle)
    except Exception:
        return {}

    scripts = package.get("scripts") or {}
    has_lint = "lint" in scripts
    validators = []
    if has_lint:
        validators.append(
            {
                "name": "npm run lint",
                "trigger": "**/*.{ts,tsx,js,jsx}",
                "command": "npm run lint",
                "timeout": 30000,
                "on_fail": "检查 lint 配置",
            }
        )
    if "test" in scripts:
        # Without a lint validator, the test trigger also covers .py files.
        test_trigger = "**/*.{ts,tsx,js,jsx}" if has_lint else "**/*.{ts,tsx,js,jsx,py}"
        validators.append(
            {
                "name": "npm test",
                "trigger": test_trigger,
                "command": "npm test",
                "timeout": 60000,
                "on_fail": "检查测试用例",
            }
        )
    return {"validators": validators}
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
def expand_pattern(pattern: str) -> list:
    """Expand a single glob brace group into concrete patterns.

    "**/*.{ts,js}" -> ["**/*.ts", "**/*.js"]; a pattern without a complete
    "{...}" group is returned as a one-element list unchanged.

    Fix: the previous implementation raised ValueError on malformed patterns
    where the only "}" precedes "{" (e.g. "a}b{c"), because the unpacking of
    rest.split("}", 1) got a single element. Index-based parsing returns the
    pattern unchanged instead, and is identical on all well-formed inputs.
    """
    open_idx = pattern.find("{")
    # Only a "}" that closes the found "{" counts.
    close_idx = pattern.find("}", open_idx + 1)
    if open_idx == -1 or close_idx == -1:
        return [pattern]
    prefix = pattern[:open_idx]
    options = pattern[open_idx + 1:close_idx]
    suffix = pattern[close_idx + 1:]
    return [f"{prefix}{opt}{suffix}" for opt in options.split(",")]
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
def normalize_path(path: str) -> str:
    """Normalize OS path separators in *path* to forward slashes."""
    return "/".join(path.split(os.sep))
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
def match_files(pattern: str, files: list) -> list:
    """Return the sorted, de-duplicated subset of *files* matching *pattern*.

    The pattern may contain one brace group (expanded via expand_pattern);
    "**/" patterns also match top-level paths via a variant with the prefix
    stripped, since fnmatch has no native "**" semantics.
    """
    candidates = []
    for expanded in expand_pattern(pattern):
        candidates.append(expanded)
        if "**/" in expanded:
            candidates.append(expanded.replace("**/", ""))
    hits = {
        file_path
        for file_path in files
        if any(
            fnmatch.fnmatch(normalize_path(file_path), candidate)
            for candidate in candidates
        )
    }
    return sorted(hits)
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
def truncate(text: str, limit: int = 2000) -> str:
    """Clip *text* to *limit* characters, appending "..." when clipped."""
    return text if len(text) <= limit else text[:limit] + "..."
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
def quote_single(path: str) -> str:
    """Wrap *path* in single quotes for a POSIX shell, escaping embedded quotes."""
    escaped = path.replace("'", "'\"'\"'")
    return f"'{escaped}'"
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
def normalize_requested_files(
    project_root: str,
    requested: list,
    require_exists: bool,
) -> tuple:
    """Resolve user-supplied paths to project-relative, '/'-separated form.

    Returns (sorted_unique_paths, "") on success, or ([], error_code) when a
    path is malformed ("invalid_path"), escapes the project root
    ("outside_project_root"), or — with require_exists — does not exist
    ("file_missing"). Empty entries are skipped.
    """
    root = os.path.abspath(project_root)
    seen = set()
    for candidate in requested:
        if not candidate:
            continue
        if os.path.isabs(candidate):
            resolved = os.path.abspath(candidate)
        else:
            resolved = os.path.abspath(os.path.join(project_root, candidate))
        try:
            shared = os.path.commonpath([root, resolved])
        except Exception:
            return [], "invalid_path"
        if shared != root:
            return [], "outside_project_root"
        if require_exists and not os.path.exists(resolved):
            return [], "file_missing"
        seen.add(normalize_path(os.path.relpath(resolved, project_root)))
    return sorted(seen), ""
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
def main() -> None:
    """CLI entry point: run the configured validators against changed files
    and print a JSON (or table) report.

    File selection: with a .git directory, candidates come from
    `git diff --name-only HEAD` (explicit --files/positional paths must be a
    subset of that diff); without git, explicit paths are required and must
    exist. Flags: --only-failed filters results, --json-short trims fields,
    --output=table prints a plain-text table instead of JSON.
    """
    project_root = os.getcwd()
    config = load_validation(project_root)
    validators = config.get("validators") or []
    if not validators:
        print(json.dumps({"error": "validation_config_missing"}, ensure_ascii=False))
        return

    # Anything that is not a recognized flag is treated as a file path;
    # --files=a,b,c contributes its comma-separated entries.
    args = sys.argv[1:]
    requested_files = []
    for arg in args:
        if arg.startswith("--files="):
            raw = arg.split("=", 1)[1]
            requested_files.extend([part.strip() for part in raw.split(",") if part.strip()])
        elif arg in {"--json-short", "--only-failed"}:
            continue
        elif arg.startswith("--output="):
            continue
        else:
            requested_files.append(arg)

    git_dir = os.path.join(project_root, ".git")
    has_git = os.path.isdir(git_dir)
    files = []

    if has_git:
        # Candidate set = files changed since HEAD (working tree + index).
        result = subprocess.run(
            ["git", "diff", "--name-only", "HEAD"],
            capture_output=True,
            text=True,
            check=False,
        )
        if result.returncode != 0:
            print(
                json.dumps(
                    {"error": "git_diff_failed", "hint": "检查 git 仓库状态或 HEAD 是否存在"},
                    ensure_ascii=False,
                )
            )
            return
        diff_files = [
            normalize_path(line.strip())
            for line in result.stdout.splitlines()
            if line.strip()
        ]
        if requested_files:
            # Explicit paths need not exist on disk (a diff can list
            # deletions), but must normalize cleanly inside the project root.
            normalized, error = normalize_requested_files(
                project_root, requested_files, require_exists=False
            )
            if error:
                print(
                    json.dumps(
                        {
                            "error": "invalid_files",
                            "hint": "确认文件路径位于项目根目录内",
                        },
                        ensure_ascii=False,
                    )
                )
                return
            # Guard: only files actually present in the diff may be validated.
            diff_set = set(diff_files)
            invalid = [path for path in normalized if path not in diff_set]
            if invalid:
                print(
                    json.dumps(
                        {
                            "error": "files_not_in_diff",
                            "hint": "仅允许使用 `git diff --name-only HEAD` 中的文件路径",
                            "files": invalid,
                        },
                        ensure_ascii=False,
                    )
                )
                return
            files = normalized
        else:
            files = diff_files
    else:
        # No git: explicit paths are mandatory and must exist.
        if not requested_files:
            print(
                json.dumps(
                    {
                        "error": "no_git_repo",
                        "hint": "无 git 仓库时请通过 --files 或位置参数显式传入文件路径",
                    },
                    ensure_ascii=False,
                )
            )
            return
        normalized, error = normalize_requested_files(
            project_root, requested_files, require_exists=True
        )
        if error:
            print(
                json.dumps(
                    {
                        "error": "invalid_files",
                        "hint": "确认文件存在且位于项目根目录内",
                    },
                    ensure_ascii=False,
                )
            )
            return
        files = normalized

    if not files:
        print(json.dumps({"error": "no_files"}, ensure_ascii=False))
        return

    results = []
    all_passed = True

    for validator in validators:
        name = validator.get("name", "unnamed")
        trigger = validator.get("trigger", "")
        command = validator.get("command", "")
        timeout_ms = validator.get("timeout", 30000)
        on_fail = validator.get("on_fail", "")

        # A validator only runs when its trigger glob matches changed files;
        # an empty trigger means it never matches and is always skipped.
        matched_files = match_files(trigger, files) if trigger else []
        if not matched_files:
            results.append(
                {
                    "name": name,
                    "status": "skipped",
                    "matched_files": [],
                    "command": command,
                }
            )
            continue

        # "{files}" in the command is replaced with the shell-quoted matches.
        quoted_files = " ".join(quote_single(path) for path in matched_files)
        run_command = command.replace("{files}", quoted_files)

        # Config timeouts are in milliseconds; fall back to 30s on bad values.
        try:
            timeout = int(timeout_ms) / 1000
        except Exception:
            timeout = 30

        status = "passed"
        exit_code = 0
        stdout = ""
        stderr = ""

        try:
            # shell=True is required for "{files}"-substituted command
            # strings; commands come from trusted local config.
            proc = subprocess.run(
                run_command,
                shell=True,
                capture_output=True,
                text=True,
                timeout=timeout,
            )
            exit_code = proc.returncode
            stdout = proc.stdout.strip()
            stderr = proc.stderr.strip()
            if exit_code != 0:
                status = "failed"
                all_passed = False
        except subprocess.TimeoutExpired:
            status = "timeout"
            all_passed = False
        except Exception:
            status = "error"
            all_passed = False

        results.append(
            {
                "name": name,
                "status": status,
                "matched_files": matched_files,
                "command": run_command,
                "exit_code": exit_code,
                "stdout": truncate(stdout),
                "stderr": truncate(stderr),
                "on_fail": on_fail,
            }
        )

    output = {"passed": all_passed, "results": results}
    # --only-failed: keep only non-passing validators (skipped ones drop too).
    if "--only-failed" in sys.argv:
        output = {
            "passed": all_passed,
            "results": [
                item
                for item in results
                if item["status"] in {"failed", "timeout", "error"}
            ],
        }

    # --json-short: reduce each result to name/status/on_fail. Applied on top
    # of --only-failed when both are given.
    if "--json-short" in sys.argv:
        output = {
            "passed": all_passed,
            "results": [
                {
                    "name": item["name"],
                    "status": item["status"],
                    "on_fail": item.get("on_fail", ""),
                }
                for item in output.get("results", [])
            ],
        }

    # --output=table: plain-text table instead of JSON. NOTE(review): when
    # combined with --json-short the matched_files/command columns are empty
    # because that shaping already dropped them — confirm intended.
    if "--output=table" in sys.argv:
        print("NAME | STATUS | MATCHED_FILES | COMMAND")
        for item in output.get("results", []):
            matched = ",".join(item.get("matched_files", []))
            command = item.get("command", "")
            print(f"{item.get('name')} | {item.get('status')} | {matched} | {command}")
        return

    print(json.dumps(output, ensure_ascii=False))


if __name__ == "__main__":
    main()
|