@jahanxu/code-flow 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +13 -0
- package/src/adapters/claude/settings.local.json +26 -0
- package/src/adapters/claude/skills/cf-init.md +13 -0
- package/src/adapters/claude/skills/cf-inject.md +12 -0
- package/src/adapters/claude/skills/cf-learn.md +11 -0
- package/src/adapters/claude/skills/cf-scan.md +12 -0
- package/src/adapters/claude/skills/cf-stats.md +11 -0
- package/src/adapters/claude/skills/cf-validate.md +12 -0
- package/src/adapters/codex/AGENTS.md +3 -0
- package/src/adapters/cursor/cursorrules +1 -0
- package/src/cli.js +105 -0
- package/src/core/code-flow/config.yml +97 -0
- package/src/core/code-flow/scripts/cf_core.py +129 -0
- package/src/core/code-flow/scripts/cf_init.py +829 -0
- package/src/core/code-flow/scripts/cf_inject.py +150 -0
- package/src/core/code-flow/scripts/cf_inject_hook.py +107 -0
- package/src/core/code-flow/scripts/cf_learn.py +202 -0
- package/src/core/code-flow/scripts/cf_scan.py +157 -0
- package/src/core/code-flow/scripts/cf_session_hook.py +16 -0
- package/src/core/code-flow/scripts/cf_stats.py +108 -0
- package/src/core/code-flow/scripts/cf_validate.py +340 -0
- package/src/core/code-flow/specs/backend/code-quality-performance.md +13 -0
- package/src/core/code-flow/specs/backend/database.md +13 -0
- package/src/core/code-flow/specs/backend/directory-structure.md +13 -0
- package/src/core/code-flow/specs/backend/logging.md +13 -0
- package/src/core/code-flow/specs/backend/platform-rules.md +13 -0
- package/src/core/code-flow/specs/frontend/component-specs.md +14 -0
- package/src/core/code-flow/specs/frontend/directory-structure.md +14 -0
- package/src/core/code-flow/specs/frontend/quality-standards.md +15 -0
- package/src/core/code-flow/validation.yml +30 -0
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
import json
|
|
3
|
+
import os
|
|
4
|
+
import sys
|
|
5
|
+
|
|
6
|
+
from cf_core import estimate_tokens, load_config
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def read_text(path: str) -> str:
    """Return the stripped UTF-8 contents of *path*, or "" on any failure.

    Best-effort reader: a missing, unreadable, or undecodable file yields an
    empty string instead of raising, so callers can treat "no content" and
    "no file" uniformly.
    """
    try:
        handle = open(path, "r", encoding="utf-8")
    except Exception:
        return ""
    try:
        return handle.read().strip()
    except Exception:
        return ""
    finally:
        handle.close()
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _to_int(value, fallback: int) -> int:
    """Coerce *value* to int, returning *fallback* on any conversion failure."""
    try:
        return int(value)
    except Exception:
        return fallback


def main() -> None:
    """Report token usage of CLAUDE.md (L0) and domain specs (L1) against budgets.

    Reads ``budget`` and ``path_mapping`` from the code-flow config, estimates
    tokens for CLAUDE.md and every mapped spec file under
    ``.code-flow/specs``, then prints either a JSON report (default) or a
    human-readable summary (``--human``).  ``--domain=<name>`` restricts the
    L1 scan to a single domain.
    """
    project_root = os.getcwd()
    config = load_config(project_root)
    budget_cfg = config.get("budget") or {}

    human_output = "--human" in sys.argv
    json_output = not human_output
    domain_filter = None
    for arg in sys.argv[1:]:
        if arg.startswith("--domain="):
            domain_filter = arg.split("=", 1)[1]

    # Budgets may arrive from YAML as strings; coerce everything defensively.
    # BUG FIX: the original coerced only l0/total, so a non-numeric l1_max
    # crashed later at the `l1_tokens > l1_budget` comparison.  It also
    # computed the default `total` from the *uncoerced* values, which would
    # concatenate two string budgets instead of adding them.
    l0_budget = _to_int(budget_cfg.get("l0_max", 800), 800)
    l1_budget = _to_int(budget_cfg.get("l1_max", 1700), 1700)
    total_budget = _to_int(
        budget_cfg.get("total", l0_budget + l1_budget), l0_budget + l1_budget
    )

    claude_path = os.path.join(project_root, "CLAUDE.md")
    l0_tokens = 0
    if os.path.exists(claude_path):
        l0_tokens = estimate_tokens(read_text(claude_path))

    l1 = {}
    total_tokens = l0_tokens
    specs_root = os.path.join(project_root, ".code-flow", "specs")
    spec_domain_map = {}
    for domain, domain_cfg in (config.get("path_mapping") or {}).items():
        if domain_filter and domain_filter != domain:
            continue
        items = []
        for rel in domain_cfg.get("specs") or []:
            # Record the mapping even for missing files so callers can see
            # which spec belongs to which domain.
            spec_domain_map[rel] = domain
            full_path = os.path.join(specs_root, rel)
            if not os.path.exists(full_path):
                continue
            content = read_text(full_path)
            if not content:
                continue
            tokens = estimate_tokens(content)
            items.append({"path": rel, "tokens": tokens})
            total_tokens += tokens
        if items:
            l1[domain] = items

    # Guard against division by zero when the configured total budget is 0.
    utilization = "0%"
    if total_budget:
        utilization = f"{round(total_tokens * 100 / total_budget)}%"

    warnings = []
    if l0_tokens > l0_budget:
        warnings.append("L0 超出预算")
    l1_tokens = total_tokens - l0_tokens
    if l1_tokens > l1_budget:
        warnings.append("L1 超出预算")
    if total_tokens > total_budget:
        warnings.append("总预算超出")

    output = {
        "l0": {"file": "CLAUDE.md", "tokens": l0_tokens, "budget": l0_budget},
        "l1": l1,
        "total_tokens": total_tokens,
        "total_budget": total_budget,
        "utilization": utilization,
        "warnings": warnings,
        "spec_domain_map": spec_domain_map,
    }
    if json_output:
        print(json.dumps(output, ensure_ascii=False))
        return

    # Human-readable summary (--human).
    print("L0 (CLAUDE.md):", f"{l0_tokens} / {l0_budget}")
    for domain, items in l1.items():
        total_domain = sum(item["tokens"] for item in items)
        print(f"L1 {domain}:", total_domain)
        for item in items:
            print(" -", item["path"], item["tokens"])
    print("TOTAL:", f"{total_tokens} / {total_budget}")
    print("UTILIZATION:", utilization)
    if warnings:
        print("WARNINGS:", "; ".join(warnings))
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
# Script entry point: emit the token-usage report when run directly.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,340 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
import fnmatch
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import subprocess
|
|
6
|
+
import sys
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def load_validation(project_root: str) -> dict:
    """Load validator definitions for *project_root*.

    Preference order:
      1. ``.code-flow/validation.yml`` parsed with PyYAML; returns ``{}`` when
         PyYAML is unavailable or the file cannot be parsed.
      2. Fallback validators synthesized from ``package.json`` ``scripts``
         (``lint`` / ``test``), mirroring the YAML schema.
    Returns ``{}`` when neither source yields anything usable.
    """
    yml_path = os.path.join(project_root, ".code-flow", "validation.yml")
    if os.path.exists(yml_path):
        try:
            import yaml
        except Exception:
            return {}
        try:
            with open(yml_path, "r", encoding="utf-8") as handle:
                parsed = yaml.safe_load(handle)
                return parsed or {}
        except Exception:
            return {}

    pkg_path = os.path.join(project_root, "package.json")
    if not os.path.exists(pkg_path):
        return {}
    try:
        with open(pkg_path, "r", encoding="utf-8") as handle:
            pkg = json.load(handle)
    except Exception:
        return {}

    scripts = pkg.get("scripts") or {}
    fallback = []
    if "lint" in scripts:
        fallback.append(
            {
                "name": "npm run lint",
                "trigger": "**/*.{ts,tsx,js,jsx}",
                "command": "npm run lint",
                "timeout": 30000,
                "on_fail": "检查 lint 配置",
            }
        )
    if "test" in scripts:
        # When a lint validator already covers JS/TS, the test validator keeps
        # the same trigger; otherwise it widens to include Python files.
        wide = "**/*.{ts,tsx,js,jsx,py}"
        narrow = "**/*.{ts,tsx,js,jsx}"
        fallback.append(
            {
                "name": "npm test",
                "trigger": narrow if "lint" in scripts else wide,
                "command": "npm test",
                "timeout": 60000,
                "on_fail": "检查测试用例",
            }
        )

    return {"validators": fallback}
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def expand_pattern(pattern: str) -> list:
    """Expand one ``{a,b,...}`` brace group into concrete glob patterns.

    ``"**/*.{ts,js}"`` becomes ``["**/*.ts", "**/*.js"]``; a pattern with no
    brace group is returned as a single-element list.  Only the first group
    is expanded.
    """
    if "{" not in pattern or "}" not in pattern:
        return [pattern]
    head, remainder = pattern.split("{", 1)
    choices, tail = remainder.split("}", 1)
    return [head + choice + tail for choice in choices.split(",")]
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def normalize_path(path: str) -> str:
    """Convert OS-native separators to forward slashes for stable matching."""
    return "/".join(path.split(os.sep))
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def match_files(pattern: str, files: list) -> list:
    """Return the sorted, de-duplicated subset of *files* matching *pattern*.

    The pattern may contain one ``{...}`` brace group (see expand_pattern).
    fnmatch has no true ``**`` semantics, so every ``**/``-prefixed pattern is
    also tried with that prefix stripped, letting top-level files match too.
    """
    candidates = expand_pattern(pattern)
    hits = set()
    for original in files:
        as_posix = normalize_path(original)
        variants = []
        for candidate in candidates:
            variants.append(candidate)
            if "**/" in candidate:
                variants.append(candidate.replace("**/", ""))
        if any(fnmatch.fnmatch(as_posix, variant) for variant in variants):
            hits.add(original)
    return sorted(hits)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def truncate(text: str, limit: int = 2000) -> str:
    """Clip *text* to at most *limit* characters, appending "..." if clipped."""
    return text if len(text) <= limit else text[:limit] + "..."
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def quote_single(path: str) -> str:
    """Wrap *path* in single quotes for safe POSIX-shell interpolation.

    Embedded single quotes use the standard close-quote / double-quoted-quote /
    reopen-quote escape ('"'"').
    """
    escaped = path.replace("'", "'\"'\"'")
    return f"'{escaped}'"
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def normalize_requested_files(
    project_root: str,
    requested: list,
    require_exists: bool,
) -> tuple:
    """Resolve user-supplied paths to project-relative, slash-separated paths.

    Each entry (absolute, or relative to *project_root*) must resolve inside
    the project root; with *require_exists* it must also exist on disk.
    Empty entries are skipped.  Returns ``(sorted_unique_paths, "")`` on
    success, or ``([], code)`` where code is one of ``"invalid_path"``,
    ``"outside_project_root"``, ``"file_missing"``.
    """
    root_abs = os.path.abspath(project_root)
    collected = set()
    for entry in requested:
        if not entry:
            continue
        if os.path.isabs(entry):
            resolved = os.path.abspath(entry)
        else:
            resolved = os.path.abspath(os.path.join(project_root, entry))
        try:
            shared = os.path.commonpath([root_abs, resolved])
        except Exception:
            # commonpath raises when the paths cannot be compared
            # (e.g. different drives on Windows).
            return [], "invalid_path"
        if shared != root_abs:
            return [], "outside_project_root"
        if require_exists and not os.path.exists(resolved):
            return [], "file_missing"
        rel = os.path.relpath(resolved, project_root)
        # Normalize to forward slashes so results match git's path style.
        collected.add("/".join(rel.split(os.sep)))
    return sorted(collected), ""
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def main() -> None:
    """Run every configured validator against the changed files and report JSON.

    Flow:
      1. Load validators (validation.yml or package.json fallback); abort with
         ``validation_config_missing`` if none.
      2. Collect target files: positional args / ``--files=a,b``; in a git
         repo they must appear in ``git diff --name-only HEAD``; without git
         they must be passed explicitly and exist on disk.
      3. For each validator, match its trigger glob, substitute ``{files}``
         into the command, and run it via the shell with a timeout.
      4. Print the aggregate result; ``--only-failed``, ``--json-short`` and
         ``--output=table`` reshape the output.
    """
    project_root = os.getcwd()
    config = load_validation(project_root)
    validators = config.get("validators") or []
    if not validators:
        print(json.dumps({"error": "validation_config_missing"}, ensure_ascii=False))
        return

    # Split CLI args into file paths vs. known output flags; unknown flags
    # are treated as file paths (and will fail validation below).
    args = sys.argv[1:]
    requested_files = []
    for arg in args:
        if arg.startswith("--files="):
            raw = arg.split("=", 1)[1]
            requested_files.extend([part.strip() for part in raw.split(",") if part.strip()])
        elif arg in {"--json-short", "--only-failed"}:
            continue
        elif arg.startswith("--output="):
            continue
        else:
            requested_files.append(arg)

    git_dir = os.path.join(project_root, ".git")
    has_git = os.path.isdir(git_dir)
    files = []

    if has_git:
        # Changed-files source of truth: the working-tree diff against HEAD.
        result = subprocess.run(
            ["git", "diff", "--name-only", "HEAD"],
            capture_output=True,
            text=True,
            check=False,
        )
        if result.returncode != 0:
            print(
                json.dumps(
                    {"error": "git_diff_failed", "hint": "检查 git 仓库状态或 HEAD 是否存在"},
                    ensure_ascii=False,
                )
            )
            return
        diff_files = [
            normalize_path(line.strip())
            for line in result.stdout.splitlines()
            if line.strip()
        ]
        if requested_files:
            # Explicit paths are allowed but must be a subset of the diff.
            normalized, error = normalize_requested_files(
                project_root, requested_files, require_exists=False
            )
            if error:
                print(
                    json.dumps(
                        {
                            "error": "invalid_files",
                            "hint": "确认文件路径位于项目根目录内",
                        },
                        ensure_ascii=False,
                    )
                )
                return
            diff_set = set(diff_files)
            invalid = [path for path in normalized if path not in diff_set]
            if invalid:
                print(
                    json.dumps(
                        {
                            "error": "files_not_in_diff",
                            "hint": "仅允许使用 `git diff --name-only HEAD` 中的文件路径",
                            "files": invalid,
                        },
                        ensure_ascii=False,
                    )
                )
                return
            files = normalized
        else:
            files = diff_files
    else:
        # No git repo: files must be passed explicitly and must exist.
        if not requested_files:
            print(
                json.dumps(
                    {
                        "error": "no_git_repo",
                        "hint": "无 git 仓库时请通过 --files 或位置参数显式传入文件路径",
                    },
                    ensure_ascii=False,
                )
            )
            return
        normalized, error = normalize_requested_files(
            project_root, requested_files, require_exists=True
        )
        if error:
            print(
                json.dumps(
                    {
                        "error": "invalid_files",
                        "hint": "确认文件存在且位于项目根目录内",
                    },
                    ensure_ascii=False,
                )
            )
            return
        files = normalized

    if not files:
        print(json.dumps({"error": "no_files"}, ensure_ascii=False))
        return

    results = []
    all_passed = True

    for validator in validators:
        name = validator.get("name", "unnamed")
        trigger = validator.get("trigger", "")
        command = validator.get("command", "")
        timeout_ms = validator.get("timeout", 30000)
        on_fail = validator.get("on_fail", "")

        # A validator with no trigger (or no matching files) is skipped,
        # and a skip does not affect the overall pass/fail verdict.
        matched_files = match_files(trigger, files) if trigger else []
        if not matched_files:
            results.append(
                {
                    "name": name,
                    "status": "skipped",
                    "matched_files": [],
                    "command": command,
                }
            )
            continue

        # Matched paths are single-quoted before substitution into {files};
        # commands without a {files} placeholder run unchanged.
        quoted_files = " ".join(quote_single(path) for path in matched_files)
        run_command = command.replace("{files}", quoted_files)

        # Config timeouts are in milliseconds; fall back to 30s on bad values.
        try:
            timeout = int(timeout_ms) / 1000
        except Exception:
            timeout = 30

        status = "passed"
        exit_code = 0
        stdout = ""
        stderr = ""

        try:
            # NOTE(review): shell=True executes config-supplied command
            # strings; validation.yml is assumed to be trusted project
            # config — confirm it cannot be attacker-controlled.
            proc = subprocess.run(
                run_command,
                shell=True,
                capture_output=True,
                text=True,
                timeout=timeout,
            )
            exit_code = proc.returncode
            stdout = proc.stdout.strip()
            stderr = proc.stderr.strip()
            if exit_code != 0:
                status = "failed"
                all_passed = False
        except subprocess.TimeoutExpired:
            # On timeout/error, exit_code/stdout/stderr keep their defaults.
            status = "timeout"
            all_passed = False
        except Exception:
            status = "error"
            all_passed = False

        results.append(
            {
                "name": name,
                "status": status,
                "matched_files": matched_files,
                "command": run_command,
                "exit_code": exit_code,
                "stdout": truncate(stdout),
                "stderr": truncate(stderr),
                "on_fail": on_fail,
            }
        )

    output = {"passed": all_passed, "results": results}
    # --only-failed narrows the result list but keeps the overall verdict.
    if "--only-failed" in sys.argv:
        output = {
            "passed": all_passed,
            "results": [
                item
                for item in results
                if item["status"] in {"failed", "timeout", "error"}
            ],
        }

    # --json-short composes with --only-failed (it reshapes whatever list
    # survived the previous filter).
    if "--json-short" in sys.argv:
        output = {
            "passed": all_passed,
            "results": [
                {
                    "name": item["name"],
                    "status": item["status"],
                    "on_fail": item.get("on_fail", ""),
                }
                for item in output.get("results", [])
            ],
        }

    if "--output=table" in sys.argv:
        print("NAME | STATUS | MATCHED_FILES | COMMAND")
        for item in output.get("results", []):
            matched = ",".join(item.get("matched_files", []))
            command = item.get("command", "")
            print(f"{item.get('name')} | {item.get('status')} | {matched} | {command}")
        return

    print(json.dumps(output, ensure_ascii=False))
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
# Script entry point: run the validator pipeline when executed directly.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
# Default code-flow validators (consumed by cf_validate.py).
# Schema per entry:
#   name:    display label shown in reports
#   trigger: glob matched against changed files; may contain one {a,b,...}
#            brace group
#   command: shell command to run; "{files}" expands to the shell-quoted
#            matched file paths (commands without the placeholder run as-is)
#   timeout: per-run limit in milliseconds
#   on_fail: hint surfaced when the validator fails
validators:
  - name: "Python 语法检查"
    trigger: "**/*.py"
    command: "python3 -m py_compile {files}"
    timeout: 30000
    on_fail: "检查语法错误"

  # Whole-project type check; tsc has no {files} placeholder by design.
  - name: "TypeScript 类型检查"
    trigger: "**/*.{ts,tsx}"
    command: "npx tsc --noEmit"
    timeout: 30000
    on_fail: "检查类型定义"

  - name: "ESLint"
    trigger: "**/*.{ts,tsx,js,jsx}"
    command: "npx eslint {files}"
    timeout: 15000
    on_fail: "运行 npx eslint --fix 自动修复"

  - name: "Python 类型检查"
    trigger: "**/*.py"
    command: "python3 -m mypy {files}"
    timeout: 30000
    on_fail: "检查类型注解,参见 specs/backend/code-quality-performance.md"

  # NOTE(review): no {files} here — any changed .py file triggers the full
  # test suite, not just tests touching the changed files.
  - name: "Pytest"
    trigger: "**/*.py"
    command: "python3 -m pytest --tb=short -q"
    timeout: 60000
    on_fail: "测试失败,检查断言和 mock 是否需要更新"
|