@aipper/aiws-spec 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145)
  1. package/README.md +12 -0
  2. package/docs/cli-interface.md +288 -0
  3. package/docs/spec-contract.md +183 -0
  4. package/package.json +18 -0
  5. package/templates/workspace/.agents/skills/aiws-change-archive/SKILL.md +23 -0
  6. package/templates/workspace/.agents/skills/aiws-change-list/SKILL.md +18 -0
  7. package/templates/workspace/.agents/skills/aiws-change-new/SKILL.md +26 -0
  8. package/templates/workspace/.agents/skills/aiws-change-next/SKILL.md +19 -0
  9. package/templates/workspace/.agents/skills/aiws-change-start/SKILL.md +27 -0
  10. package/templates/workspace/.agents/skills/aiws-change-status/SKILL.md +19 -0
  11. package/templates/workspace/.agents/skills/aiws-change-sync/SKILL.md +19 -0
  12. package/templates/workspace/.agents/skills/aiws-change-templates-init/SKILL.md +18 -0
  13. package/templates/workspace/.agents/skills/aiws-change-templates-which/SKILL.md +18 -0
  14. package/templates/workspace/.agents/skills/aiws-change-validate/SKILL.md +23 -0
  15. package/templates/workspace/.agents/skills/aiws-hooks-install/SKILL.md +30 -0
  16. package/templates/workspace/.agents/skills/aiws-hooks-status/SKILL.md +18 -0
  17. package/templates/workspace/.agents/skills/aiws-init/SKILL.md +27 -0
  18. package/templates/workspace/.agents/skills/aiws-rollback/SKILL.md +18 -0
  19. package/templates/workspace/.agents/skills/aiws-update/SKILL.md +26 -0
  20. package/templates/workspace/.agents/skills/aiws-validate/SKILL.md +22 -0
  21. package/templates/workspace/.agents/skills/ws-analyze/SKILL.md +26 -0
  22. package/templates/workspace/.agents/skills/ws-commit/SKILL.md +50 -0
  23. package/templates/workspace/.agents/skills/ws-dev/SKILL.md +34 -0
  24. package/templates/workspace/.agents/skills/ws-migrate/SKILL.md +54 -0
  25. package/templates/workspace/.agents/skills/ws-plan/SKILL.md +39 -0
  26. package/templates/workspace/.agents/skills/ws-preflight/SKILL.md +29 -0
  27. package/templates/workspace/.agents/skills/ws-req-change/SKILL.md +33 -0
  28. package/templates/workspace/.agents/skills/ws-req-contract-sync/SKILL.md +17 -0
  29. package/templates/workspace/.agents/skills/ws-req-contract-validate/SKILL.md +12 -0
  30. package/templates/workspace/.agents/skills/ws-req-flow-sync/SKILL.md +28 -0
  31. package/templates/workspace/.agents/skills/ws-req-review/SKILL.md +32 -0
  32. package/templates/workspace/.agents/skills/ws-review/SKILL.md +24 -0
  33. package/templates/workspace/.agents/skills/ws-rule/SKILL.md +23 -0
  34. package/templates/workspace/.aiws/manifest.json +36 -0
  35. package/templates/workspace/.claude/commands/aiws-init.md +19 -0
  36. package/templates/workspace/.claude/commands/aiws-rollback.md +12 -0
  37. package/templates/workspace/.claude/commands/aiws-update.md +18 -0
  38. package/templates/workspace/.claude/commands/aiws-validate.md +13 -0
  39. package/templates/workspace/.claude/commands/ws-analyze.md +27 -0
  40. package/templates/workspace/.claude/commands/ws-dev.md +24 -0
  41. package/templates/workspace/.claude/commands/ws-migrate.md +22 -0
  42. package/templates/workspace/.claude/commands/ws-preflight.md +27 -0
  43. package/templates/workspace/.claude/commands/ws-req-change.md +34 -0
  44. package/templates/workspace/.claude/commands/ws-req-contract-sync.md +18 -0
  45. package/templates/workspace/.claude/commands/ws-req-contract-validate.md +13 -0
  46. package/templates/workspace/.claude/commands/ws-req-flow-sync.md +20 -0
  47. package/templates/workspace/.claude/commands/ws-req-review.md +33 -0
  48. package/templates/workspace/.claude/commands/ws-review.md +25 -0
  49. package/templates/workspace/.claude/commands/ws-rule.md +24 -0
  50. package/templates/workspace/.codex/prompts/aiws-init.md +23 -0
  51. package/templates/workspace/.codex/prompts/aiws-rollback.md +16 -0
  52. package/templates/workspace/.codex/prompts/aiws-update.md +22 -0
  53. package/templates/workspace/.codex/prompts/aiws-validate.md +17 -0
  54. package/templates/workspace/.codex/prompts/ws-analyze.md +32 -0
  55. package/templates/workspace/.codex/prompts/ws-dev.md +29 -0
  56. package/templates/workspace/.codex/prompts/ws-migrate.md +27 -0
  57. package/templates/workspace/.codex/prompts/ws-preflight.md +32 -0
  58. package/templates/workspace/.codex/prompts/ws-req-change.md +39 -0
  59. package/templates/workspace/.codex/prompts/ws-req-contract-sync.md +23 -0
  60. package/templates/workspace/.codex/prompts/ws-req-contract-validate.md +18 -0
  61. package/templates/workspace/.codex/prompts/ws-req-flow-sync.md +25 -0
  62. package/templates/workspace/.codex/prompts/ws-req-review.md +38 -0
  63. package/templates/workspace/.codex/prompts/ws-review.md +30 -0
  64. package/templates/workspace/.codex/prompts/ws-rule.md +29 -0
  65. package/templates/workspace/.githooks/pre-commit +32 -0
  66. package/templates/workspace/.githooks/pre-push +32 -0
  67. package/templates/workspace/.iflow/agents/feature-reviewer.md +27 -0
  68. package/templates/workspace/.iflow/agents/requirements-analyst.md +24 -0
  69. package/templates/workspace/.iflow/agents/server-commit-manager.md +28 -0
  70. package/templates/workspace/.iflow/agents/server-fix-implementer.md +31 -0
  71. package/templates/workspace/.iflow/agents/server-test-planner.md +28 -0
  72. package/templates/workspace/.iflow/agents/server-test-triager.md +30 -0
  73. package/templates/workspace/.iflow/commands/aiws-init.toml +24 -0
  74. package/templates/workspace/.iflow/commands/aiws-rollback.toml +18 -0
  75. package/templates/workspace/.iflow/commands/aiws-update.toml +23 -0
  76. package/templates/workspace/.iflow/commands/aiws-validate.toml +18 -0
  77. package/templates/workspace/.iflow/commands/server-commit.toml +27 -0
  78. package/templates/workspace/.iflow/commands/server-drain.toml +99 -0
  79. package/templates/workspace/.iflow/commands/server-fix-and-commit.toml +27 -0
  80. package/templates/workspace/.iflow/commands/server-fix.toml +65 -0
  81. package/templates/workspace/.iflow/commands/server-test-plan.toml +62 -0
  82. package/templates/workspace/.iflow/commands/server-test.toml +58 -0
  83. package/templates/workspace/.iflow/commands/server-triage.toml +38 -0
  84. package/templates/workspace/.iflow/commands/server_test-plan.toml +12 -0
  85. package/templates/workspace/.iflow/commands/server_test.toml +12 -0
  86. package/templates/workspace/.iflow/commands/ws-analyze.toml +33 -0
  87. package/templates/workspace/.iflow/commands/ws-contract-check.toml +69 -0
  88. package/templates/workspace/.iflow/commands/ws-dev.toml +34 -0
  89. package/templates/workspace/.iflow/commands/ws-doctor.toml +141 -0
  90. package/templates/workspace/.iflow/commands/ws-env-doctor.toml +74 -0
  91. package/templates/workspace/.iflow/commands/ws-feature-deliver.toml +44 -0
  92. package/templates/workspace/.iflow/commands/ws-feature-plan.toml +47 -0
  93. package/templates/workspace/.iflow/commands/ws-init.toml +53 -0
  94. package/templates/workspace/.iflow/commands/ws-memory-bank-init.toml +100 -0
  95. package/templates/workspace/.iflow/commands/ws-migrate.toml +59 -0
  96. package/templates/workspace/.iflow/commands/ws-preflight.toml +30 -0
  97. package/templates/workspace/.iflow/commands/ws-req-change.toml +52 -0
  98. package/templates/workspace/.iflow/commands/ws-req-contract-sync.toml +25 -0
  99. package/templates/workspace/.iflow/commands/ws-req-contract-validate.toml +16 -0
  100. package/templates/workspace/.iflow/commands/ws-req-flow-sync.toml +36 -0
  101. package/templates/workspace/.iflow/commands/ws-req-review.toml +56 -0
  102. package/templates/workspace/.iflow/commands/ws-review.toml +32 -0
  103. package/templates/workspace/.iflow/commands/ws-rule.toml +43 -0
  104. package/templates/workspace/.opencode/command/aiws-init.md +19 -0
  105. package/templates/workspace/.opencode/command/aiws-rollback.md +12 -0
  106. package/templates/workspace/.opencode/command/aiws-update.md +18 -0
  107. package/templates/workspace/.opencode/command/aiws-validate.md +13 -0
  108. package/templates/workspace/.opencode/command/ws-analyze.md +27 -0
  109. package/templates/workspace/.opencode/command/ws-dev.md +24 -0
  110. package/templates/workspace/.opencode/command/ws-migrate.md +22 -0
  111. package/templates/workspace/.opencode/command/ws-preflight.md +27 -0
  112. package/templates/workspace/.opencode/command/ws-req-change.md +34 -0
  113. package/templates/workspace/.opencode/command/ws-req-contract-sync.md +18 -0
  114. package/templates/workspace/.opencode/command/ws-req-contract-validate.md +13 -0
  115. package/templates/workspace/.opencode/command/ws-req-flow-sync.md +20 -0
  116. package/templates/workspace/.opencode/command/ws-req-review.md +33 -0
  117. package/templates/workspace/.opencode/command/ws-review.md +25 -0
  118. package/templates/workspace/.opencode/command/ws-rule.md +24 -0
  119. package/templates/workspace/AGENTS.md +22 -0
  120. package/templates/workspace/AI_PROJECT.md +86 -0
  121. package/templates/workspace/AI_WORKSPACE.md +167 -0
  122. package/templates/workspace/REQUIREMENTS.md +94 -0
  123. package/templates/workspace/changes/README.md +55 -0
  124. package/templates/workspace/changes/templates/design.md +29 -0
  125. package/templates/workspace/changes/templates/proposal.md +59 -0
  126. package/templates/workspace/changes/templates/tasks.md +33 -0
  127. package/templates/workspace/issues/problem-issues.csv +2 -0
  128. package/templates/workspace/manifest.json +205 -0
  129. package/templates/workspace/memory-bank/README.md +14 -0
  130. package/templates/workspace/memory-bank/architecture.md +9 -0
  131. package/templates/workspace/memory-bank/implementation-plan.md +11 -0
  132. package/templates/workspace/memory-bank/progress.md +10 -0
  133. package/templates/workspace/memory-bank/tech-stack.md +11 -0
  134. package/templates/workspace/requirements/CHANGELOG.md +13 -0
  135. package/templates/workspace/requirements/requirements-issues.csv +2 -0
  136. package/templates/workspace/secrets/test-accounts.example.json +32 -0
  137. package/templates/workspace/tools/iflow_watchdog.sh +138 -0
  138. package/templates/workspace/tools/install_iflow_watchdog_systemd_user.sh +118 -0
  139. package/templates/workspace/tools/requirements_contract.py +285 -0
  140. package/templates/workspace/tools/requirements_contract_sync.py +290 -0
  141. package/templates/workspace/tools/requirements_flow_gen.py +250 -0
  142. package/templates/workspace/tools/server_test_runner.py +1902 -0
  143. package/templates/workspace/tools/systemd/iflow-watchdog@.service +16 -0
  144. package/templates/workspace/tools/systemd/iflow-watchdog@.timer +11 -0
  145. package/templates/workspace/tools/ws_change_check.py +323 -0
@@ -0,0 +1,1902 @@
1
+ #!/usr/bin/env -S uv run --script
2
+ # /// script
3
+ # requires-python = ">=3.10"
4
+ # dependencies = [
5
+ # "requests>=2.31.0",
6
+ # "faker>=24.0.0",
7
+ # "jsonschema>=4.22.0",
8
+ # "jsonref>=1.1.0",
9
+ # "rstr>=3.2.2",
10
+ # "uv-mirror[china]>=0.2.1",
11
+ # ]
12
+ # ///
13
+
14
+ from __future__ import annotations
15
+
16
+ import argparse
17
+ import csv
18
+ import json
19
+ import os
20
+ import re
21
+ import signal
22
+ import subprocess
23
+ import sys
24
+ import time
25
+ import uuid
26
+ from dataclasses import dataclass
27
+ from pathlib import Path
28
+ from typing import Any, Dict, Iterable, List, Optional, Tuple
29
+
30
+ # Prefer faster/closer PyPI mirror when available (no-op if not installed).
31
+ try:
32
+ import uv_mirror # type: ignore
33
+ except Exception:
34
+ uv_mirror = None
35
+
36
+ try:
37
+ import jsonref # type: ignore
38
+ except Exception:
39
+ jsonref = None
40
+
41
+ import requests
42
+
43
# Increase CSV field size limit to avoid "field larger than field limit" errors on large responses.
try:
    csv.field_size_limit(10 * 1024 * 1024)  # 10 MB per field
except Exception:
    # Best-effort: if the platform rejects the larger limit, keep the default.
    pass

# Keep a copy of the loaded OpenAPI for $ref resolution in helpers.
OPENAPI_DOC: Dict[str, Any] = {}


# Column layout of the test-plan CSV (one row per endpoint check).
# sync_ai_project_rule_rows() appends RULE-* rows using this same schema.
CSV_COLUMNS = [
    "Issue_ID",
    "Service",
    "Title",
    "Method",
    "Path",
    "Auth",
    "Request_Example",
    "Expected_Status",
    "Expected_Body_Checks",
    "Log_Checks",
    "Test_Status",
    "Review_Status",
    "Notes",
]

# Column layout of the fix-tracking CSV — presumably written by the fix/commit
# flow; the producer is not visible in this chunk (verify against the full file).
FIX_CSV_COLUMNS = [
    "Issue_ID",
    "Source_Issue_ID",
    "Service",
    "Title",
    "Method",
    "Path",
    "Status",
    "Evidence",
    "Failure_Category",
    "Failure_Analysis",
    "Suggestion",
    "Notes",
    "Created_At",
    "Updated_At",
]

# Column layout of the triage CSV — presumably written by the triage flow;
# the producer is not visible in this chunk (verify against the full file).
TRIAGE_CSV_COLUMNS = [
    "Triage_ID",
    "Fix_Issue_ID",
    "Source_Issue_ID",
    "Service",
    "Method",
    "Path",
    "Failure_Category",
    "Evidence",
    "Failure_Analysis",
    "Suggestion",
    "Notes",
    "Created_At",
    "Updated_At",
]
101
+
102
+
103
@dataclass(frozen=True)
class WorkspaceConfig:
    """Immutable runner settings parsed from AI_WORKSPACE.md (every field has a safe local-testing default)."""

    environment: str = "test"
    # Gate for state-changing requests; defaults to off — assumed mutation guard, confirm against callers.
    allow_mutations: bool = False
    base_url: str = "http://127.0.0.1:8080"
    # Only loopback targets allowed by default, so the runner cannot hit prod by accident.
    base_url_allowlist: Tuple[str, ...] = (
        "http://127.0.0.1",
        "http://localhost",
        "https://127.0.0.1",
        "https://localhost",
    )
    health_path: str = "/health"
    # On-disk location the exported OpenAPI document is written to.
    openapi_path: str = "docs/openapi.json"
    # Preferred HTTP path to fetch the OpenAPI document from (see candidate_openapi_paths).
    openapi_url: str = "/openapi.json"
    log_path: str = ".agentdocs/tmp/server-test/app.log"
    # Header used to correlate individual requests with server log lines.
    request_id_header: str = "X-Request-Id"
    # Source directories of the server(s) under test; empty tuple = not configured.
    server_dirs: Tuple[str, ...] = ()
    build_cmd: str = ""
    start_cmd: str = ""
    stop_cmd: str = ""
    # When True, dangerous operations are treated as disabled (default-safe).
    dangerous_disabled: bool = True
    # Client-side rate limit for generated test traffic.
    max_requests_per_minute: int = 60
125
+
126
+
127
@dataclass(frozen=True)
class SecretsConfig:
    """Immutable, sanitized view of the secrets JSON (always constructed by load_secrets; no defaults)."""

    # Global base URL override from secrets; None when absent or blank.
    base_url: Optional[str]
    # Explicit auth headers (string->string only); these win over every other auth mode.
    headers: Dict[str, str]
    # Per-service base URL overrides keyed by service name (services.<name>.base_url).
    service_base_urls: Dict[str, str]
    # Known test resource ids for {param} path substitution; values stringified.
    resource_ids: Dict[str, str]
    # Raw "auth" config block (type/token/login settings) as found in the JSON.
    auth: Dict[str, Any]
    # Raw list of test accounts (dicts that may carry username/password/token).
    accounts: List[Dict[str, Any]]
    # Auth overrides used only when exporting the OpenAPI document.
    openapi_auth: Dict[str, Any]
136
+
137
+
138
def _b64_basic(user: str, password: str) -> str:
    """Return an HTTP Basic ``Authorization`` header value for *user*/*password*."""
    import base64

    credentials = f"{user}:{password}".encode("utf-8")
    return "Basic " + base64.b64encode(credentials).decode("ascii")
143
+
144
+
145
def _get_first_account(secrets: SecretsConfig) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """Pick the first usable account as (username, password, token); Nones when nothing usable exists."""
    for entry in secrets.accounts or []:
        if not isinstance(entry, dict):
            continue
        user = entry.get("username")
        pwd = entry.get("password")
        tok = entry.get("token")
        if isinstance(user, str) and isinstance(pwd, str):
            # Full credentials win; carry the token along only if it is a string.
            return user, pwd, tok if isinstance(tok, str) else None
        if isinstance(tok, str):
            # Token-only account.
            return None, None, tok
    return None, None, None
157
+
158
+
159
def _extract_by_dot_path(payload: Any, path: str) -> Optional[Any]:
    """Walk *payload* along a dotted *path* (e.g. "a.b.c"); return None when any hop is missing."""
    node = payload
    for segment in (path or "").split("."):
        name = segment.strip()
        if not name:
            # Blank segments (leading/trailing/double dots) are skipped, not errors.
            continue
        if not (isinstance(node, dict) and name in node):
            return None
        node = node[name]
    return node
170
+
171
+
172
def build_auth_headers(*, base_url: str, secrets: SecretsConfig, request_id_header: str) -> Dict[str, str]:
    """
    Build auth headers without leaking secrets.
    Priority:
      1) secrets.headers (explicit headers)
      2) auth.type == bearer/basic/login with accounts/auth config

    Returns {} when no usable credentials/config are found or when the login
    request fails — callers treat an empty dict as "unauthenticated".
    """
    # Explicit headers win outright; return a copy so callers cannot mutate secrets.
    if secrets.headers:
        return dict(secrets.headers)

    auth = secrets.auth or {}
    auth_type = (auth.get("type") or "").strip().lower() if isinstance(auth.get("type"), str) else ""
    username, password, account_token = _get_first_account(secrets)

    # No explicit auth type: infer one from whatever the first account provides.
    if not auth_type:
        if isinstance(account_token, str) and account_token.strip():
            auth_type = "bearer"
            auth = {**auth, "token": account_token}
        elif isinstance(username, str) and isinstance(password, str):
            auth_type = "login"

    if auth_type in ("bearer", "token"):
        token = auth.get("token")
        if not (isinstance(token, str) and token.strip()):
            # Fall back to the first account's token when auth.token is unusable.
            token = account_token
        if isinstance(token, str) and token.strip():
            header_name = auth.get("header_name") if isinstance(auth.get("header_name"), str) else "Authorization"
            scheme = auth.get("scheme") if isinstance(auth.get("scheme"), str) else "Bearer"
            return {header_name: f"{scheme} {token.strip()}"}
        return {}

    if auth_type == "basic":
        if isinstance(username, str) and isinstance(password, str):
            header_name = auth.get("header_name") if isinstance(auth.get("header_name"), str) else "Authorization"
            return {header_name: _b64_basic(username, password)}
        return {}

    if auth_type == "login":
        # Login flow: POST credentials to a login endpoint, then pull a token
        # out of the JSON response via a dotted path (default "token").
        if not (isinstance(username, str) and isinstance(password, str)):
            return {}

        # Absolute login_url takes precedence; otherwise join base_url + login_path.
        login_url = ""
        if isinstance(auth.get("login_url"), str) and auth.get("login_url", "").strip():
            login_url = auth["login_url"].strip()
        else:
            login_path = auth.get("login_path") if isinstance(auth.get("login_path"), str) else "/login"
            if not login_path.startswith("/"):
                login_path = "/" + login_path
            login_url = base_url.rstrip("/") + login_path

        method = auth.get("method") if isinstance(auth.get("method"), str) else "POST"
        method = method.strip().upper() or "POST"

        # Field names in the login body; defaults are "username"/"password".
        username_field = auth.get("username_field") if isinstance(auth.get("username_field"), str) else ""
        password_field = auth.get("password_field") if isinstance(auth.get("password_field"), str) else ""
        username_field = username_field.strip() or "username"
        password_field = password_field.strip() or "password"

        token_json_path = auth.get("token_json_path") if isinstance(auth.get("token_json_path"), str) else ""
        token_json_path = token_json_path.strip() or "token"

        extra_body = auth.get("extra_body") if isinstance(auth.get("extra_body"), dict) else {}
        content_type = auth.get("content_type") if isinstance(auth.get("content_type"), str) else "json"
        content_type = content_type.strip().lower() or "json"

        req_headers: Dict[str, str] = {}
        # Fresh request id so the login call can be correlated in server logs.
        req_headers[request_id_header] = str(uuid.uuid4())
        try:
            if content_type == "form":
                # Form-encoded login body (data=) vs JSON body (json=) below.
                body = {**extra_body, username_field: username, password_field: password}
                resp = requests.request(method=method, url=login_url, headers=req_headers, timeout=15, data=body)
            else:
                body = {**extra_body, username_field: username, password_field: password}
                resp = requests.request(method=method, url=login_url, headers=req_headers, timeout=15, json=body)
        except Exception:  # noqa: BLE001
            # Best-effort: any network/login error degrades to unauthenticated.
            return {}

        if not (200 <= int(getattr(resp, "status_code", 0)) < 300):
            return {}
        try:
            payload = resp.json()
        except Exception:  # noqa: BLE001
            return {}

        token = _extract_by_dot_path(payload, token_json_path)
        if not (isinstance(token, str) and token.strip()):
            return {}

        header_name = auth.get("header_name") if isinstance(auth.get("header_name"), str) else "Authorization"
        scheme = auth.get("scheme") if isinstance(auth.get("scheme"), str) else "Bearer"
        return {header_name: f"{scheme} {token.strip()}"}

    return {}
265
+
266
+
267
def build_openapi_headers(*, secrets: SecretsConfig, request_id_header: str) -> Dict[str, str]:
    """
    Build headers for OpenAPI export; prefers secrets.openapi_auth, otherwise falls back to API auth headers.
    Supports headers/basic/bearer minimal cases.
    """
    cfg = secrets.openapi_auth or {}

    # Explicit openapi_auth.headers win; keep only string->string pairs.
    raw_headers = cfg.get("headers") if isinstance(cfg.get("headers"), dict) else {}
    explicit = {k: v for k, v in raw_headers.items() if isinstance(k, str) and isinstance(v, str)}
    if explicit:
        return explicit

    typ = (cfg.get("type") or "").strip().lower() if isinstance(cfg.get("type"), str) else ""
    username = cfg.get("username") if isinstance(cfg.get("username"), str) else None
    password = cfg.get("password") if isinstance(cfg.get("password"), str) else None
    token = cfg.get("token") if isinstance(cfg.get("token"), str) else None
    if not (username and password):
        # Borrow credentials from the first configured test account.
        acc_user, acc_pass, acc_token = _get_first_account(secrets)
        username = username or acc_user
        password = password or acc_pass
        token = token or acc_token

    header_name = cfg.get("header_name") if isinstance(cfg.get("header_name"), str) else "Authorization"
    if typ == "basic" and username and password:
        return {header_name: _b64_basic(username, password)}
    if typ in ("bearer", "token") and token:
        scheme = cfg.get("scheme") if isinstance(cfg.get("scheme"), str) else "Bearer"
        return {header_name: f"{scheme} {token}"}

    # Fallback: reuse API auth headers (dropping the per-request id header if present).
    fallback = build_auth_headers(base_url="", secrets=secrets, request_id_header=request_id_header)
    fallback.pop(request_id_header, None)
    return fallback
305
+
306
+
307
def _bool_from_str(value: str) -> Optional[bool]:
    """Parse a human-written boolean string; return None when unrecognized."""
    lowered = value.strip().lower()
    if lowered in {"true", "yes", "y", "1", "on"}:
        return True
    if lowered in {"false", "no", "n", "0", "off"}:
        return False
    return None
314
+
315
+
316
def parse_ai_workspace_md(path: Path) -> WorkspaceConfig:
    """
    Parse runner settings out of AI_WORKSPACE.md.

    The file is markdown with `- key: value` bullet lines (optionally quoted)
    and nested bullet lists for list-valued keys. Missing or unparseable
    values fall back to the WorkspaceConfig defaults; a missing file yields
    a default WorkspaceConfig.
    """
    if not path.exists():
        return WorkspaceConfig()

    text = path.read_text(encoding="utf-8", errors="replace")

    def find_scalar(key: str) -> Optional[str]:
        # Try double-quoted, single-quoted, then bare values for `- key: value`.
        patterns = [
            rf"^\s*-\s*{re.escape(key)}\s*:\s*\"([^\"]+)\"\s*$",
            rf"^\s*-\s*{re.escape(key)}\s*:\s*'([^']+)'\s*$",
            rf"^\s*-\s*{re.escape(key)}\s*:\s*([^\s#]+)\s*$",
        ]
        for pat in patterns:
            m = re.search(pat, text, flags=re.MULTILINE)
            if m:
                return m.group(1).strip()
        return None

    def find_list(key: str) -> List[str]:
        # Collect the `- item` bullets that follow a bare `- key:` line.
        lines = text.splitlines()
        start_idx = None
        for i, line in enumerate(lines):
            if re.match(rf"^\s*-\s*{re.escape(key)}\s*:\s*$", line):
                start_idx = i + 1
                break
        if start_idx is None:
            return []
        items: List[str] = []
        for line in lines[start_idx:]:
            m = re.match(r"^\s*-\s+(.+?)\s*$", line)
            if not m:
                if line.strip() == "":
                    # Blank lines inside the list are tolerated.
                    continue
                # First non-bullet, non-blank line ends the list.
                break
            items.append(m.group(1).strip())
        return items

    environment = find_scalar("environment") or "test"
    allow_mutations_raw = find_scalar("allow_mutations")
    allow_mutations = _bool_from_str(allow_mutations_raw) if allow_mutations_raw else None

    base_url = find_scalar("base_url") or "http://127.0.0.1:8080"
    # Empty parsed list falls through to the class-level default allowlist.
    base_url_allowlist = tuple(find_list("base_url_allowlist")) or WorkspaceConfig.base_url_allowlist
    health_path = find_scalar("health_path") or "/health"
    openapi_path = find_scalar("openapi_path") or "docs/openapi.json"
    openapi_url = find_scalar("openapi_url") or "/openapi.json"
    log_path = find_scalar("log_path") or ".agentdocs/tmp/server-test/app.log"
    request_id_header = find_scalar("request_id_header") or "X-Request-Id"

    server_dirs = tuple(find_list("server_dirs"))
    dangerous_disabled_raw = find_scalar("dangerous_disabled")
    dangerous_disabled = _bool_from_str(dangerous_disabled_raw) if dangerous_disabled_raw else None
    max_rpm_raw = find_scalar("max_requests_per_minute")
    try:
        max_rpm = int(max_rpm_raw) if max_rpm_raw else WorkspaceConfig.max_requests_per_minute
    except ValueError:
        # Non-numeric rate limit: keep the default rather than fail.
        max_rpm = WorkspaceConfig.max_requests_per_minute

    return WorkspaceConfig(
        environment=environment,
        # None (absent/unparseable) resolves to the safe default: no mutations.
        allow_mutations=bool(allow_mutations) if allow_mutations is not None else False,
        base_url=base_url,
        base_url_allowlist=base_url_allowlist,
        health_path=health_path,
        openapi_path=openapi_path,
        openapi_url=openapi_url,
        log_path=log_path,
        request_id_header=request_id_header,
        server_dirs=server_dirs,
        build_cmd=find_scalar("build_cmd") or "",
        start_cmd=find_scalar("start_cmd") or "",
        stop_cmd=find_scalar("stop_cmd") or "",
        # None (absent/unparseable) resolves to the safe default: dangerous ops disabled.
        dangerous_disabled=bool(dangerous_disabled) if dangerous_disabled is not None else True,
        max_requests_per_minute=max_rpm,
    )
391
+
392
+
393
def load_secrets(secrets_path: Path) -> SecretsConfig:
    """Load the test-accounts secrets JSON at *secrets_path* into a sanitized SecretsConfig.

    Raises FileNotFoundError when the file is missing; malformed sub-sections
    are silently dropped rather than failing the whole load.
    """
    if not secrets_path.exists():
        raise FileNotFoundError(f"missing secrets file: {secrets_path}")
    data = json.loads(secrets_path.read_text(encoding="utf-8"))

    auth_raw = data.get("auth") if isinstance(data.get("auth"), dict) else {}

    # Only string->string pairs from auth.headers survive.
    headers_raw = (auth_raw or {}).get("headers") or {}
    headers: Dict[str, str] = {}
    if isinstance(headers_raw, dict):
        headers = {k: v for k, v in headers_raw.items() if isinstance(k, str) and isinstance(v, str)}

    raw_base = data.get("base_url")
    base_url_out = raw_base if isinstance(raw_base, str) and raw_base.strip() else None

    # Per-service overrides live under services.<name>.base_url.
    service_base_urls: Dict[str, str] = {}
    services = data.get("services")
    if isinstance(services, dict):
        for name, svc in services.items():
            if not (isinstance(name, str) and isinstance(svc, dict)):
                continue
            candidate = svc.get("base_url")
            if isinstance(candidate, str) and candidate.strip():
                service_base_urls[name] = candidate.strip()

    # Resource ids may be strings or ints; normalize everything to str.
    resource_ids: Dict[str, str] = {}
    ids = data.get("test_resource_ids")
    if isinstance(ids, dict):
        resource_ids = {k: str(v) for k, v in ids.items() if isinstance(k, str) and isinstance(v, (str, int))}

    return SecretsConfig(
        base_url=base_url_out,
        headers=headers,
        service_base_urls=service_base_urls,
        resource_ids=resource_ids,
        auth=auth_raw if isinstance(auth_raw, dict) else {},
        accounts=data.get("accounts") if isinstance(data.get("accounts"), list) else [],
        openapi_auth=data.get("openapi_auth") if isinstance(data.get("openapi_auth"), dict) else {},
    )
435
+
436
+
437
def _read_ai_project_rules(workspace_root: Path) -> Tuple[bool, List[str]]:
    """
    Returns (exists_and_has_rules, violations)
    """
    ai_project = workspace_root / "AI_PROJECT.md"
    if not ai_project.exists():
        return False, ["AI_PROJECT.md 缺失(运行 /ws-migrate 或 `npx @aipper/aiws init .` 补齐模板)"]

    text = ai_project.read_text(encoding="utf-8", errors="replace")
    violations: List[str] = []

    if "AI_PROJECT_VERSION" not in text:
        violations.append("缺少 AI_PROJECT_VERSION 标记(请用模板版本补齐)")

    begin_marker = "<!-- AI_PROJECT_RULES_BEGIN -->"
    end_marker = "<!-- AI_PROJECT_RULES_END -->"
    if begin_marker not in text or end_marker not in text:
        violations.append("缺少 AI_PROJECT_RULES BEGIN/END 标记(请用模板补齐)")
        return False, violations

    # Slice out the managed section between the BEGIN/END markers.
    section_start = text.find(begin_marker) + len(begin_marker)
    section_end = text.find(end_marker, section_start)
    managed = text[section_start:section_end] if section_end != -1 else ""

    def _looks_like_template(line: str) -> bool:
        # Placeholder/template lines do not count as real project rules.
        if "ws-rule" in line or "ws:rule" in line:
            return True
        if "建议写成" in line:
            return True
        return line.startswith("<!--") and line.endswith("-->")

    content_lines = [ln.strip() for ln in managed.splitlines() if ln.strip()]
    if not any(not _looks_like_template(ln) for ln in content_lines):
        violations.append("AI_PROJECT_RULES 段为空或仍是模板内容(请用 /ws-rule 写入项目规则)")

    return len(violations) == 0, violations
475
+
476
+
477
def sync_ai_project_rule_rows(csv_path: Path, workspace_root: Path) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
    """
    Upsert lint results into CSV as RULE-* rows and return (rows, blocked_items_for_report).
    """
    # Drop any previously synced RULE-* rows so the lint result is always fresh.
    existing = read_csv_rows(csv_path)
    kept = [r for r in existing if not str(r.get("Issue_ID", "") or "").startswith("RULE-")]

    _, violations = _read_ai_project_rules(workspace_root)
    blocked: List[Dict[str, str]] = []
    for index, message in enumerate(violations, start=1):
        rule_id = f"RULE-{index:02d}"
        new_row = {
            "Issue_ID": rule_id,
            "Service": "workspace",
            "Title": f"AI_PROJECT 规则校验失败:{message}",
            "Method": "DOC",
            "Path": "AI_PROJECT.md",
            "Auth": "",
            "Request_Example": "",
            "Expected_Status": "",
            "Expected_Body_Checks": "",
            "Log_Checks": "",
            "Test_Status": "BLOCKED",
            "Review_Status": "",
            "Notes": f"rule_violation=AI_PROJECT {message}",
        }
        kept.append(new_row)
        blocked.append(
            {
                "issue_id": rule_id,
                "service": new_row["Service"],
                "method": new_row["Method"],
                "path": new_row["Path"],
                "expected_status": "",
                "notes": new_row["Notes"],
            }
        )

    write_csv_rows(csv_path, kept)
    return kept, blocked
518
+
519
+
520
def candidate_openapi_paths(cfg: WorkspaceConfig) -> List[str]:
    """Ordered, de-duplicated spec endpoints to probe; the configured URL is tried first."""
    candidates = [
        cfg.openapi_url or "",
        "/openapi.json",
        "/openapi.yaml",
        "/v3/api-docs",
        "/v3/api-docs.yaml",
        "/swagger.json",
        "/swagger.yaml",
        "/swagger/v1/swagger.json",
        "/swagger/v1/swagger.yaml",
        "/api-docs",
        "/api-docs.yaml",
    ]
    result: List[str] = []
    for candidate in candidates:
        if not candidate:
            continue
        # Normalize to a leading slash before de-duplicating.
        normalized = candidate if candidate.startswith("/") else "/" + candidate
        if normalized not in result:
            result.append(normalized)
    return result
546
+
547
+
548
def effective_base_url(
    *,
    cli_base_url: str,
    secrets: SecretsConfig,
    ws: WorkspaceConfig,
    service_name: str,
) -> str:
    """Resolve the base URL for *service_name* by priority.

    Order: 1) explicit CLI value, 2) per-service secrets override,
    3) global secrets base_url, 4) workspace config default.
    """
    if cli_base_url.strip():
        return cli_base_url.strip()
    if service_name in secrets.service_base_urls:
        return secrets.service_base_urls[service_name]
    if secrets.base_url:
        return secrets.base_url
    # Bug fix: the original returned `workspace.base_url`, but no `workspace`
    # name exists in this scope (the parameter is `ws`) — that branch raised
    # NameError whenever every other source was empty.
    return ws.base_url
562
+
563
+
564
def assert_base_url_allowed(base_url: str, allowlist: Tuple[str, ...]) -> None:
    """Raise unless *base_url* matches an allowlist entry.

    Strict-by-default guard against accidentally testing prod. Policy:
    scheme + hostname must match; the port matters only when the allowlist
    entry pins one. Entries without "://" match the hostname alone.
    """
    from urllib.parse import urlparse

    target = base_url.strip()
    if not target:
        raise ValueError("empty base_url")
    parsed = urlparse(target)
    if not parsed.scheme or not parsed.hostname:
        raise ValueError(f"invalid base_url: {target}")

    def _matches(entry: str) -> bool:
        if "://" not in entry:
            # Host-only allowlist entry.
            return parsed.hostname == entry
        allow = urlparse(entry)
        if not allow.scheme or not allow.hostname:
            return False
        if (allow.scheme, allow.hostname) != (parsed.scheme, parsed.hostname):
            return False
        return allow.port is None or parsed.port == allow.port

    for item in allowlist:
        entry = (item or "").strip()
        if entry and _matches(entry):
            return
    raise RuntimeError(f"base_url not allowed by base_url_allowlist: {target}")
596
+
597
+
598
def normalize_path_for_test(path: str, op: Dict[str, Any], resource_ids: Dict[str, str]) -> Tuple[str, Dict[str, str]]:
    """
    Replace {param} segments with simple safe defaults so we can hit the endpoint.
    Returns: (normalized_path, substitutions)
    """
    # Map declared path-parameter names to their schema type (default "string").
    declared: Dict[str, str] = {}
    raw_params = op.get("parameters") or []
    if isinstance(raw_params, list):
        for param in raw_params:
            if not (isinstance(param, dict) and param.get("in") == "path"):
                continue
            pname = param.get("name")
            if not (isinstance(pname, str) and pname):
                continue
            schema = param.get("schema") or {}
            ptype = schema.get("type") if isinstance(schema, dict) else None
            declared[pname] = ptype if isinstance(ptype, str) else "string"

    type_defaults = {"integer": "1", "number": "1", "boolean": "true"}
    substitutions: Dict[str, str] = {}

    def _fill(match: re.Match[str]) -> str:
        pname = match.group(1)
        # Prefer a known resource id; otherwise use a type-appropriate placeholder.
        fallback = type_defaults.get(declared.get(pname, "string"), "test")
        chosen = resource_ids.get(pname) or fallback
        substitutions[pname] = chosen
        return chosen

    return re.sub(r"\{([^}]+)\}", _fill, path), substitutions
635
+
636
+
637
def default_value_for_schema_type(t: str) -> str:
    """Return a safe placeholder literal for an OpenAPI schema type."""
    defaults = {"integer": "1", "number": "1", "boolean": "true"}
    return defaults.get(t, "test")
643
+
644
+
645
def resolve_schema_ref(schema: Dict[str, Any], base_doc: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    Resolve a local $ref within the OpenAPI document to avoid missing fields.

    Walks a ``#/...`` JSON Pointer against *base_doc* (or the module-level
    OPENAPI_DOC); falls back to jsonref when available, and finally returns
    the schema unchanged.
    """
    if not isinstance(schema, dict):
        return schema
    ref = schema.get("$ref")
    if not isinstance(ref, str):
        return schema
    doc = base_doc or OPENAPI_DOC
    if ref.startswith("#/") and isinstance(doc, dict):
        cur: Any = doc
        # BUG FIX: lstrip("#/") strips a *character class*, not a prefix, and
        # could eat leading chars of the first segment; slice the "#/" prefix
        # instead. Also unescape JSON Pointer tokens per RFC 6901.
        for part in ref[2:].split("/"):
            part = part.replace("~1", "/").replace("~0", "~")
            if isinstance(cur, dict) and part in cur:
                cur = cur[part]
            else:
                cur = None
                break
        if isinstance(cur, dict):
            return cur
    # Best-effort fallback for non-local or unresolvable refs.
    if jsonref is not None:
        try:
            resolved = jsonref.replace_refs(schema, base_uri="")  # type: ignore[arg-type]
            if isinstance(resolved, dict):
                return resolved
        except Exception:
            pass
    return schema
673
+
674
+
675
def build_query_params(op: Dict[str, Any], resource_ids: Dict[str, str]) -> Dict[str, str]:
    """
    Construct query params for a request using OpenAPI parameter metadata.
    Prefer resource_ids overrides; otherwise fall back to safe defaults for required params.
    """
    query: Dict[str, str] = {}
    for param in op.get("parameters") or []:
        if not isinstance(param, dict) or param.get("in") != "query":
            continue
        pname = param.get("name")
        if not isinstance(pname, str) or not pname:
            continue
        pschema = param.get("schema") or {}
        ptype = pschema.get("type") if isinstance(pschema, dict) else None
        if pname in resource_ids:
            # Caller-supplied value always wins.
            query[pname] = str(resource_ids[pname])
        elif bool(param.get("required")):
            # Only required params get auto-filled; same defaults as
            # default_value_for_schema_type, inlined.
            if ptype in ("integer", "number"):
                query[pname] = "1"
            elif ptype == "boolean":
                query[pname] = "true"
            else:
                query[pname] = "test"
    return query
698
+
699
+
700
def render_cmd(template: str, *, service_dir: Path, service_name: str) -> str:
    """Expand {service_dir} and {service} placeholders in a command template."""
    replacements = {"{service_dir}": service_dir.as_posix(), "{service}": service_name}
    rendered = template
    for placeholder, value in replacements.items():
        rendered = rendered.replace(placeholder, value)
    return rendered
702
+
703
+
704
def resolve_log_file(log_path: str, *, workspace_root: Path, service_name: str) -> Path:
    """
    Resolve log file path with optional {service} placeholder; defaults to per-service log under .agentdocs/tmp/server-test.
    """
    template = (log_path or "").strip()
    if not template:
        template = ".agentdocs/tmp/server-test/{service}.log"
    return (workspace_root / template.replace("{service}", service_name)).resolve()
711
+
712
+
713
def run_shell(cmd: str, *, cwd: Path) -> None:
    """Run *cmd* through a login bash shell in *cwd*; raises CalledProcessError on non-zero exit."""
    # -l gives a login shell so PATH/profile setup matches developer machines.
    subprocess.run(["bash", "-lc", cmd], cwd=cwd, check=True)
715
+
716
+
717
def start_service(cmd: str, *, cwd: Path, log_file: Path) -> subprocess.Popen[bytes]:
    """Start *cmd* under a login bash shell, appending stdout+stderr to *log_file*.

    The child runs in its own session (process group) so stop_service can
    signal the whole tree.
    """
    log_file.parent.mkdir(parents=True, exist_ok=True)
    # BUG FIX: the original never closed the parent's copy of the log handle,
    # leaking one file descriptor per start. The child inherits its own dup
    # at spawn time, so closing ours after Popen is safe.
    with log_file.open("ab") as log_handle:
        return subprocess.Popen(
            ["bash", "-lc", cmd],
            cwd=cwd,
            stdout=log_handle,
            stderr=subprocess.STDOUT,
            start_new_session=True,  # own process group for group-wide signals
        )
727
+
728
+
729
def stop_service(proc: subprocess.Popen[bytes], timeout_s: int = 10) -> None:
    """Stop a service started by start_service: SIGTERM, grace period, then SIGKILL.

    Signals the whole process group (proc.pid is the pgid because the child
    was started with start_new_session=True) so shell-spawned children die
    too; falls back to per-process terminate/kill if the group signal fails.
    """
    if proc.poll() is not None:
        return  # already exited
    try:
        os.killpg(proc.pid, signal.SIGTERM)
    except Exception:
        proc.terminate()
    # Poll for up to timeout_s seconds for a clean shutdown.
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if proc.poll() is not None:
            return
        time.sleep(0.2)
    # Grace period elapsed: force-kill the group (or just the process).
    try:
        os.killpg(proc.pid, signal.SIGKILL)
    except Exception:
        proc.kill()
745
+
746
+
747
def discover_server_dirs(workspace_root: Path, configured: Tuple[str, ...]) -> List[Path]:
    """Locate candidate service directories.

    When *configured* is non-empty, resolve those relative paths (keeping
    their order, dropping non-directories). Otherwise auto-discover: every
    non-hidden top-level directory containing a known build-system marker,
    sorted for determinism.
    """
    if configured:
        resolved = [(workspace_root / rel).resolve() for rel in configured]
        return [p for p in resolved if p.is_dir()]

    markers = ("Cargo.toml", "go.mod", "pom.xml", "build.gradle", "build.gradle.kts")
    found: List[Path] = []
    for child in workspace_root.iterdir():
        if not child.is_dir() or child.name.startswith("."):
            continue
        if any((child / marker).exists() for marker in markers):
            found.append(child.resolve())
    return sorted(found)
768
+
769
+
770
def wait_for_health(base_url: str, health_path: str, headers: Dict[str, str], timeout_s: int) -> None:
    """Poll the health endpoint until it answers, or raise RuntimeError on timeout.

    Any status below 500 counts as "up" — a 4xx (e.g. auth required) still
    proves the server is accepting requests. The last failure reason is
    included in the timeout error for diagnosis.
    """
    url = base_url.rstrip("/") + "/" + health_path.lstrip("/")
    deadline = time.time() + timeout_s
    last_error: Optional[str] = None
    while time.time() < deadline:
        try:
            # Short per-request timeout so a hung server doesn't eat the budget.
            resp = requests.get(url, headers=headers, timeout=5)
            if 200 <= resp.status_code < 500:
                return
            last_error = f"unexpected status {resp.status_code}"
        except Exception as e:  # noqa: BLE001
            last_error = str(e)
        time.sleep(1.0)  # 1s between polls
    raise RuntimeError(f"health check timeout: {url} ({last_error})")
784
+
785
+
786
def load_openapi(openapi_file: Path) -> Dict[str, Any]:
    """Load an exported OpenAPI JSON document from disk.

    Raises ValueError when the top-level JSON value is not an object.
    """
    data = json.loads(openapi_file.read_text(encoding="utf-8"))
    if not isinstance(data, dict):
        raise ValueError("openapi is not a JSON object")
    # Resolve $ref to avoid missing fields in generated payloads.
    if jsonref is not None:
        try:
            data = jsonref.replace_refs(data)
        except Exception:
            # Best-effort: fall back to the unresolved document.
            pass
    return data
797
+
798
+
799
def export_openapi(base_url: str, openapi_url: str, headers: Dict[str, str], dest: Path) -> None:
    """Fetch the service's OpenAPI document and write it to *dest*.

    Raises requests.HTTPError (via raise_for_status) on a non-2xx response.
    """
    url = base_url.rstrip("/") + "/" + openapi_url.lstrip("/")
    resp = requests.get(url, headers=headers, timeout=10)
    resp.raise_for_status()
    dest.parent.mkdir(parents=True, exist_ok=True)
    dest.write_text(resp.text, encoding="utf-8")
805
+
806
+
807
def iter_openapi_endpoints(openapi: Dict[str, Any]) -> Iterable[Tuple[str, str, Dict[str, Any]]]:
    """Yield (METHOD, path, operation) triples from an OpenAPI document.

    Skips malformed entries and vendor-extension keys; METHOD is upper-cased.
    """
    http_verbs = ("get", "post", "put", "patch", "delete", "head", "options")
    paths = openapi.get("paths") or {}
    if not isinstance(paths, dict):
        return []
    for route, operations in paths.items():
        if not isinstance(route, str) or not isinstance(operations, dict):
            continue
        for verb, operation in operations.items():
            if not isinstance(verb, str) or not isinstance(operation, dict):
                continue
            lowered = verb.lower()
            if lowered in http_verbs:
                yield lowered.upper(), route, operation
821
+
822
+
823
def infer_expected_status(op: Dict[str, Any]) -> str:
    """Return the smallest documented 2xx status as a string, or "" if none."""
    responses = op.get("responses") or {}
    if not isinstance(responses, dict):
        return ""
    success_codes: List[int] = []
    for code_key in responses:
        if isinstance(code_key, str) and code_key.isdigit():
            code = int(code_key)
            if 200 <= code < 300:
                success_codes.append(code)
    # min == sorted(...)[0] — the lowest success code is the canonical one.
    return str(min(success_codes)) if success_codes else ""
838
+
839
+
840
def ensure_csv(csv_path: Path) -> None:
    """Create the endpoint CSV with its CSV_COLUMNS header if it doesn't exist yet."""
    if csv_path.exists():
        return
    csv_path.parent.mkdir(parents=True, exist_ok=True)
    with csv_path.open("w", encoding="utf-8", newline="") as handle:
        csv.DictWriter(handle, fieldnames=CSV_COLUMNS).writeheader()
847
+
848
+
849
def read_csv_rows(csv_path: Path) -> List[Dict[str, str]]:
    """Load all rows from *csv_path*, normalizing missing cells to "".

    Returns [] when the file does not exist.
    """
    if not csv_path.exists():
        return []
    with csv_path.open("r", encoding="utf-8", newline="") as handle:
        return [
            {key: (value or "") for key, value in row.items()}
            for row in csv.DictReader(handle)
        ]
858
+
859
+
860
def write_csv_rows(csv_path: Path, rows: List[Dict[str, str]]) -> None:
    """Rewrite *csv_path* with a CSV_COLUMNS header; extra keys are dropped, missing keys become ""."""
    csv_path.parent.mkdir(parents=True, exist_ok=True)
    with csv_path.open("w", encoding="utf-8", newline="") as handle:
        writer = csv.DictWriter(handle, fieldnames=CSV_COLUMNS)
        writer.writeheader()
        writer.writerows({col: row.get(col, "") for col in CSV_COLUMNS} for row in rows)
868
+
869
+
870
def read_fix_csv_rows(csv_path: Path) -> List[Dict[str, str]]:
    """Load the fix-issues CSV ([] when absent, missing cells → "").

    CONSISTENCY: this was a byte-for-byte copy of read_csv_rows; delegate so
    there is a single parsing code path to maintain.
    """
    return read_csv_rows(csv_path)
879
+
880
+
881
def write_fix_csv_rows(csv_path: Path, rows: List[Dict[str, str]]) -> None:
    """Rewrite the fix-issues CSV with a FIX_CSV_COLUMNS header; missing keys become ""."""
    csv_path.parent.mkdir(parents=True, exist_ok=True)
    with csv_path.open("w", encoding="utf-8", newline="") as handle:
        writer = csv.DictWriter(handle, fieldnames=FIX_CSV_COLUMNS)
        writer.writeheader()
        writer.writerows({col: row.get(col, "") for col in FIX_CSV_COLUMNS} for row in rows)
889
+
890
+
891
def parse_notes_kv(notes: str) -> Dict[str, str]:
    """Parse whitespace-separated key=value tokens from a notes string.

    Tokens without "=" and entries with an empty key or value are ignored.
    """
    parsed: Dict[str, str] = {}
    for token in (notes or "").split():
        if "=" not in token:
            continue
        key, _, value = token.partition("=")
        key, value = key.strip(), value.strip()
        if key and value:
            parsed[key] = value
    return parsed
902
+
903
+
904
def derive_evidence_from_notes(notes: str) -> str:
    """Project the note key=value pairs down to the evidence-relevant keys, in fixed order."""
    kv = parse_notes_kv(notes)
    evidence_keys = ("status", "expected", "resp", "log_snippet", "log", "request_id", "error")
    return " ".join(f"{key}={kv[key]}" for key in evidence_keys if key in kv).strip()
912
+
913
+
914
+ def _safe_read_text_under_workspace(workspace_root: Path, rel_path: str, max_chars: int = 20000) -> str:
915
+ raw = (rel_path or "").strip()
916
+ if not raw:
917
+ return ""
918
+ target = (workspace_root / raw).resolve()
919
+ try:
920
+ if not target.is_relative_to(workspace_root):
921
+ return ""
922
+ except AttributeError:
923
+ if str(target).startswith(str(workspace_root)) is False:
924
+ return ""
925
+ if not target.exists() or not target.is_file():
926
+ return ""
927
+ return target.read_text(encoding="utf-8", errors="replace")[:max_chars]
928
+
929
+
930
def classify_failure(*, notes: str, workspace_root: Path) -> Tuple[str, str, str, bool]:
    """
    Returns: (category, analysis_zh, suggestion_zh, needs_human)
    Keep it deterministic and evidence-based; do not leak secrets.

    Evidence comes from key=value tokens in *notes* plus an optional log
    snippet (read only if its path stays inside the workspace). Branches are
    ordered most-specific-marker first; the first match wins. Analysis and
    suggestion strings are intentionally in Chinese per the tuple contract.
    """
    kv = parse_notes_kv(notes)
    status_raw = kv.get("status", "").strip()
    error_raw = kv.get("error", "").strip()
    expected_raw = kv.get("expected", "").strip()
    log_snippet_rel = kv.get("log_snippet", "").strip()
    # Sandboxed read: returns "" for paths escaping the workspace.
    snippet = _safe_read_text_under_workspace(workspace_root, log_snippet_rel) if log_snippet_rel else ""

    def has(pat: str) -> bool:
        # Case-insensitive multi-line search over the log snippet.
        return bool(re.search(pat, snippet, flags=re.IGNORECASE | re.MULTILINE))

    status_code: Optional[int] = None
    if status_raw.isdigit():
        try:
            status_code = int(status_raw)
        except ValueError:
            status_code = None

    notes_compact = (notes or "").lower()

    # Project-rule validation failure marker (AI_PROJECT.md rules section).
    if "rule_violation" in notes_compact:
        return (
            "RULE_VIOLATION",
            "AI_PROJECT.md 校验未通过(规则段缺失/未填充)。",
            "运行 /ws-rule 补齐 AI_PROJECT_RULES 段(保证 BEGIN/END 标记存在且包含项目规则),再复测。",
            False,
        )

    # Log correlation by request_id failed — evidence chain is broken.
    if "no_request_id_in_log" in notes_compact:
        return (
            "NO_REQUEST_ID_LOG",
            "日志中未能按 request_id 命中对应请求,无法可靠定位根因。",
            "请在服务端日志中打印 request_id=<id> 并确保响应回传 X-Request-Id;然后复测以生成可用证据。",
            True,
        )

    # Actual vs. expected status-code disagreement flagged by the runner.
    if "expected_mismatch" in notes_compact:
        return (
            "EXPECTED_MISMATCH",
            f"返回状态码与期望不一致(status={status_raw or '?'} expected={expected_raw or '?'})。",
            "先对齐 REQUIREMENTS.md/Expected_Status;若期望正确则修复服务端返回码后复测。",
            True,
        )

    # Auth failures: 401/403, or expired JWT/token evidence in the snippet.
    if status_code in (401, 403) or "unauthorized" in notes_compact or has(r"\bjwt\b.*\bexpired\b") or has(r"\btoken\b.*\bexpired\b"):
        return (
            "AUTH",
            "鉴权失败(可能是 token 过期/权限不足)。",
            "刷新/更新鉴权 token(不要提交 secrets),并确认该接口在 REQUIREMENTS.md 中是否要求鉴权;复测验证。",
            False,
        )

    # Client/gateway timeouts: python timeout errors or 408/504 statuses.
    if error_raw.lower() in ("readtimeout", "timeout") or "timeout" in notes_compact or status_code in (408, 504):
        return (
            "TIMEOUT",
            "请求超时或网关超时,服务在当前压力/延迟下无法按时响应。",
            "降低请求速率(max_requests_per_minute)、增加服务端资源或超时阈值,并优先复测单接口定位瓶颈。",
            True,
        )

    # Connection-level failures: service not up / port unreachable.
    if error_raw.lower() in ("connectionerror", "connectionrefusederror") or "connection refused" in notes_compact:
        return (
            "CONNECTION",
            "连接失败(服务未就绪/端口不可达)。",
            "检查服务是否成功启动且 health 通过;必要时用 --manage-service 让 runner 管理启动并复测。",
            True,
        )

    # Server 500: sub-classify a DB connection-pool exhaustion signature first.
    if status_code == 500 or "status=500" in notes_compact:
        if has(r"hikaripool|jdbc|could not get jdbc connection|too many connections|connection is not available"):
            return (
                "DB_POOL",
                "疑似数据库连接池耗尽或连接获取超时(并发/连接池配置不足)。",
                "增大连接池(如 maximum-pool-size)或降低并发/请求速率;必要时增加重试并观察 request_id 对应日志。",
                True,
            )
        return (
            "SERVER_500",
            "服务端 500(需结合日志定位具体异常)。",
            "优先查看 log_snippet 中的异常堆栈;若是压力相关,降低请求速率或增加重试;修复后复测。",
            True,
        )

    # Any remaining non-success status (2xx/3xx excluded).
    if status_code is not None and not (200 <= status_code < 400):
        return (
            "NON_2XX_3XX",
            f"返回非 2xx/3xx(status={status_code})。",
            "确认该返回是否符合 REQUIREMENTS.md;若不符合则修复服务端或调整期望后复测。",
            True,
        )

    # Fallback: not enough evidence to attribute automatically.
    return (
        "UNKNOWN",
        "证据不足,无法自动归因(建议补充日志或缩小复测范围)。",
        "先确保 request_id 日志串联可用;然后只复测单个 service/endpoint 以收集稳定证据,再人工补充修复清单。",
        True,
    )
1031
+
1032
+
1033
def read_triage_csv_rows(csv_path: Path) -> List[Dict[str, str]]:
    """Load the triage-issues CSV ([] when absent, missing cells → "").

    CONSISTENCY: this was a byte-for-byte copy of read_csv_rows; delegate so
    there is a single parsing code path to maintain.
    """
    return read_csv_rows(csv_path)
1042
+
1043
+
1044
def write_triage_csv_rows(csv_path: Path, rows: List[Dict[str, str]]) -> None:
    """Rewrite the triage CSV with a TRIAGE_CSV_COLUMNS header; missing keys become ""."""
    csv_path.parent.mkdir(parents=True, exist_ok=True)
    with csv_path.open("w", encoding="utf-8", newline="") as handle:
        writer = csv.DictWriter(handle, fieldnames=TRIAGE_CSV_COLUMNS)
        writer.writeheader()
        writer.writerows({col: row.get(col, "") for col in TRIAGE_CSV_COLUMNS} for row in rows)
1052
+
1053
+
1054
def write_triage_issues_csv(
    *,
    triage_csv_path: Path,
    fix_rows: List[Dict[str, str]],
    generated_at: str,
) -> None:
    """Rebuild the triage CSV from the fix-issue rows that need a human.

    A fix row needs triage when its category is in the "needs human" set or
    its analysis/suggestion is incomplete. Existing triage rows keyed by
    Fix_Issue_ID keep their Triage_ID / Created_At; new rows get the next
    free TRIAGE-### id. Rows whose fix issue is no longer active are carried
    over unchanged so history is never lost.
    """
    existing = read_triage_csv_rows(triage_csv_path)
    existing_by_fix_id: Dict[str, Dict[str, str]] = {}
    used_ids: set[str] = set()
    next_num = 1
    # First pass over the current file: remember used ids, the highest
    # TRIAGE-N counter, and an index by Fix_Issue_ID for merging.
    for r in existing:
        triage_id = (r.get("Triage_ID", "") or "").strip()
        if triage_id:
            used_ids.add(triage_id)
            m = re.match(r"^TRIAGE-(\d+)$", triage_id)
            if m:
                try:
                    next_num = max(next_num, int(m.group(1)) + 1)
                except ValueError:
                    pass
        fix_id = (r.get("Fix_Issue_ID", "") or "").strip()
        if fix_id:
            existing_by_fix_id[fix_id] = r

    out_rows: List[Dict[str, str]] = []
    active_fix_ids: set[str] = set()

    for fr in fix_rows:
        # DONE fixes never need triage.
        if ((fr.get("Status", "") or "").strip().upper()) == "DONE":
            continue
        # Needs a human when auto-classification is weak or incomplete.
        needs_human = ((fr.get("Failure_Category", "") or "").strip().upper() in ("UNKNOWN", "EXPECTED_MISMATCH", "NO_REQUEST_ID_LOG")) or not (
            (fr.get("Failure_Analysis", "") or "").strip() and (fr.get("Suggestion", "") or "").strip()
        )
        if not needs_human:
            continue

        fix_id = (fr.get("Issue_ID", "") or "").strip()
        if not fix_id:
            continue
        active_fix_ids.add(fix_id)
        prev = existing_by_fix_id.get(fix_id)

        # Reuse the previous Triage_ID; otherwise mint the next free one.
        triage_id = (prev.get("Triage_ID", "") if prev else "").strip()
        if not triage_id:
            while True:
                candidate = f"TRIAGE-{next_num:03d}"
                next_num += 1
                if candidate not in used_ids:
                    triage_id = candidate
                    break
        created_at = (prev.get("Created_At", "") if prev else "").strip() or generated_at
        prev_notes = (prev.get("Notes", "") if prev else "").strip()
        notes = (fr.get("Notes", "") or "").strip()
        # Notes accumulate (deduplicated) rather than being overwritten.
        merged_notes = append_note(prev_notes, notes) if notes else prev_notes

        out_rows.append(
            {
                "Triage_ID": triage_id,
                "Fix_Issue_ID": fix_id,
                "Source_Issue_ID": (fr.get("Source_Issue_ID", "") or "").strip(),
                "Service": (fr.get("Service", "") or "").strip(),
                "Method": (fr.get("Method", "") or "").strip(),
                "Path": (fr.get("Path", "") or "").strip(),
                "Failure_Category": (fr.get("Failure_Category", "") or "").strip(),
                "Evidence": (fr.get("Evidence", "") or "").strip(),
                "Failure_Analysis": (fr.get("Failure_Analysis", "") or "").strip(),
                "Suggestion": (fr.get("Suggestion", "") or "").strip(),
                "Notes": merged_notes,
                "Created_At": created_at,
                "Updated_At": generated_at,
            }
        )
        used_ids.add(triage_id)

    # Carry over historical triage rows whose fix issue is no longer active.
    for r in existing:
        fix_id = (r.get("Fix_Issue_ID", "") or "").strip()
        if not fix_id or fix_id in active_fix_ids:
            continue
        out_rows.append(
            {
                "Triage_ID": (r.get("Triage_ID", "") or "").strip(),
                "Fix_Issue_ID": fix_id,
                "Source_Issue_ID": (r.get("Source_Issue_ID", "") or "").strip(),
                "Service": (r.get("Service", "") or "").strip(),
                "Method": (r.get("Method", "") or "").strip(),
                "Path": (r.get("Path", "") or "").strip(),
                "Failure_Category": (r.get("Failure_Category", "") or "").strip(),
                "Evidence": (r.get("Evidence", "") or "").strip(),
                "Failure_Analysis": (r.get("Failure_Analysis", "") or "").strip(),
                "Suggestion": (r.get("Suggestion", "") or "").strip(),
                "Notes": (r.get("Notes", "") or "").strip(),
                "Created_At": (r.get("Created_At", "") or "").strip() or generated_at,
                "Updated_At": (r.get("Updated_At", "") or "").strip(),
            }
        )

    write_triage_csv_rows(triage_csv_path, out_rows)
1151
+
1152
+
1153
def write_fix_issues_csv(
    *,
    fix_csv_path: Path,
    endpoint_rows: List[Dict[str, str]],
    blocked_items: List[Dict[str, Any]],
    generated_at: str,
    workspace_root: Path,
) -> None:
    """Upsert fix issues for blocked endpoints and refresh the triage CSV.

    Each blocked item becomes (or updates) a FIX-### row, classified via
    classify_failure. Existing rows are matched by Source_Issue_ID first,
    then by (Service, Method, Path); their Issue_ID, Created_At, category,
    analysis and suggestion are preserved, and notes accumulate. Rows whose
    endpoint is no longer blocked are carried over, flipped to DONE when the
    endpoint's Test_Status shows DONE/SKIP.
    """
    rows_by_key: Dict[Tuple[str, str, str], Dict[str, str]] = {}
    for r in endpoint_rows:
        rows_by_key[(r.get("Service", ""), r.get("Method", ""), r.get("Path", ""))] = r

    existing = read_fix_csv_rows(fix_csv_path)
    existing_by_source: Dict[str, Dict[str, str]] = {}
    existing_by_key: Dict[Tuple[str, str, str], Dict[str, str]] = {}
    used_issue_ids: set[str] = set()
    next_num = 1
    # Index the current file: used ids, highest FIX-N counter, and lookups
    # by source id and endpoint key for merging.
    for r in existing:
        issue_id = (r.get("Issue_ID", "") or "").strip()
        if issue_id:
            used_issue_ids.add(issue_id)
            m = re.match(r"^FIX-(\d+)$", issue_id)
            if m:
                try:
                    next_num = max(next_num, int(m.group(1)) + 1)
                except ValueError:
                    pass
        source_id = (r.get("Source_Issue_ID", "") or "").strip()
        if source_id:
            existing_by_source[source_id] = r
        k = (r.get("Service", ""), r.get("Method", ""), r.get("Path", ""))
        existing_by_key[k] = r

    current_blocked_keys: set[Tuple[str, str, str]] = set()
    out_rows: List[Dict[str, str]] = []

    for item in blocked_items:
        service = str(item.get("service") or "")
        method = str(item.get("method") or "")
        path = str(item.get("path") or "")
        source_id = str(item.get("issue_id") or "").strip()
        key = (service, method, path)
        current_blocked_keys.add(key)

        endpoint = rows_by_key.get(key, {})
        title = (endpoint.get("Title", "") or "").strip() or f"{method} {path}".strip()
        notes = str(item.get("notes") or endpoint.get("Notes", "") or "").strip()
        evidence = derive_evidence_from_notes(notes)
        # Deterministic, evidence-based classification (see classify_failure).
        category, analysis, suggestion, _needs_human = classify_failure(notes=notes, workspace_root=workspace_root)

        # Source_Issue_ID match takes precedence over endpoint-key match.
        prev = existing_by_source.get(source_id) if source_id else None
        if prev is None:
            prev = existing_by_key.get(key)

        issue_id = ""
        if prev is not None:
            issue_id = (prev.get("Issue_ID", "") or "").strip()
        if not issue_id:
            # Prefer a readable FIX-<source> id; fall back to the counter.
            candidate = f"FIX-{source_id}" if source_id else ""
            if candidate and candidate not in used_issue_ids:
                issue_id = candidate
            else:
                while True:
                    candidate = f"FIX-{next_num:03d}"
                    next_num += 1
                    if candidate not in used_issue_ids:
                        issue_id = candidate
                        break

        created_at = (prev.get("Created_At", "") if prev else "").strip() or generated_at
        prev_notes = (prev.get("Notes", "") if prev else "").strip()
        merged_notes = prev_notes
        if notes:
            merged_notes = append_note(prev_notes, notes)

        # Manual classification edits win over fresh auto-classification.
        prev_category = (prev.get("Failure_Category", "") if prev else "").strip()
        prev_analysis = (prev.get("Failure_Analysis", "") if prev else "").strip()
        prev_suggestion = (prev.get("Suggestion", "") if prev else "").strip()

        out_rows.append(
            {
                "Issue_ID": issue_id,
                "Source_Issue_ID": source_id,
                "Service": service,
                "Title": title,
                "Method": method,
                "Path": path,
                "Status": "TODO",
                "Evidence": evidence,
                "Failure_Category": prev_category or category,
                "Failure_Analysis": prev_analysis or analysis,
                "Suggestion": prev_suggestion or suggestion,
                "Notes": merged_notes,
                "Created_At": created_at,
                "Updated_At": generated_at,
            }
        )
        used_issue_ids.add(issue_id)

    # Carry over rows for endpoints no longer blocked; mark DONE when the
    # endpoint row says the test finished (DONE/SKIP).
    for r in existing:
        k = (r.get("Service", ""), r.get("Method", ""), r.get("Path", ""))
        if k in current_blocked_keys:
            continue
        endpoint = rows_by_key.get(k)
        st = ((endpoint or {}).get("Test_Status", "") or "").strip().upper()
        status = (r.get("Status", "") or "").strip().upper() or "TODO"
        if st in ("DONE", "SKIP"):
            status = "DONE"
        out_rows.append(
            {
                "Issue_ID": (r.get("Issue_ID", "") or "").strip(),
                "Source_Issue_ID": (r.get("Source_Issue_ID", "") or "").strip(),
                "Service": (r.get("Service", "") or "").strip(),
                "Title": (r.get("Title", "") or "").strip(),
                "Method": (r.get("Method", "") or "").strip(),
                "Path": (r.get("Path", "") or "").strip(),
                "Status": status,
                "Evidence": (r.get("Evidence", "") or "").strip(),
                "Failure_Category": (r.get("Failure_Category", "") or "").strip(),
                "Failure_Analysis": (r.get("Failure_Analysis", "") or "").strip(),
                "Suggestion": (r.get("Suggestion", "") or "").strip(),
                "Notes": (r.get("Notes", "") or "").strip(),
                "Created_At": (r.get("Created_At", "") or "").strip() or generated_at,
                # Only bump Updated_At when this pass flipped the row to DONE.
                "Updated_At": generated_at if status == "DONE" and (r.get("Updated_At", "") or "").strip() != generated_at else (r.get("Updated_At", "") or "").strip(),
            }
        )

    write_fix_csv_rows(fix_csv_path, out_rows)
    # Keep the human-triage CSV in sync with the refreshed fix rows.
    triage_csv_path = (workspace_root / "issues" / "server-triage-issues.csv").resolve()
    write_triage_issues_csv(triage_csv_path=triage_csv_path, fix_rows=out_rows, generated_at=generated_at)
1283
+
1284
+
1285
+ def _safe_curl_example(
1286
+ base_url: str,
1287
+ method: str,
1288
+ path: str,
1289
+ request_id_header: str,
1290
+ auth_headers: Dict[str, str],
1291
+ ) -> str:
1292
+ url = base_url.rstrip("/") + "/" + path.lstrip("/")
1293
+ parts = ["curl", "-sS", "-i", "-X", method, repr(url)]
1294
+ parts += ["-H", repr(f"{request_id_header}: <generated>")]
1295
+ for k in auth_headers.keys():
1296
+ parts += ["-H", repr(f"{k}: <redacted>")]
1297
+ return " ".join(parts)
1298
+
1299
+
1300
def upsert_endpoints_into_csv(
    csv_path: Path,
    service_name: str,
    base_url: str,
    request_id_header: str,
    auth_headers: Dict[str, str],
    endpoints: Iterable[Tuple[str, str, Dict[str, Any]]],
    allow_mutations: bool,
) -> None:
    """Add newly discovered endpoints to the plan CSV (existing rows untouched).

    Rows are keyed by (Service, Method, Path); mutating methods are skipped
    unless *allow_mutations*. New rows start as TODO/PENDING with a redacted
    curl example and the inferred 2xx expected status.
    """
    ensure_csv(csv_path)
    rows = read_csv_rows(csv_path)

    # Dedup key set plus the next free numeric Issue_ID.
    existing = {(r.get("Service", ""), r.get("Method", ""), r.get("Path", "")) for r in rows}
    next_id = 1
    for r in rows:
        try:
            next_id = max(next_id, int(r.get("Issue_ID", "0") or "0") + 1)
        except ValueError:
            continue

    for method, path, op in endpoints:
        # Mutating verbs only enter the plan when explicitly allowed.
        if not allow_mutations and method in ("POST", "PUT", "PATCH", "DELETE"):
            continue
        key = (service_name, method, path)
        if key in existing:
            continue
        # Prefer human-readable summary, then operationId, then "METHOD path".
        title = op.get("summary") or op.get("operationId") or f"{method} {path}"
        if not isinstance(title, str):
            title = f"{method} {path}"
        expected_status = infer_expected_status(op)
        rows.append(
            {
                "Issue_ID": str(next_id),
                "Service": service_name,
                "Title": title,
                "Method": method,
                "Path": path,
                "Auth": "header",
                "Request_Example": _safe_curl_example(base_url, method, path, request_id_header, auth_headers),
                "Expected_Status": expected_status,
                "Expected_Body_Checks": "",
                "Log_Checks": "no ERROR/Exception; correlate by request_id",
                "Test_Status": "TODO",
                "Review_Status": "PENDING",
                "Notes": "",
            }
        )
        next_id += 1

    write_csv_rows(csv_path, rows)
1350
+
1351
+
1352
def slugify(text: str) -> str:
    """Lowercase *text* into a dash-separated slug, capped at 80 chars ("endpoint" fallback)."""
    slug = re.sub(r"[^a-zA-Z0-9]+", "-", text).strip("-").lower()
    if not slug:
        return "endpoint"
    return slug[:80]
1355
+
1356
+
1357
def grep_log_by_request_id(log_file: Path, request_id: str, max_lines: int = 80) -> str:
    """Return up to the last *max_lines* log lines containing *request_id* ("" if none/missing)."""
    if not log_file.exists():
        return ""
    text = log_file.read_text(encoding="utf-8", errors="replace")
    hits = [line for line in text.splitlines() if request_id in line]
    return "\n".join(hits[-max_lines:]) if hits else ""
1365
+
1366
+
1367
def tail_log(log_file: Path, max_lines: int = 200) -> str:
    """Return the final *max_lines* lines of the log file ("" when it is absent)."""
    if not log_file.exists():
        return ""
    all_lines = log_file.read_text(encoding="utf-8", errors="replace").splitlines()
    return "\n".join(all_lines[-max_lines:])
1372
+
1373
+
1374
def write_text_if_changed(path: Path, content: str) -> None:
    """Write *content* to *path* only when it differs, to keep mtimes stable.

    Any error while comparing (e.g. unreadable file) falls through to a write.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    try:
        unchanged = path.exists() and path.read_text(encoding="utf-8", errors="replace") == content
    except Exception:
        unchanged = False
    if not unchanged:
        path.write_text(content, encoding="utf-8", errors="replace")
1382
+
1383
+
1384
def append_note(existing: str, piece: str) -> str:
    """Append *piece* to *existing* notes, skipping empties and duplicates (substring match)."""
    base = (existing or "").strip()
    addition = (piece or "").strip()
    if not addition:
        return base
    if not base:
        return addition
    return base if addition in base else f"{base} {addition}".strip()
1394
+
1395
+
1396
def json_from_schema(schema: Dict[str, Any], depth: int = 0, base_doc: Optional[Dict[str, Any]] = None) -> Any:
    """
    Generate a sample payload from JSON schema with a bias toward required fields and examples/defaults.
    Goal: avoid missing fields that cause 500/400 when runner auto-generates bodies.
    """
    # Recursion guard against cyclic/deep schemas.
    if depth > 4:
        return None

    # Follow local $ref first so properties/required are visible.
    schema = resolve_schema_ref(schema, base_doc=base_doc) if isinstance(schema, dict) else schema

    # Respect example/default/enum early.
    if isinstance(schema.get("example"), (str, int, float, bool, dict, list)):
        return schema["example"]
    if isinstance(schema.get("default"), (str, int, float, bool, dict, list)):
        return schema["default"]
    if isinstance(schema.get("enum"), list) and schema["enum"]:
        return schema["enum"][0]

    # oneOf/anyOf/allOf — pick the first variant; allOf does a shallow merge.
    if "oneOf" in schema and isinstance(schema["oneOf"], list) and schema["oneOf"]:
        s0 = schema["oneOf"][0]
        return json_from_schema(s0, depth + 1, base_doc=base_doc) if isinstance(s0, dict) else None
    if "anyOf" in schema and isinstance(schema["anyOf"], list) and schema["anyOf"]:
        s0 = schema["anyOf"][0]
        return json_from_schema(s0, depth + 1, base_doc=base_doc) if isinstance(s0, dict) else None
    if "allOf" in schema and isinstance(schema["allOf"], list) and schema["allOf"]:
        merged: Dict[str, Any] = {}
        for s in schema["allOf"]:
            if isinstance(s, dict):
                merged.update(s)  # NOTE: shallow merge — later entries win per key
        return json_from_schema(merged, depth + 1, base_doc=base_doc)

    t = schema.get("type")
    # Objects: treat a bare "properties" key as an object schema too.
    if t == "object" or ("properties" in schema):
        props = schema.get("properties") if isinstance(schema.get("properties"), dict) else {}
        required = schema.get("required") if isinstance(schema.get("required"), list) else []
        out: Dict[str, Any] = {}
        # Fill required first.
        for k in required:
            if isinstance(k, str) and k in props and isinstance(props[k], dict):
                out[k] = json_from_schema(props[k], depth + 1, base_doc=base_doc)
        # Fill remaining optional fields (best-effort, capped to avoid huge payloads).
        optional_added = 0
        for k, v in props.items():
            if k in out:
                continue
            if not isinstance(k, str) or not isinstance(v, dict):
                continue
            out[k] = json_from_schema(v, depth + 1, base_doc=base_doc)
            optional_added += 1
            if optional_added >= 10:
                break
        return out

    if t == "array":
        items = schema.get("items")
        if isinstance(items, dict):
            # A single sample element is enough to exercise the endpoint.
            return [json_from_schema(items, depth + 1, base_doc=base_doc)]
        return []

    if t == "integer" or t == "number":
        return 1
    if t == "boolean":
        return True

    # string handling with simple format-aware samples
    if t == "string" or not t:
        fmt = schema.get("format") if isinstance(schema.get("format"), str) else ""
        fmt = fmt.lower()
        if fmt == "date":
            return "2024-01-01"
        if fmt in ("date-time", "datetime"):
            return "2024-01-01T00:00:00Z"
        if fmt == "email":
            return "user@example.com"
        if fmt in ("phone", "phone-number", "tel"):
            return "13800000000"
        if fmt in ("uuid", "guid"):
            return "00000000-0000-0000-0000-000000000000"
        if fmt in ("uri", "url"):
            return "https://example.com"
        return "test"

    # Unknown type: fall back to a harmless string.
    return "test"
1480
+
1481
+
1482
def infer_json_body(op: Dict[str, Any]) -> Optional[Any]:
    """Build a sample JSON request body for an operation, or None when it has none.

    Returns None when the operation declares no application/json request
    content; returns {} when the JSON content exists but carries no usable
    schema. Otherwise generates a payload via json_from_schema.
    """
    rb = op.get("requestBody") or {}
    if not isinstance(rb, dict):
        return None
    content = rb.get("content") or {}
    if not isinstance(content, dict):
        return None
    # Find the first application/json-ish content type (covers e.g.
    # "application/json; charset=utf-8").
    json_ct = None
    for ct in content.keys():
        if isinstance(ct, str) and ct.lower().startswith("application/json"):
            json_ct = ct
            break
    if not json_ct:
        return None
    part = content.get(json_ct)
    if not isinstance(part, dict):
        return {}
    schema = part.get("schema")
    if not isinstance(schema, dict):
        return {}
    # Optional stashed document for $ref resolution; falls back to the
    # module-level OPENAPI_DOC.
    base_doc = op.get("_openapi_doc") if isinstance(op, dict) else None
    schema = resolve_schema_ref(schema, base_doc=base_doc)
    return json_from_schema(schema, base_doc=base_doc or OPENAPI_DOC)
1505
+
1506
+
1507
def run_endpoint_request(
    base_url: str,
    method: str,
    path: str,
    op: Optional[Dict[str, Any]],
    headers: Dict[str, str],
    request_id_header: str,
    out_dir: Path,
    resource_ids: Dict[str, str],
    response_max_bytes: int,
    request_timeout_s: int,
) -> Tuple[int, str, str]:
    """Execute a single HTTP request against an endpoint and persist the result.

    Writes the (possibly truncated) response body to
    ``<out_dir>/<method>-<path>.out`` and, when path substitutions or a JSON
    body were used, a ``.meta.json`` sidecar for reproducibility.

    Returns:
        (status_code, out_file_posix_path, request_id)
    """
    correlation_id = str(uuid.uuid4())
    test_path, path_subs = normalize_path_for_test(path, op or {}, resource_ids=resource_ids)
    target_url = "/".join((base_url.rstrip("/"), test_path.lstrip("/")))
    params = build_query_params(op or {}, resource_ids=resource_ids)

    # Propagate the correlation id so server log lines can be matched later.
    send_headers = dict(headers)
    send_headers[request_id_header] = correlation_id

    # Only mutating verbs get an inferred JSON payload.
    body = None
    if method in ("POST", "PUT", "PATCH"):
        body = infer_json_body(op or {})

    resp = requests.request(
        method=method,
        url=target_url,
        headers=send_headers,
        timeout=max(1, int(request_timeout_s)),
        json=body,
        params=params or None,
    )

    out_dir.mkdir(parents=True, exist_ok=True)
    stem = f"{slugify(method)}-{slugify(path)}"
    out_file = out_dir / f"{stem}.out"

    # Defensive: resp.content should always be bytes, but fall back to
    # re-encoding the text form just in case.
    if isinstance(resp.content, (bytes, bytearray)):
        payload = resp.content
    else:
        payload = resp.text.encode("utf-8", errors="replace")
    cap = max(0, int(response_max_bytes))
    kept = payload[:cap] if cap else payload
    out_file.write_text(kept.decode("utf-8", errors="replace"), encoding="utf-8", errors="replace")

    if path_subs or body is not None:
        # Add a small sidecar note for reproducibility without mutating the CSV schema.
        meta = {
            "path_substitutions": path_subs,
            "url": target_url,
            "query_params": params,
            "json_body": body,
            "response_truncated": bool(cap and len(payload) > cap),
            "response_bytes": len(payload),
            "response_bytes_written": len(kept),
        }
        (out_dir / f"{stem}.meta.json").write_text(
            json.dumps(meta, ensure_ascii=False, indent=2),
            encoding="utf-8",
        )
    return resp.status_code, out_file.as_posix(), correlation_id
1561
+
1562
+
1563
def main(argv: List[str]) -> int:
    """Workspace server API test runner entry point.

    Pipeline: parse workspace config -> (optionally) build/start services ->
    export/load OpenAPI -> upsert endpoints into the contract CSV -> execute
    TODO/BLOCKED rows with request-id log correlation -> emit report.json,
    server-fix-issues.csv and a short markdown summary.

    Returns:
        0 when no row ended BLOCKED, 2 on missing server dirs,
        3 on health-check failure or when any row is BLOCKED.
    """
    parser = argparse.ArgumentParser(description="Workspace server API test runner (OpenAPI -> CSV -> requests -> log correlation)")
    parser.add_argument("--workspace", default=".", help="workspace root (directory A)")
    parser.add_argument("--service", default="", help="optional: only run one service directory name")
    parser.add_argument("--csv", default="issues/server-api-issues.csv", help="execution contract CSV path (relative to workspace)")
    parser.add_argument("--out-dir", default=".agentdocs/tmp/server-test", help="output dir for responses (relative to workspace)")
    parser.add_argument("--base-url", default="", help="override base_url (otherwise use secrets/test-accounts.json or AI_WORKSPACE.md)")
    parser.add_argument("--health-timeout", type=int, default=60, help="seconds to wait for health endpoint")
    parser.add_argument("--no-wait-health", action="store_true", help="skip health check wait")
    parser.add_argument("--refresh-openapi", action="store_true", help="always export OpenAPI from openapi_url into docs/openapi.json")
    parser.add_argument("--manage-service", action="store_true", help="run build/start/stop using AI_WORKSPACE.md build_cmd/start_cmd/stop_cmd")
    parser.add_argument("--max-endpoints", type=int, default=0, help="limit endpoints executed per run (0=unlimited)")
    parser.add_argument("--request-timeout", type=int, default=15, help="per-request timeout seconds")
    parser.add_argument("--max-response-bytes", type=int, default=65536, help="truncate response body written to .out (0=unlimited)")
    parser.add_argument("--max-log-snippet-lines", type=int, default=80, help="max log lines written into *.log.txt")
    parser.add_argument("--max-report-blocked", type=int, default=200, help="max BLOCKED items persisted into report.json")
    args = parser.parse_args(argv)

    # Resolve workspace layout and declarative configuration.
    workspace_root = Path(args.workspace).resolve()
    ai_workspace_md = workspace_root / "AI_WORKSPACE.md"
    cfg = parse_ai_workspace_md(ai_workspace_md)

    secrets_path = workspace_root / "secrets" / "test-accounts.json"
    secrets = load_secrets(secrets_path)

    csv_path = (workspace_root / args.csv).resolve()
    out_dir = (workspace_root / args.out_dir).resolve()

    server_dirs = discover_server_dirs(workspace_root, cfg.server_dirs)
    if args.service:
        server_dirs = [p for p in server_dirs if p.name == args.service]

    if not server_dirs:
        print("no server directories found (configure server_dirs in AI_WORKSPACE.md or add marker files)", file=sys.stderr)
        return 2

    ensure_csv(csv_path)
    lint_blocked: List[Dict[str, str]] = []

    # Managed service processes, keyed by service name (used in finally for teardown).
    procs: Dict[str, subprocess.Popen[bytes]] = {}
    server_dir_by_name: Dict[str, Path] = {p.name: p for p in server_dirs}
    report: Dict[str, Any] = {
        "workspace": workspace_root.as_posix(),
        "generated_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "csv": os.path.relpath(csv_path, workspace_root),
        "out_dir": os.path.relpath(out_dir, workspace_root),
        "services": {},
        "summary": {"DONE": 0, "BLOCKED": 0, "SKIP": 0, "TODO": 0, "DOING": 0},
        "blocked": [],
    }
    # Simple client-side rate limit derived from max_requests_per_minute.
    min_interval_s = 0.0
    if cfg.max_requests_per_minute > 0:
        min_interval_s = 60.0 / float(cfg.max_requests_per_minute)
    last_request_at = 0.0
    try:
        # Phase 1 (optional): build and start each managed service.
        if args.manage_service and cfg.start_cmd:
            for server_dir in server_dirs:
                service_name = server_dir.name
                if args.service and service_name != args.service:
                    continue
                if cfg.build_cmd:
                    run_shell(render_cmd(cfg.build_cmd, service_dir=server_dir, service_name=service_name), cwd=workspace_root)
                log_file = resolve_log_file(cfg.log_path, workspace_root=workspace_root, service_name=service_name)
                start_cmd = render_cmd(cfg.start_cmd, service_dir=server_dir, service_name=service_name)
                procs[service_name] = start_service(start_cmd, cwd=workspace_root, log_file=log_file)

        # Phase 2: per service, wait for health, obtain the OpenAPI document
        # (from disk unless --refresh-openapi), and upsert endpoint rows into the CSV.
        openapi_used: Dict[str, str] = {}
        for server_dir in server_dirs:
            service_name = server_dir.name
            base_url = effective_base_url(
                cli_base_url=args.base_url,
                secrets=secrets,
                workspace=cfg,
                service_name=service_name,
            )
            # NOTE(review): auth_headers is rebound on every iteration, and the
            # execution loop below reuses whatever value survives this loop —
            # i.e. the headers of the LAST service. Confirm this is acceptable
            # when multiple services use different auth.
            auth_headers = build_auth_headers(base_url=base_url, secrets=secrets, request_id_header=cfg.request_id_header)

            if not args.no_wait_health:
                try:
                    wait_for_health(base_url, cfg.health_path, headers=auth_headers, timeout_s=args.health_timeout)
                except Exception as e:  # noqa: BLE001
                    print(f"[{service_name}] health check failed: {e}", file=sys.stderr)
                    return 3

            openapi_file = (server_dir / cfg.openapi_path).resolve()
            openapi_paths = candidate_openapi_paths(cfg)
            if openapi_file.exists() and not args.refresh_openapi:
                openapi = load_openapi(openapi_file)
                # OPENAPI_DOC is a module-level mutable doc used by $ref resolution.
                OPENAPI_DOC.clear()
                OPENAPI_DOC.update(openapi)
            else:
                # Try each candidate export path until one succeeds.
                openapi = None
                last_err: Optional[Exception] = None
                for rel_path in openapi_paths:
                    try:
                        openapi_headers = build_openapi_headers(secrets=secrets, request_id_header=cfg.request_id_header)
                        export_openapi(base_url, rel_path, headers=openapi_headers, dest=openapi_file)
                        openapi = load_openapi(openapi_file)
                        OPENAPI_DOC.clear()
                        OPENAPI_DOC.update(openapi)
                        openapi_used[service_name] = rel_path
                        break
                    except Exception as e:  # noqa: BLE001
                        last_err = e
                        continue
                if openapi is None:
                    print(f"[{service_name}] openapi unavailable (tried: {', '.join(openapi_paths)}): {last_err}", file=sys.stderr)
                    continue

            endpoints = list(iter_openapi_endpoints(openapi))
            upsert_endpoints_into_csv(
                csv_path=csv_path,
                service_name=service_name,
                base_url=base_url,
                request_id_header=cfg.request_id_header,
                auth_headers=auth_headers,
                endpoints=endpoints,
                allow_mutations=cfg.allow_mutations,
            )

        # Phase 3: build a (service, method, path) -> operation lookup from the
        # on-disk OpenAPI files, tagging each op with its owning document.
        rows, lint_blocked = sync_ai_project_rule_rows(csv_path, workspace_root)
        updated_rows: List[Dict[str, str]] = []
        op_lookup: Dict[Tuple[str, str, str], Dict[str, Any]] = {}
        for server_dir in server_dirs:
            service_name = server_dir.name
            openapi_file = (server_dir / cfg.openapi_path).resolve()
            if not openapi_file.exists():
                continue
            try:
                openapi = load_openapi(openapi_file)
                OPENAPI_DOC.clear()
                OPENAPI_DOC.update(openapi)
            except Exception:
                continue
            for method, path, op in iter_openapi_endpoints(openapi):
                if isinstance(op, dict):
                    op["_openapi_doc"] = openapi
                op_lookup[(service_name, method, path)] = op

        # Phase 4: execute TODO/BLOCKED rows and update their status in place.
        # Invariant: every processed row is appended to updated_rows exactly once,
        # so len(updated_rows) == idx at the top of each iteration.
        executed = 0
        for idx, row in enumerate(rows):
            issue_id_raw = (row.get("Issue_ID", "") or "").strip()
            # RULE-* rows are lint artifacts, not endpoints — pass through untouched.
            if issue_id_raw.startswith("RULE-"):
                updated_rows.append(row)
                continue
            service = row.get("Service", "")
            if args.service and service != args.service:
                updated_rows.append(row)
                continue
            status = (row.get("Test_Status", "") or "").strip().upper()
            if status not in ("TODO", "BLOCKED"):
                updated_rows.append(row)
                continue
            # Endpoint cap reached: carry over the remaining rows unchanged.
            if args.max_endpoints and executed >= max(0, int(args.max_endpoints)):
                updated_rows.append(row)
                updated_rows.extend(rows[idx + 1 :])
                break

            method = (row.get("Method", "GET") or "GET").strip().upper()
            path = (row.get("Path", "") or "").strip()
            if not path:
                updated_rows.append(row)
                continue

            if not cfg.allow_mutations and method in ("POST", "PUT", "PATCH", "DELETE"):
                row["Test_Status"] = "SKIP"
                # NOTE(review): raw concatenation here, append_note elsewhere — confirm intentional.
                row["Notes"] = (row.get("Notes", "") + " skipped: allow_mutations=false").strip()
                updated_rows.append(row)
                continue

            # Dangerous endpoints: DELETE verb, "dangerous" tag, or x-dangerous flag.
            if cfg.dangerous_disabled:
                tags = op_lookup.get((service, method, path), {}).get("tags") if op_lookup.get((service, method, path)) else None
                tag_list = tags if isinstance(tags, list) else []
                dangerous_tag = any(isinstance(t, str) and t.lower() == "dangerous" for t in tag_list)
                op_obj = op_lookup.get((service, method, path)) or {}
                dangerous_flag = bool(op_obj.get("x-dangerous") is True)
                if method == "DELETE" or dangerous_tag or dangerous_flag:
                    row["Test_Status"] = "SKIP"
                    row["Notes"] = append_note(row.get("Notes", ""), "skipped: dangerous_disabled=true")
                    updated_rows.append(row)
                    continue

            # Persist DOING immediately so a crash leaves a visible in-progress marker.
            row["Test_Status"] = "DOING"
            write_csv_rows(csv_path, updated_rows + [row] + rows[len(updated_rows) + 1 :])

            base_url_effective = effective_base_url(
                cli_base_url=args.base_url,
                secrets=secrets,
                workspace=cfg,
                service_name=service,
            )
            assert_base_url_allowed(base_url_effective, cfg.base_url_allowlist)
            op = op_lookup.get((service, method, path))

            try:
                # Client-side rate limiting between requests.
                if min_interval_s > 0 and last_request_at > 0:
                    elapsed = time.time() - last_request_at
                    if elapsed < min_interval_s:
                        time.sleep(min_interval_s - elapsed)
                code, out_path, request_id = run_endpoint_request(
                    base_url=base_url_effective,
                    method=method,
                    path=path,
                    op=op,
                    headers=auth_headers,
                    request_id_header=cfg.request_id_header,
                    out_dir=out_dir / service,
                    resource_ids=secrets.resource_ids,
                    response_max_bytes=args.max_response_bytes,
                    request_timeout_s=args.request_timeout,
                )
                last_request_at = time.time()
                resp_rel = os.path.relpath(out_path, workspace_root)
                row["Notes"] = append_note(row.get("Notes", ""), f"status={code}")
                row["Notes"] = append_note(row.get("Notes", ""), f"expected={row.get('Expected_Status','').strip() or '<any 2xx/3xx>'}")
                row["Notes"] = append_note(row.get("Notes", ""), f"resp={resp_rel}")
                row["Notes"] = append_note(row.get("Notes", ""), f"request_id={request_id}")

                # Correlate server logs by request id; fall back to tailing the log.
                log_file = resolve_log_file(cfg.log_path, workspace_root=workspace_root, service_name=service)
                log_snippet = grep_log_by_request_id(log_file, request_id=request_id, max_lines=max(1, int(args.max_log_snippet_lines)))
                log_rel = os.path.relpath(log_file, workspace_root) if log_file.exists() else ""
                expected_raw = (row.get("Expected_Status", "") or "").strip()
                expected_code = int(expected_raw) if expected_raw.isdigit() else None
                log_snippet_rel = ""

                # Verdict: log errors win over status-code checks; explicit
                # Expected_Status wins over the generic 2xx/3xx rule.
                if log_snippet and re.search(r"\b(ERROR|Exception|Stacktrace)\b", log_snippet):
                    row["Test_Status"] = "BLOCKED"
                    row["Notes"] = append_note(row.get("Notes", ""), "log_error_by_request_id")
                else:
                    if not log_snippet:
                        fallback = tail_log(log_file, max_lines=200)
                        if fallback and re.search(r"\b(ERROR|Exception|Stacktrace)\b", fallback):
                            row["Test_Status"] = "BLOCKED"
                            row["Notes"] = append_note(row.get("Notes", ""), "log_error_tail_fallback")
                            updated_rows.append(row)
                            continue
                    if expected_code is not None and code != expected_code:
                        row["Test_Status"] = "BLOCKED"
                        row["Notes"] = append_note(row.get("Notes", ""), f"expected_mismatch")
                    else:
                        if expected_code is None and not (200 <= code < 400):
                            row["Test_Status"] = "BLOCKED"
                            row["Notes"] = append_note(row.get("Notes", ""), "non_2xx_3xx")
                        else:
                            row["Test_Status"] = "DONE"

                if not log_snippet:
                    row["Notes"] = append_note(row.get("Notes", ""), "no_request_id_in_log")
                else:
                    # Persist the matched snippet so the agent can triage without re-grepping.
                    snippet_path = (out_dir / service / f"{slugify(method)}-{slugify(path)}.log.txt").resolve()
                    write_text_if_changed(snippet_path, log_snippet + "\n")
                    log_snippet_rel = os.path.relpath(snippet_path, workspace_root)
                    row["Notes"] = append_note(row.get("Notes", ""), f"log={log_rel}")
                    row["Notes"] = append_note(row.get("Notes", ""), f"log_snippet={log_snippet_rel}")
            except Exception as e:  # noqa: BLE001
                # Any request/IO failure marks the row BLOCKED rather than aborting the run.
                row["Test_Status"] = "BLOCKED"
                row["Notes"] = append_note(row.get("Notes", ""), f"error={type(e).__name__}")

            # Update report stats.
            svc = report["services"].setdefault(service, {"DONE": 0, "BLOCKED": 0, "SKIP": 0, "TODO": 0, "DOING": 0})
            st = (row.get("Test_Status", "") or "").strip().upper()
            if st in svc:
                svc[st] += 1
            if st in report["summary"]:
                report["summary"][st] += 1
            if st == "BLOCKED":
                # Cap persisted BLOCKED detail to keep report.json bounded.
                if len(report["blocked"]) < max(0, int(args.max_report_blocked)):
                    report["blocked"].append(
                        {
                            "issue_id": row.get("Issue_ID", ""),
                            "service": service,
                            "method": method,
                            "path": path,
                            "expected_status": row.get("Expected_Status", ""),
                            "notes": row.get("Notes", ""),
                        }
                    )

            updated_rows.append(row)
            executed += 1

        # Phase 5: persist CSV, report.json, fix-issues CSV, and markdown summary.
        write_csv_rows(csv_path, updated_rows)
        report_path = (out_dir / "report.json").resolve()
        for item in lint_blocked:
            report["summary"]["BLOCKED"] += 1
            report["blocked"].append(item)
        if openapi_used:
            report["openapi_used"] = openapi_used
        write_text_if_changed(report_path, json.dumps(report, ensure_ascii=False, indent=2) + "\n")
        fix_csv_path = (workspace_root / "issues" / "server-fix-issues.csv").resolve()
        write_fix_issues_csv(
            fix_csv_path=fix_csv_path,
            endpoint_rows=updated_rows,
            blocked_items=report.get("blocked") or [],
            generated_at=report["generated_at"],
            workspace_root=workspace_root,
        )
        # Minimal human summary in Chinese (keep short to avoid token spam).
        md_lines = [
            "# API 测试报告(自动生成)",
            "",
            f"- ws: `{report['workspace']}`",
            f"- generated_at: `{report['generated_at']}`",
            f"- csv: `{report['csv']}`",
            f"- out_dir: `{report['out_dir']}`",
            f"- fix_issues: `{os.path.relpath(fix_csv_path, workspace_root)}`",
            "",
            "## 汇总",
            f"- DONE: {report['summary']['DONE']}",
            f"- BLOCKED: {report['summary']['BLOCKED']}",
            f"- SKIP: {report['summary']['SKIP']}",
            "",
            "## BLOCKED(最多展示 20 条)",
        ]
        for item in report["blocked"][:20]:
            md_lines.append(f"- {item.get('service')} {item.get('method')} {item.get('path')} (Issue_ID={item.get('issue_id')})")
        md_lines.append("")
        md_lines.append("提示:可用 `/server-fix` 基于 report.json + CSV 进入自动修复闭环。")
        write_text_if_changed((out_dir / "report.md").resolve(), "\n".join(md_lines) + "\n")

        # Exit code policy for automation:
        # - 0: all ok (no BLOCKED)
        # - 3: has BLOCKED (fix required)
        return 3 if int(report["summary"].get("BLOCKED", 0) or 0) > 0 else 0
    finally:
        # Always stop managed services, preferring the configured stop_cmd over
        # killing the process directly.
        if args.manage_service and procs:
            for service_name, proc in procs.items():
                server_dir = server_dir_by_name.get(service_name)
                if cfg.stop_cmd and server_dir is not None:
                    try:
                        run_shell(render_cmd(cfg.stop_cmd, service_dir=server_dir, service_name=service_name), cwd=workspace_root)
                        continue
                    except Exception:
                        pass
                stop_service(proc)
1899
+
1900
+
1901
if __name__ == "__main__":
    # CLI entry point: process exit code comes from main()
    # (0 ok, 2 no server dirs, 3 health failure or BLOCKED rows).
    raise SystemExit(main(sys.argv[1:]))