@templmf/temp-solf-lmf 0.0.45 → 0.0.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ai-cli-win-fix.7z +0 -0
- package/package.json +1 -1
- package/ai-gateway/.env +0 -42
- package/ai-gateway/README.md +0 -295
- package/ai-gateway/package-lock.json +0 -1370
- package/ai-gateway/package.json +0 -18
- package/ai-gateway/src/index.js +0 -132
- package/ai-gateway/src/middleware/auth.js +0 -45
- package/ai-gateway/src/middleware/rateLimit.js +0 -87
- package/ai-gateway/src/routes/chat.js +0 -657
- package/ai-gateway/src/skills/detector.js +0 -145
- package/ai-gateway/src/skills/html.md +0 -18
- package/ai-gateway/src/skills/markdown.md +0 -18
- package/ai-gateway/src/skills/react.md +0 -27
- package/ai-gateway/src/skills/registry.js +0 -441
- package/ai-gateway/src/skills/skill-creator/LICENSE.txt +0 -202
- package/ai-gateway/src/skills/skill-creator/SKILL.md +0 -485
- package/ai-gateway/src/skills/skill-creator/agents/analyzer.md +0 -274
- package/ai-gateway/src/skills/skill-creator/agents/comparator.md +0 -202
- package/ai-gateway/src/skills/skill-creator/agents/grader.md +0 -223
- package/ai-gateway/src/skills/skill-creator/assets/eval_review.html +0 -146
- package/ai-gateway/src/skills/skill-creator/eval-viewer/generate_review.py +0 -471
- package/ai-gateway/src/skills/skill-creator/eval-viewer/viewer.html +0 -1325
- package/ai-gateway/src/skills/skill-creator/references/schemas.md +0 -430
- package/ai-gateway/src/skills/skill-creator/scripts/__init__.py +0 -0
- package/ai-gateway/src/skills/skill-creator/scripts/aggregate_benchmark.py +0 -401
- package/ai-gateway/src/skills/skill-creator/scripts/generate_report.py +0 -326
- package/ai-gateway/src/skills/skill-creator/scripts/improve_description.py +0 -247
- package/ai-gateway/src/skills/skill-creator/scripts/package_skill.py +0 -136
- package/ai-gateway/src/skills/skill-creator/scripts/quick_validate.py +0 -103
- package/ai-gateway/src/skills/skill-creator/scripts/run_eval.py +0 -310
- package/ai-gateway/src/skills/skill-creator/scripts/run_loop.py +0 -328
- package/ai-gateway/src/skills/skill-creator/scripts/utils.py +0 -47
- package/ai-gateway/src/skills/skill-creator/skill-creator.skill +0 -0
- package/ai-gateway/src/skills/ticket.md +0 -36
- package/ai-gateway/src/skills/vue.md +0 -31
- package/ai-gateway/src/utils/logger.js +0 -21
- package/ai-gateway/src/utils/retry.js +0 -90
- package/ai-gateway/src/utils/sessionManager.js +0 -159
- package/ai-gateway/src/utils/structuredResponse.js +0 -144
- package/ai-gateway/src/utils/toolAdapter.js +0 -151
- package/压缩后的文件.7z +0 -0
package/ai-gateway/src/skills/skill-creator/scripts/run_loop.py
@@ -1,328 +0,0 @@
-#!/usr/bin/env python3
-"""Run the eval + improve loop until all pass or max iterations reached.
-
-Combines run_eval.py and improve_description.py in a loop, tracking history
-and returning the best description found. Supports train/test split to prevent
-overfitting.
-"""
-
-import argparse
-import json
-import random
-import sys
-import tempfile
-import time
-import webbrowser
-from pathlib import Path
-
-from scripts.generate_report import generate_html
-from scripts.improve_description import improve_description
-from scripts.run_eval import find_project_root, run_eval
-from scripts.utils import parse_skill_md
-
-
-def split_eval_set(eval_set: list[dict], holdout: float, seed: int = 42) -> tuple[list[dict], list[dict]]:
-    """Split eval set into train and test sets, stratified by should_trigger."""
-    random.seed(seed)
-
-    # Separate by should_trigger
-    trigger = [e for e in eval_set if e["should_trigger"]]
-    no_trigger = [e for e in eval_set if not e["should_trigger"]]
-
-    # Shuffle each group
-    random.shuffle(trigger)
-    random.shuffle(no_trigger)
-
-    # Calculate split points
-    n_trigger_test = max(1, int(len(trigger) * holdout))
-    n_no_trigger_test = max(1, int(len(no_trigger) * holdout))
-
-    # Split
-    test_set = trigger[:n_trigger_test] + no_trigger[:n_no_trigger_test]
-    train_set = trigger[n_trigger_test:] + no_trigger[n_no_trigger_test:]
-
-    return train_set, test_set
-
-
-def run_loop(
-    eval_set: list[dict],
-    skill_path: Path,
-    description_override: str | None,
-    num_workers: int,
-    timeout: int,
-    max_iterations: int,
-    runs_per_query: int,
-    trigger_threshold: float,
-    holdout: float,
-    model: str,
-    verbose: bool,
-    live_report_path: Path | None = None,
-    log_dir: Path | None = None,
-) -> dict:
-    """Run the eval + improvement loop."""
-    project_root = find_project_root()
-    name, original_description, content = parse_skill_md(skill_path)
-    current_description = description_override or original_description
-
-    # Split into train/test if holdout > 0
-    if holdout > 0:
-        train_set, test_set = split_eval_set(eval_set, holdout)
-        if verbose:
-            print(f"Split: {len(train_set)} train, {len(test_set)} test (holdout={holdout})", file=sys.stderr)
-    else:
-        train_set = eval_set
-        test_set = []
-
-    history = []
-    exit_reason = "unknown"
-
-    for iteration in range(1, max_iterations + 1):
-        if verbose:
-            print(f"\n{'='*60}", file=sys.stderr)
-            print(f"Iteration {iteration}/{max_iterations}", file=sys.stderr)
-            print(f"Description: {current_description}", file=sys.stderr)
-            print(f"{'='*60}", file=sys.stderr)
-
-        # Evaluate train + test together in one batch for parallelism
-        all_queries = train_set + test_set
-        t0 = time.time()
-        all_results = run_eval(
-            eval_set=all_queries,
-            skill_name=name,
-            description=current_description,
-            num_workers=num_workers,
-            timeout=timeout,
-            project_root=project_root,
-            runs_per_query=runs_per_query,
-            trigger_threshold=trigger_threshold,
-            model=model,
-        )
-        eval_elapsed = time.time() - t0
-
-        # Split results back into train/test by matching queries
-        train_queries_set = {q["query"] for q in train_set}
-        train_result_list = [r for r in all_results["results"] if r["query"] in train_queries_set]
-        test_result_list = [r for r in all_results["results"] if r["query"] not in train_queries_set]
-
-        train_passed = sum(1 for r in train_result_list if r["pass"])
-        train_total = len(train_result_list)
-        train_summary = {"passed": train_passed, "failed": train_total - train_passed, "total": train_total}
-        train_results = {"results": train_result_list, "summary": train_summary}
-
-        if test_set:
-            test_passed = sum(1 for r in test_result_list if r["pass"])
-            test_total = len(test_result_list)
-            test_summary = {"passed": test_passed, "failed": test_total - test_passed, "total": test_total}
-            test_results = {"results": test_result_list, "summary": test_summary}
-        else:
-            test_results = None
-            test_summary = None
-
-        history.append({
-            "iteration": iteration,
-            "description": current_description,
-            "train_passed": train_summary["passed"],
-            "train_failed": train_summary["failed"],
-            "train_total": train_summary["total"],
-            "train_results": train_results["results"],
-            "test_passed": test_summary["passed"] if test_summary else None,
-            "test_failed": test_summary["failed"] if test_summary else None,
-            "test_total": test_summary["total"] if test_summary else None,
-            "test_results": test_results["results"] if test_results else None,
-            # For backward compat with report generator
-            "passed": train_summary["passed"],
-            "failed": train_summary["failed"],
-            "total": train_summary["total"],
-            "results": train_results["results"],
-        })
-
-        # Write live report if path provided
-        if live_report_path:
-            partial_output = {
-                "original_description": original_description,
-                "best_description": current_description,
-                "best_score": "in progress",
-                "iterations_run": len(history),
-                "holdout": holdout,
-                "train_size": len(train_set),
-                "test_size": len(test_set),
-                "history": history,
-            }
-            live_report_path.write_text(generate_html(partial_output, auto_refresh=True, skill_name=name))
-
-        if verbose:
-            def print_eval_stats(label, results, elapsed):
-                pos = [r for r in results if r["should_trigger"]]
-                neg = [r for r in results if not r["should_trigger"]]
-                tp = sum(r["triggers"] for r in pos)
-                pos_runs = sum(r["runs"] for r in pos)
-                fn = pos_runs - tp
-                fp = sum(r["triggers"] for r in neg)
-                neg_runs = sum(r["runs"] for r in neg)
-                tn = neg_runs - fp
-                total = tp + tn + fp + fn
-                precision = tp / (tp + fp) if (tp + fp) > 0 else 1.0
-                recall = tp / (tp + fn) if (tp + fn) > 0 else 1.0
-                accuracy = (tp + tn) / total if total > 0 else 0.0
-                print(f"{label}: {tp+tn}/{total} correct, precision={precision:.0%} recall={recall:.0%} accuracy={accuracy:.0%} ({elapsed:.1f}s)", file=sys.stderr)
-                for r in results:
-                    status = "PASS" if r["pass"] else "FAIL"
-                    rate_str = f"{r['triggers']}/{r['runs']}"
-                    print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:60]}", file=sys.stderr)
-
-            print_eval_stats("Train", train_results["results"], eval_elapsed)
-            if test_summary:
-                print_eval_stats("Test ", test_results["results"], 0)
-
-        if train_summary["failed"] == 0:
-            exit_reason = f"all_passed (iteration {iteration})"
-            if verbose:
-                print(f"\nAll train queries passed on iteration {iteration}!", file=sys.stderr)
-            break
-
-        if iteration == max_iterations:
-            exit_reason = f"max_iterations ({max_iterations})"
-            if verbose:
-                print(f"\nMax iterations reached ({max_iterations}).", file=sys.stderr)
-            break
-
-        # Improve the description based on train results
-        if verbose:
-            print(f"\nImproving description...", file=sys.stderr)
-
-        t0 = time.time()
-        # Strip test scores from history so improvement model can't see them
-        blinded_history = [
-            {k: v for k, v in h.items() if not k.startswith("test_")}
-            for h in history
-        ]
-        new_description = improve_description(
-            skill_name=name,
-            skill_content=content,
-            current_description=current_description,
-            eval_results=train_results,
-            history=blinded_history,
-            model=model,
-            log_dir=log_dir,
-            iteration=iteration,
-        )
-        improve_elapsed = time.time() - t0
-
-        if verbose:
-            print(f"Proposed ({improve_elapsed:.1f}s): {new_description}", file=sys.stderr)
-
-        current_description = new_description
-
-    # Find the best iteration by TEST score (or train if no test set)
-    if test_set:
-        best = max(history, key=lambda h: h["test_passed"] or 0)
-        best_score = f"{best['test_passed']}/{best['test_total']}"
-    else:
-        best = max(history, key=lambda h: h["train_passed"])
-        best_score = f"{best['train_passed']}/{best['train_total']}"
-
-    if verbose:
-        print(f"\nExit reason: {exit_reason}", file=sys.stderr)
-        print(f"Best score: {best_score} (iteration {best['iteration']})", file=sys.stderr)
-
-    return {
-        "exit_reason": exit_reason,
-        "original_description": original_description,
-        "best_description": best["description"],
-        "best_score": best_score,
-        "best_train_score": f"{best['train_passed']}/{best['train_total']}",
-        "best_test_score": f"{best['test_passed']}/{best['test_total']}" if test_set else None,
-        "final_description": current_description,
-        "iterations_run": len(history),
-        "holdout": holdout,
-        "train_size": len(train_set),
-        "test_size": len(test_set),
-        "history": history,
-    }
-
-
-def main():
-    parser = argparse.ArgumentParser(description="Run eval + improve loop")
-    parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file")
-    parser.add_argument("--skill-path", required=True, help="Path to skill directory")
-    parser.add_argument("--description", default=None, help="Override starting description")
-    parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers")
-    parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds")
-    parser.add_argument("--max-iterations", type=int, default=5, help="Max improvement iterations")
-    parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query")
-    parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold")
-    parser.add_argument("--holdout", type=float, default=0.4, help="Fraction of eval set to hold out for testing (0 to disable)")
-    parser.add_argument("--model", required=True, help="Model for improvement")
-    parser.add_argument("--verbose", action="store_true", help="Print progress to stderr")
-    parser.add_argument("--report", default="auto", help="Generate HTML report at this path (default: 'auto' for temp file, 'none' to disable)")
-    parser.add_argument("--results-dir", default=None, help="Save all outputs (results.json, report.html, log.txt) to a timestamped subdirectory here")
-    args = parser.parse_args()
-
-    eval_set = json.loads(Path(args.eval_set).read_text())
-    skill_path = Path(args.skill_path)
-
-    if not (skill_path / "SKILL.md").exists():
-        print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr)
-        sys.exit(1)
-
-    name, _, _ = parse_skill_md(skill_path)
-
-    # Set up live report path
-    if args.report != "none":
-        if args.report == "auto":
-            timestamp = time.strftime("%Y%m%d_%H%M%S")
-            live_report_path = Path(tempfile.gettempdir()) / f"skill_description_report_{skill_path.name}_{timestamp}.html"
-        else:
-            live_report_path = Path(args.report)
-        # Open the report immediately so the user can watch
-        live_report_path.write_text("<html><body><h1>Starting optimization loop...</h1><meta http-equiv='refresh' content='5'></body></html>")
-        webbrowser.open(str(live_report_path))
-    else:
-        live_report_path = None
-
-    # Determine output directory (create before run_loop so logs can be written)
-    if args.results_dir:
-        timestamp = time.strftime("%Y-%m-%d_%H%M%S")
-        results_dir = Path(args.results_dir) / timestamp
-        results_dir.mkdir(parents=True, exist_ok=True)
-    else:
-        results_dir = None
-
-    log_dir = results_dir / "logs" if results_dir else None
-
-    output = run_loop(
-        eval_set=eval_set,
-        skill_path=skill_path,
-        description_override=args.description,
-        num_workers=args.num_workers,
-        timeout=args.timeout,
-        max_iterations=args.max_iterations,
-        runs_per_query=args.runs_per_query,
-        trigger_threshold=args.trigger_threshold,
-        holdout=args.holdout,
-        model=args.model,
-        verbose=args.verbose,
-        live_report_path=live_report_path,
-        log_dir=log_dir,
-    )
-
-    # Save JSON output
-    json_output = json.dumps(output, indent=2)
-    print(json_output)
-    if results_dir:
-        (results_dir / "results.json").write_text(json_output)
-
-    # Write final HTML report (without auto-refresh)
-    if live_report_path:
-        live_report_path.write_text(generate_html(output, auto_refresh=False, skill_name=name))
-        print(f"\nReport: {live_report_path}", file=sys.stderr)
-
-    if results_dir and live_report_path:
-        (results_dir / "report.html").write_text(generate_html(output, auto_refresh=False, skill_name=name))
-
-    if results_dir:
-        print(f"Results saved to: {results_dir}", file=sys.stderr)
-
-
-if __name__ == "__main__":
-    main()
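For orientation, the removed script exposed a CLI via argparse. A plausible invocation, assuming it was run as a module from the skill-creator root (so the `scripts` package imports resolve), with placeholder paths and model id, would have looked like:

```
python -m scripts.run_loop \
  --eval-set evals/my-skill.json \
  --skill-path ./my-skill \
  --model <model-id> \
  --holdout 0.4 \
  --max-iterations 5 \
  --verbose
```

Per the code above, the loop optimizes the description against the train split only and picks the best iteration by held-out test score, which is what the default 0.4 holdout is for.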
package/ai-gateway/src/skills/skill-creator/scripts/utils.py
@@ -1,47 +0,0 @@
-"""Shared utilities for skill-creator scripts."""
-
-from pathlib import Path
-
-
-
-def parse_skill_md(skill_path: Path) -> tuple[str, str, str]:
-    """Parse a SKILL.md file, returning (name, description, full_content)."""
-    content = (skill_path / "SKILL.md").read_text()
-    lines = content.split("\n")
-
-    if lines[0].strip() != "---":
-        raise ValueError("SKILL.md missing frontmatter (no opening ---)")
-
-    end_idx = None
-    for i, line in enumerate(lines[1:], start=1):
-        if line.strip() == "---":
-            end_idx = i
-            break
-
-    if end_idx is None:
-        raise ValueError("SKILL.md missing frontmatter (no closing ---)")
-
-    name = ""
-    description = ""
-    frontmatter_lines = lines[1:end_idx]
-    i = 0
-    while i < len(frontmatter_lines):
-        line = frontmatter_lines[i]
-        if line.startswith("name:"):
-            name = line[len("name:"):].strip().strip('"').strip("'")
-        elif line.startswith("description:"):
-            value = line[len("description:"):].strip()
-            # Handle YAML multiline indicators (>, |, >-, |-)
-            if value in (">", "|", ">-", "|-"):
-                continuation_lines: list[str] = []
-                i += 1
-                while i < len(frontmatter_lines) and (frontmatter_lines[i].startswith(" ") or frontmatter_lines[i].startswith("\t")):
-                    continuation_lines.append(frontmatter_lines[i].strip())
-                    i += 1
-                description = " ".join(continuation_lines)
-                continue
-            else:
-                description = value.strip('"').strip("'")
-        i += 1
-
-    return name, description, content
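To illustrate what `parse_skill_md` consumes: given a hypothetical SKILL.md opening with frontmatter like the following, it returns `("my-skill", "Generates Vue 3 single-file components following team conventions.", <full file text>)`; the folded `>` value's indented continuation lines are stripped and joined with spaces.

```
---
name: my-skill
description: >
  Generates Vue 3 single-file components
  following team conventions.
---
```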
File without changes
package/ai-gateway/src/skills/ticket.md
@@ -1,36 +0,0 @@
-# Ticket Assistant Operating Guide
-
-You are an intelligent ticketing assistant. Your job is to guide the user through filling out a work ticket via natural-language conversation.
-
-## Ticket Types and Fields
-
-### Incident ticket (incident)
-Required: title, severity (P1/P2/P3/P4), affected_system, description
-Optional: steps_to_reproduce, expected_behavior, contact
-
-### Feature ticket (feature)
-Required: title, priority (high/medium/low), description, expected_value
-Optional: deadline, stakeholder
-
-### Change ticket (change)
-Required: title, change_type (deploy/config/infra), description, risk_level (high/medium/low), rollback_plan
-Optional: maintenance_window, approver
-
-## Interaction Rules
-
-1. Identify the ticket type first; when unsure, ask the user directly
-2. Follow up on only 1-2 missing fields at a time; do not list every field at once
-3. Use a conversational, friendly tone in follow-up questions
-4. Once all fields are collected, output a summary and ask the user to confirm
-
-## Output Format
-
-Every reply must return exactly the following JSON, with no extra text:
-```json
-{
-  "reply": "what to say to the user",
-  "form_draft": {},
-  "missing_fields": [],
-  "is_complete": false
-}
-```
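For concreteness, a hypothetical mid-conversation response that satisfies the schema above (every field value is invented for illustration):

```json
{
  "reply": "Got it, that sounds like a P2 incident. Which system is affected, and can you briefly describe what went wrong?",
  "form_draft": { "title": "Checkout page returns 500 on submit", "severity": "P2" },
  "missing_fields": ["affected_system", "description"],
  "is_complete": false
}
```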
package/ai-gateway/src/skills/vue.md
@@ -1,31 +0,0 @@
-# Vue 3 Component Generation Guide
-
-Generate Vue 3 single-file components (SFCs).
-
-## Conventions
-- Use `<script setup>` + the Composition API
-- TypeScript support (lang="ts")
-- Props via defineProps<Interface>()
-- Emits via defineEmits<Interface>()
-- Styles via `<style scoped>`
-
-## Component Structure
-```vue
-<template>
-  <!-- template -->
-</template>
-
-<script setup lang="ts">
-interface Props { /* ... */ }
-const props = defineProps<Props>()
-// logic
-</script>
-
-<style scoped>
-/* styles */
-</style>
-```
-
-## Forbidden
-- Do not use the Options API (unless the user explicitly asks for it)
-- Do not use Vue 2 syntax
package/ai-gateway/src/utils/logger.js
@@ -1,21 +0,0 @@
-import { createLogger, format, transports } from "winston";
-
-export const logger = createLogger({
-  level: process.env.LOG_LEVEL || "info",
-  format: format.combine(
-    format.timestamp({ format: "YYYY-MM-DD HH:mm:ss" }),
-    format.errors({ stack: true }),
-    format.json()
-  ),
-  transports: [
-    new transports.Console({
-      format: format.combine(
-        format.colorize(),
-        format.printf(({ timestamp, level, message, ...meta }) => {
-          const metaStr = Object.keys(meta).length ? " " + JSON.stringify(meta) : "";
-          return `${timestamp} [${level}] ${message}${metaStr}`;
-        })
-      )
-    })
-  ]
-});
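A minimal sketch of how this logger was presumably consumed from the rest of the gateway (the call site and fields are invented for illustration); extra fields land in `meta` and are appended as JSON by the printf formatter:

```js
import { logger } from "./utils/logger.js";

// Prints something like: 2025-01-01 12:00:00 [info] upstream request {"requestId":"abc123"}
logger.info("upstream request", { requestId: "abc123" });
```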
package/ai-gateway/src/utils/retry.js
@@ -1,90 +0,0 @@
-/**
- * Retry helper with exponential backoff.
- *
- * Automatically retries upstream model requests on the following errors:
- * - 429 Rate Limit (waits for Retry-After, then retries)
- * - 500 / 502 / 503 server errors
- * - network timeouts
- *
- * A structured-output parse failure also triggers one retry (with a format-correction instruction appended).
- */
-
-import { logger } from "./logger.js";
-
-const RETRYABLE_STATUS = new Set([429, 500, 502, 503, 504]);
-
-// ─────────────────────────────────────────────────────
-// Generic retry wrapper
-// ─────────────────────────────────────────────────────
-export async function withRetry(fn, options = {}) {
-  const {
-    maxRetries = 2,
-    baseDelayMs = 500,
-    maxDelayMs = 8000,
-    requestId = "unknown",
-    label = "request"
-  } = options;
-
-  let lastError;
-
-  for (let attempt = 0; attempt <= maxRetries; attempt++) {
-    try {
-      return await fn(attempt);
-    } catch (err) {
-      lastError = err;
-      const status = err.status || err.response?.status;
-
-      // Non-retryable errors (auth failures, bad parameters): rethrow immediately
-      if (status && !RETRYABLE_STATUS.has(status)) {
-        throw err;
-      }
-
-      if (attempt === maxRetries) break;
-
-      // Compute the wait time
-      let delayMs;
-      if (status === 429 && err.headers?.["retry-after"]) {
-        // Honor the upstream Retry-After
-        delayMs = parseInt(err.headers["retry-after"]) * 1000;
-      } else {
-        // Exponential backoff + random jitter
-        delayMs = Math.min(
-          baseDelayMs * Math.pow(2, attempt) + Math.random() * 200,
-          maxDelayMs
-        );
-      }
-
-      logger.warn(`${label} retry`, {
-        requestId,
-        attempt: attempt + 1,
-        maxRetries,
-        delayMs,
-        error: err.message,
-        status
-      });
-
-      await sleep(delayMs);
-    }
-  }
-
-  throw lastError;
-}
-
-// ─────────────────────────────────────────────────────
-// Retry after a structured-output parse failure: append a correction instruction
-// ─────────────────────────────────────────────────────
-export function buildJsonCorrectionMessages(originalMessages, failedContent, schema) {
-  const correctionPrompt = schema
-    ? `Your previous reply could not be parsed as valid JSON. Re-output it strictly following this schema; output only JSON, with no explanation:\n\`\`\`json\n${JSON.stringify(schema, null, 2)}\n\`\`\``
-    : `Your previous reply could not be parsed as valid JSON. Re-output only the JSON object, with no explanatory text and no code-block wrapping.`;
-
-  return [
-    ...originalMessages,
-    { role: "assistant", content: failedContent },
-    { role: "user", content: correctionPrompt }
-  ];
-}
-
-function sleep(ms) {
-  return new Promise(resolve => setTimeout(resolve, ms));
-}
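Finally, a sketch of the call pattern these two helpers imply, assuming a hypothetical `callUpstream(messages)` function and in-scope `messages`, `requestId`, and `schema` (none of which are part of the package):

```js
import { withRetry, buildJsonCorrectionMessages } from "./utils/retry.js";

// Transient upstream failures (429 / 5xx / timeouts) are retried with backoff.
const raw = await withRetry(() => callUpstream(messages), {
  maxRetries: 2,
  requestId,
  label: "chat-completion"
});

// On a structured-output parse failure, re-ask once with the correction
// instruction appended to the original conversation.
let parsed;
try {
  parsed = JSON.parse(raw);
} catch {
  const corrected = await withRetry(
    () => callUpstream(buildJsonCorrectionMessages(messages, raw, schema)),
    { maxRetries: 0, requestId, label: "json-correction" }
  );
  parsed = JSON.parse(corrected);
}
```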