harness-evolver 3.3.1 → 4.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/plugin.json +1 -1
- package/README.md +54 -29
- package/agents/evolver-architect.md +56 -23
- package/agents/evolver-consolidator.md +57 -0
- package/agents/evolver-critic.md +58 -15
- package/agents/evolver-proposer.md +21 -0
- package/agents/evolver-testgen.md +22 -0
- package/package.json +1 -1
- package/skills/evolve/SKILL.md +343 -71
- package/tools/__pycache__/add_evaluator.cpython-313.pyc +0 -0
- package/tools/__pycache__/adversarial_inject.cpython-313.pyc +0 -0
- package/tools/__pycache__/consolidate.cpython-313.pyc +0 -0
- package/tools/__pycache__/iteration_gate.cpython-313.pyc +0 -0
- package/tools/__pycache__/regression_tracker.cpython-313.pyc +0 -0
- package/tools/__pycache__/synthesize_strategy.cpython-313.pyc +0 -0
- package/tools/__pycache__/validate_state.cpython-313.pyc +0 -0
- package/tools/add_evaluator.py +103 -0
- package/tools/adversarial_inject.py +205 -0
- package/tools/consolidate.py +235 -0
- package/tools/iteration_gate.py +140 -0
- package/tools/regression_tracker.py +175 -0
- package/tools/synthesize_strategy.py +178 -0
- package/tools/validate_state.py +212 -0
- package/tools/__pycache__/detect_stack.cpython-314.pyc +0 -0
- package/tools/__pycache__/trace_logger.cpython-314.pyc +0 -0
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Track regression examples across evolution iterations.
|
|
3
|
+
|
|
4
|
+
Compares per-example scores between consecutive iterations.
|
|
5
|
+
When an example transitions from failing (<0.5) to passing (>0.8),
|
|
6
|
+
adds a variation to the dataset as a regression guard.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python3 regression_tracker.py \
|
|
10
|
+
--config .evolver.json \
|
|
11
|
+
--previous-experiment v001a \
|
|
12
|
+
--current-experiment v002c \
|
|
13
|
+
--output regression_report.json
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import argparse
|
|
17
|
+
import json
|
|
18
|
+
import os
|
|
19
|
+
import platform
|
|
20
|
+
import sys
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def ensure_langsmith_api_key():
    """Load LANGSMITH_API_KEY from the credentials file or .env if not in env.

    Lookup order: existing environment variable, then the langsmith-cli
    credentials file (platform-specific path), then a local ``.env`` file.

    Returns:
        bool: True if the key is set (or was just loaded into os.environ),
        False if no key could be found.
    """
    if os.environ.get("LANGSMITH_API_KEY"):
        return True
    # Platform-specific location used by the langsmith CLI for credentials.
    if platform.system() == "Darwin":
        creds_path = os.path.expanduser("~/Library/Application Support/langsmith-cli/credentials")
    else:
        creds_path = os.path.expanduser("~/.config/langsmith-cli/credentials")
    if os.path.exists(creds_path):
        try:
            with open(creds_path) as f:
                for line in f:
                    line = line.strip()
                    if line.startswith("LANGSMITH_API_KEY="):
                        key = line.split("=", 1)[1].strip()
                        if key:
                            os.environ["LANGSMITH_API_KEY"] = key
                            return True
        except OSError:
            # Best-effort: an unreadable credentials file is not fatal.
            pass
    if os.path.exists(".env"):
        try:
            with open(".env") as f:
                for line in f:
                    line = line.strip()
                    # FIX: removed the dead `and not line.startswith("#")`
                    # guard — a line that starts with "LANGSMITH_API_KEY="
                    # can never also start with "#".
                    if line.startswith("LANGSMITH_API_KEY="):
                        # .env values may be quoted; strip surrounding quotes.
                        key = line.split("=", 1)[1].strip().strip("'\"")
                        if key:
                            os.environ["LANGSMITH_API_KEY"] = key
                            return True
        except OSError:
            pass
    return False
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def get_per_example_scores(client, experiment_name):
    """Collect the average feedback score per example for an experiment.

    Returns a dict keyed by example id (falling back to the run id when no
    reference example is linked) mapping to the mean of all numeric feedback
    scores plus truncated input/output previews. On any LangSmith error the
    partial results gathered so far are returned and the error is logged.
    """
    per_example = {}
    try:
        root_runs = list(client.list_runs(project_name=experiment_name, is_root=True, limit=200))
        for run in root_runs:
            example_key = str(run.reference_example_id or run.id)
            # Keep only numeric feedback entries; later duplicates of the
            # same feedback key overwrite earlier ones, as in a plain loop.
            numeric = {
                fb.key: fb.score
                for fb in client.list_feedback(run_ids=[run.id])
                if fb.score is not None
            }
            mean = sum(numeric.values()) / len(numeric) if numeric else 0.0
            per_example[example_key] = {
                "score": mean,
                "input": str(run.inputs)[:500] if run.inputs else "",
                "output": str(run.outputs)[:500] if run.outputs else "",
            }
    except Exception as exc:
        print(f"Error reading {experiment_name}: {exc}", file=sys.stderr)
    return per_example
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def find_transitions(prev_scores, curr_scores, fail_threshold=0.5, pass_threshold=0.8):
    """Classify score changes for examples present in both iterations.

    Returns a (fixed, regressed) pair of lists: examples that moved from
    below ``fail_threshold`` to at/above ``pass_threshold``, and examples
    that moved the opposite way. Examples in only one iteration are ignored.
    """
    fixed = []
    regressed = []

    shared_ids = set(prev_scores).intersection(curr_scores)
    for ex_id in shared_ids:
        before = prev_scores[ex_id]["score"]
        after = curr_scores[ex_id]["score"]

        was_failing = before < fail_threshold
        was_passing = before >= pass_threshold
        now_failing = after < fail_threshold
        now_passing = after >= pass_threshold

        if was_failing and now_passing:
            bucket, label = fixed, "fixed"
        elif was_passing and now_failing:
            bucket, label = regressed, "regressed"
        else:
            continue

        bucket.append({
            "example_id": ex_id,
            "prev_score": before,
            "curr_score": after,
            "type": label,
            "input": curr_scores[ex_id]["input"],
        })

    return fixed, regressed
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def add_regression_guards(client, dataset_id, transitions, max_guards=5):
    """Add regression guard examples to the dataset.

    Re-inserts the inputs of up to ``max_guards`` newly-fixed examples into
    the dataset so future iterations keep exercising them. Returns the
    number of examples actually added; per-example failures are logged to
    stderr and skipped.
    """
    import ast  # local import: only needed for repr-style input parsing

    added = 0
    for t in transitions[:max_guards]:
        try:
            raw = t["input"]
            if raw.startswith("{"):
                # BUG FIX: inputs were captured via str(run.inputs), which is
                # a Python repr (single quotes), not JSON — json.loads alone
                # rejects it, so guards were silently dropped. Fall back to
                # ast.literal_eval, then to wrapping the raw text.
                try:
                    input_data = json.loads(raw)
                except json.JSONDecodeError:
                    try:
                        input_data = ast.literal_eval(raw)
                    except (ValueError, SyntaxError):
                        input_data = {"input": raw}
            else:
                input_data = {"input": raw}
            client.create_example(
                inputs=input_data,
                dataset_id=dataset_id,
                metadata={"source": "regression_guard", "original_example_id": t["example_id"]},
            )
            added += 1
        except Exception as e:
            print(f"Failed to add guard for {t['example_id']}: {e}", file=sys.stderr)
    return added
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def main():
    """CLI entry point: compare two experiments and report transitions.

    Reads the evolver config, pulls per-example scores for the previous and
    current experiments from LangSmith, reports fixed/regressed examples as
    JSON (stdout and optionally a file), and exits non-zero if any example
    regressed.
    """
    parser = argparse.ArgumentParser(description="Track regressions across iterations")
    parser.add_argument("--config", default=".evolver.json")
    parser.add_argument("--previous-experiment", required=True, help="Previous iteration experiment name")
    parser.add_argument("--current-experiment", required=True, help="Current iteration experiment name")
    parser.add_argument("--output", default=None, help="Output JSON report")
    parser.add_argument("--add-guards", action="store_true", help="Add regression guard examples to dataset")
    parser.add_argument("--max-guards", type=int, default=5, help="Max guard examples to add")
    args = parser.parse_args()

    with open(args.config) as f:
        config = json.load(f)

    # Resolve the API key before importing the client so Client() picks it up.
    ensure_langsmith_api_key()
    from langsmith import Client
    client = Client()

    prev_scores = get_per_example_scores(client, args.previous_experiment)
    curr_scores = get_per_example_scores(client, args.current_experiment)

    transitions, regressions = find_transitions(prev_scores, curr_scores)

    # Optionally pin newly-fixed examples into the dataset as guards.
    added = 0
    if args.add_guards and transitions:
        added = add_regression_guards(client, config["dataset_id"], transitions, args.max_guards)

    result = {
        "previous": args.previous_experiment,
        "current": args.current_experiment,
        "fixed_count": len(transitions),
        "regression_count": len(regressions),
        "guards_added": added,
        "fixed": transitions,
        "regressions": regressions,
    }

    # Always print the report; also write it to --output when given.
    output = json.dumps(result, indent=2)
    if args.output:
        with open(args.output, "w") as f:
            f.write(output)
    print(output)

    # Non-zero exit signals regressions to calling automation.
    if regressions:
        print(f"\nWARNING: {len(regressions)} regressions detected!", file=sys.stderr)
        sys.exit(1)
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
# Script entry point.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,178 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Synthesize evolution strategy document from trace analysis.
|
|
3
|
+
|
|
4
|
+
Reads trace_insights.json, best_results.json, and evolution_memory.json
|
|
5
|
+
to produce a targeted strategy document with specific file paths,
|
|
6
|
+
line numbers, and concrete change recommendations for proposers.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python3 synthesize_strategy.py \
|
|
10
|
+
--config .evolver.json \
|
|
11
|
+
--trace-insights trace_insights.json \
|
|
12
|
+
--best-results best_results.json \
|
|
13
|
+
--evolution-memory evolution_memory.json \
|
|
14
|
+
--output strategy.md
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import argparse
|
|
18
|
+
import json
|
|
19
|
+
import os
|
|
20
|
+
import sys
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def load_json_safe(path):
    """Best-effort JSON loader.

    Returns the parsed content, or None when ``path`` is falsy, missing,
    unreadable, or not valid JSON — callers treat all of these uniformly
    as "no data available".
    """
    if not path:
        return None
    if not os.path.exists(path):
        return None
    try:
        with open(path) as handle:
            parsed = json.load(handle)
    except (json.JSONDecodeError, OSError):
        return None
    return parsed
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def identify_target_files(config):
    """Return the .py files named in the configured entry-point command.

    Flag-like tokens (starting with "-") are skipped so option values such
    as "--x.py" are not mistaken for target files.
    """
    entry_point = config.get("entry_point", "")
    return [part for part in entry_point.split()
            if part.endswith(".py") and not part.startswith("-")]


def synthesize(config, insights, results, memory):
    """Produce strategy recommendations from the available analysis files.

    Args:
        config: parsed .evolver.json dict.
        insights: trace_insights.json content, or None if unavailable.
        results: best_results.json content, or None.
        memory: evolution_memory.json content, or None.

    Returns:
        A dict with primary targets, prioritized failure clusters,
        recommended approaches, approaches to avoid, and (when results are
        given) the lowest-scoring examples.
    """
    strategy = {
        "primary_targets": identify_target_files(config),
        "failure_clusters": [],
        "recommended_approaches": [],
        "avoid": [],
    }

    if insights:
        for issue in insights.get("top_issues", [])[:5]:
            strategy["failure_clusters"].append({
                "type": issue.get("type", "unknown"),
                "severity": issue.get("severity", "medium"),
                "description": issue.get("description", ""),
                "count": issue.get("count", 0),
            })

    if memory:
        # FIX: single pass over memory insights (the original iterated the
        # list twice) and consistent .get() access — the original indexed
        # insight["type"] directly, which raised KeyError on entries
        # missing a "type" key.
        for insight in memory.get("insights", []):
            kind = insight.get("type", "")
            text = insight.get("insight", "")
            if insight.get("recurrence", 0) >= 2:
                if kind == "strategy_effectiveness":
                    strategy["recommended_approaches"].append(text)
                elif kind == "recurring_failure":
                    strategy["failure_clusters"].append({
                        "type": "recurring",
                        "severity": "high",
                        "description": text,
                        "count": insight["recurrence"],
                    })
            if "losing" in kind or "regression" in kind:
                strategy["avoid"].append(text)

    if results:
        per_example = results.get("per_example", {})
        failing = [(eid, data) for eid, data in per_example.items() if data.get("score", 0) < 0.5]
        failing.sort(key=lambda item: item[1].get("score", 0))
        # FIX: use .get("score", 0) below — the filter above tolerates a
        # missing "score" key, so direct indexing could raise KeyError.
        strategy["failing_examples"] = [
            {
                "example_id": eid,
                "score": data.get("score", 0),
                "input_preview": data.get("input_preview", "")[:200],
                "error": data.get("error"),
            }
            for eid, data in failing[:10]
        ]

    return strategy
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def format_strategy_md(strategy, config):
    """Render the strategy dict as a markdown document and return it."""
    framework = config.get('framework', 'unknown')
    entry_point = config.get('entry_point', 'N/A')
    out = [
        "# Evolution Strategy Document",
        "",
        f"*Framework: {framework} | Entry point: {entry_point}*",
        "",
        "## Target Files",
    ]
    out.extend(f"- `{path}`" for path in strategy.get("primary_targets", []))
    out.append("")

    clusters = strategy.get("failure_clusters", [])
    if clusters:
        out.append("## Failure Clusters (prioritized)")
        for rank, c in enumerate(clusters, 1):
            out.append(f"{rank}. **[{c['severity']}]** {c['description']} (count: {c['count']})")
        out.append("")

    # Both bullet-list sections share the same shape; render them uniformly.
    for heading, key in (
        ("## Recommended Approaches (from evolution memory)", "recommended_approaches"),
        ("## Avoid (previously unsuccessful)", "avoid"),
    ):
        entries = strategy.get(key, [])
        if entries:
            out.append(heading)
            out.extend(f"- {entry}" for entry in entries)
            out.append("")

    failing = strategy.get("failing_examples", [])
    if failing:
        out.append(f"## Top Failing Examples ({len(failing)})")
        for ex in failing:
            preview = ex["input_preview"][:100]
            suffix = f" — Error: {ex['error'][:80]}" if ex.get("error") else ""
            out.append(f"- `{ex['example_id']}` (score: {ex['score']:.2f}): {preview}{suffix}")
        out.append("")

    return "\n".join(out)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def main():
    """CLI entry point: build strategy.md (and .json) from analysis files.

    Loads the evolver config plus whichever of trace_insights, best_results,
    and evolution_memory exist, synthesizes a strategy, and writes it as
    both markdown and JSON before echoing the markdown to stdout.
    """
    parser = argparse.ArgumentParser(description="Synthesize evolution strategy")
    parser.add_argument("--config", default=".evolver.json")
    parser.add_argument("--trace-insights", default="trace_insights.json")
    parser.add_argument("--best-results", default="best_results.json")
    parser.add_argument("--evolution-memory", default="evolution_memory.json")
    parser.add_argument("--output", default="strategy.md")
    args = parser.parse_args()

    with open(args.config) as f:
        config = json.load(f)

    # Each input is optional; load_json_safe returns None when missing.
    insights = load_json_safe(args.trace_insights)
    results = load_json_safe(args.best_results)
    memory = load_json_safe(args.evolution_memory)

    strategy = synthesize(config, insights, results, memory)

    md = format_strategy_md(strategy, config)
    with open(args.output, "w") as f:
        f.write(md)

    # NOTE(review): if --output lacks a ".md" suffix, this replace is a
    # no-op and the JSON below overwrites the markdown file — confirm
    # callers always pass a .md path.
    json_path = args.output.replace(".md", ".json")
    with open(json_path, "w") as f:
        json.dump(strategy, f, indent=2)

    print(md)
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
# Script entry point.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Validate .evolver.json state against LangSmith reality.
|
|
3
|
+
|
|
4
|
+
Checks that referenced experiments, datasets, and projects still exist.
|
|
5
|
+
Returns JSON with validation results and any divergences found.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
python3 validate_state.py --config .evolver.json --output validation.json
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import argparse
|
|
12
|
+
import json
|
|
13
|
+
import os
|
|
14
|
+
import platform
|
|
15
|
+
import sys
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def ensure_langsmith_api_key():
    """Ensure LANGSMITH_API_KEY is available in the environment.

    Checks the environment first, then the langsmith-cli credentials file
    (platform-specific path), then a local .env file. Returns True when a
    key is present or was loaded into os.environ, False otherwise.
    """
    if os.environ.get("LANGSMITH_API_KEY"):
        return True

    if platform.system() == "Darwin":
        creds_path = os.path.expanduser("~/Library/Application Support/langsmith-cli/credentials")
    else:
        creds_path = os.path.expanduser("~/.config/langsmith-cli/credentials")

    def _scan(path, strip_quotes):
        # Best-effort scan of a KEY=value file; returns the key or None.
        if not os.path.exists(path):
            return None
        try:
            with open(path) as fh:
                for raw in fh:
                    entry = raw.strip()
                    if not entry.startswith("LANGSMITH_API_KEY="):
                        continue
                    value = entry.split("=", 1)[1].strip()
                    if strip_quotes:
                        # .env values may be quoted.
                        value = value.strip("'\"")
                    if value:
                        return value
        except OSError:
            pass
        return None

    for path, strip_quotes in ((creds_path, False), (".env", True)):
        key = _scan(path, strip_quotes)
        if key:
            os.environ["LANGSMITH_API_KEY"] = key
            return True
    return False
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def validate_dataset(client, config):
    """Verify the configured dataset exists in LangSmith.

    Returns (issues, example_count). A missing config entry or an
    unreachable dataset yields a critical issue and a count of 0; an id
    mismatch between config and LangSmith yields a warning.
    """
    problems = []
    name = config.get("dataset")
    expected_id = config.get("dataset_id")

    if not name:
        problems.append({"field": "dataset", "severity": "critical", "message": "No dataset configured"})
        return problems, 0

    try:
        ds = client.read_dataset(dataset_name=name)
        if expected_id and str(ds.id) != expected_id:
            problems.append({
                "field": "dataset_id",
                "severity": "warning",
                "message": f"dataset_id mismatch: config has {expected_id}, LangSmith has {ds.id}",
            })
        # Count is capped at 500 by the listing limit.
        examples = client.list_examples(dataset_id=ds.id, limit=500)
        return problems, sum(1 for _ in examples)
    except Exception as exc:
        problems.append({"field": "dataset", "severity": "critical", "message": f"Dataset not found: {exc}"})
        return problems, 0
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def validate_best_experiment(client, config):
    """Check that the recorded best experiment still has runs in LangSmith.

    Returns a list of critical issues; empty when no best_experiment is
    configured or the experiment has at least one root run.
    """
    best = config.get("best_experiment")
    if not best:
        return []

    problems = []
    try:
        # One root run is enough to prove the experiment still exists.
        has_runs = bool(list(client.list_runs(project_name=best, is_root=True, limit=1)))
        if not has_runs:
            problems.append({
                "field": "best_experiment",
                "severity": "critical",
                "message": f"Best experiment '{best}' has no runs in LangSmith",
            })
    except Exception as exc:
        problems.append({
            "field": "best_experiment",
            "severity": "critical",
            "message": f"Best experiment '{best}' not accessible: {exc}",
        })
    return problems
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def validate_git_state(config):
    """Best-effort check that the git HEAD is readable.

    Any failure (git missing, not a repo, timeout, empty output) is
    reported as a warning issue rather than raised. The config argument is
    accepted for interface symmetry with the other validators but unused.
    """
    import subprocess

    problems = []
    try:
        proc = subprocess.run(
            ["git", "log", "--oneline", "-1"],
            capture_output=True,
            text=True,
            timeout=10,
        )
        if not proc.stdout.strip():
            problems.append({"field": "git", "severity": "warning", "message": "Could not read git HEAD"})
    except Exception as exc:
        problems.append({"field": "git", "severity": "warning", "message": f"Git check failed: {exc}"})
    return problems
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def main():
    """CLI entry point: validate .evolver.json against LangSmith state.

    Runs dataset, best-experiment, git, and history-consistency checks,
    optionally auto-fixes recoverable divergences (--fix), prints a JSON
    report, and exits non-zero when critical issues remain.
    """
    parser = argparse.ArgumentParser(description="Validate .evolver.json against LangSmith")
    parser.add_argument("--config", default=".evolver.json", help="Config path")
    parser.add_argument("--output", default=None, help="Output JSON path")
    parser.add_argument("--fix", action="store_true", help="Auto-fix divergences where possible")
    args = parser.parse_args()

    if not os.path.exists(args.config):
        print(json.dumps({"valid": False, "issues": [{"severity": "critical", "message": f"{args.config} not found"}]}))
        sys.exit(1)

    with open(args.config) as f:
        config = json.load(f)

    # Resolve the API key before importing the client so Client() picks it up.
    ensure_langsmith_api_key()
    from langsmith import Client
    client = Client()

    all_issues = []

    # Validate dataset
    dataset_issues, example_count = validate_dataset(client, config)
    all_issues.extend(dataset_issues)

    # Validate best experiment
    experiment_issues = validate_best_experiment(client, config)
    all_issues.extend(experiment_issues)

    # Validate git state
    git_issues = validate_git_state(config)
    all_issues.extend(git_issues)

    # Check history consistency: warn when the last recorded run scored at
    # least as high as best_score but isn't the recorded best_experiment.
    history = config.get("history", [])
    if history:
        last = history[-1]
        if last.get("experiment") != config.get("best_experiment"):
            best_score = config.get("best_score", 0)
            last_score = last.get("score", 0)
            if last_score >= best_score:
                all_issues.append({
                    "field": "history",
                    "severity": "warning",
                    "message": f"Last history entry ({last['experiment']}) differs from best_experiment ({config.get('best_experiment')})",
                })

    # Auto-fix divergences if --fix flag is set
    # NOTE(review): the config file is rewritten once per fixed issue
    # rather than once at the end — harmless but redundant; confirm before
    # consolidating.
    if args.fix:
        fixed = []
        for issue in all_issues:
            if issue.get("field") == "dataset_id" and issue.get("severity") == "warning":
                try:
                    dataset = client.read_dataset(dataset_name=config["dataset"])
                    config["dataset_id"] = str(dataset.id)
                    with open(args.config, "w") as f:
                        json.dump(config, f, indent=2)
                    fixed.append(f"Fixed dataset_id: updated to {dataset.id}")
                    issue["severity"] = "fixed"
                except Exception:
                    pass
            elif issue.get("field") == "history" and issue.get("severity") == "warning":
                history = config.get("history", [])
                if history:
                    # Promote the highest-scoring history entry to best.
                    best_in_history = max(history, key=lambda h: h.get("score", 0))
                    config["best_experiment"] = best_in_history["experiment"]
                    config["best_score"] = best_in_history["score"]
                    with open(args.config, "w") as f:
                        json.dump(config, f, indent=2)
                    fixed.append(f"Fixed best_experiment: set to {best_in_history['experiment']}")
                    issue["severity"] = "fixed"
        if fixed:
            print(f"Auto-fixed {len(fixed)} issues:", file=sys.stderr)
            for f_msg in fixed:
                print(f" {f_msg}", file=sys.stderr)

    # Fixed issues are dropped from the report; criticals drive the exit code.
    all_issues = [i for i in all_issues if i.get("severity") != "fixed"]
    critical = [i for i in all_issues if i.get("severity") == "critical"]
    result = {
        "valid": len(critical) == 0,
        "issues": all_issues,
        "dataset_examples": example_count,
        "config_iterations": config.get("iterations", 0),
        "config_best_score": config.get("best_score", 0),
    }

    output = json.dumps(result, indent=2)
    if args.output:
        with open(args.output, "w") as f:
            f.write(output)
    print(output)

    if critical:
        sys.exit(1)
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
# Script entry point.
if __name__ == "__main__":
    main()
|
|
Binary file
|
|
Binary file
|