claude-evolve 1.3.7 → 1.3.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/claude-evolve-ideate +56 -10
- package/bin/claude-evolve-run +11 -49
- package/lib/csv_helper.py +116 -0
- package/package.json +1 -1
package/bin/claude-evolve-ideate
CHANGED

@@ -276,9 +276,15 @@ generate_novel_ideas_direct() {
 Current CSV content:
 $(cat "$FULL_CSV_PATH")
 
-Algorithm files you
+Algorithm files you MUST examine for context:
 - Base algorithm: $FULL_ALGORITHM_PATH
-- Evolved algorithms: $FULL_OUTPUT_DIR/
+- Evolved algorithms: $FULL_OUTPUT_DIR/evolution_*.py (examine ALL to see what's been tried)
+
+IMPORTANT: Before generating ideas, you should:
+1. Read the base algorithm to understand the codebase structure and possibilities
+2. Read ALL existing evolution_*.py files to see what modifications have been attempted
+3. Analyze the CSV to see which approaches worked (high scores) and which failed
+4. Avoid repeating failed approaches unless trying them with significant modifications
 
 Project Brief:
 $(cat "$FULL_BRIEF_PATH")

@@ -290,6 +296,10 @@ Requirements for new CSV rows:
 - Each description should be one clear sentence describing a specific algorithmic change
 - Descriptions should explore completely different approaches than existing ones
 - All new rows should have empty performance and status fields
+- CRITICAL: You must read existing evolution files to avoid suggesting changes that:
+  * Have already been tried and failed
+  * Are impossible given the codebase structure
+  * Would break the algorithm interface requirements
 
 Example descriptions:
 - Use ensemble of 3 random forests with different feature subsets

@@ -316,9 +326,15 @@ generate_hill_climbing_direct() {
 Current CSV content:
 $(cat "$FULL_CSV_PATH")
 
-Algorithm files you
+Algorithm files you MUST examine for context:
 - Base algorithm: $FULL_ALGORITHM_PATH
-- Evolved algorithms: $FULL_OUTPUT_DIR/
+- Evolved algorithms: $FULL_OUTPUT_DIR/evolution_*.py (examine ALL to see what's been tried)
+
+IMPORTANT: Before generating ideas, you should:
+1. Read the base algorithm to understand the codebase structure and possibilities
+2. Read ALL existing evolution_*.py files to see what modifications have been attempted
+3. Analyze the CSV to see which approaches worked (high scores) and which failed
+4. Avoid repeating failed approaches unless trying them with significant modifications
 
 Successful algorithms to build on:
 $top_performers

@@ -333,6 +349,10 @@ Requirements for new CSV rows:
 - Each description should be one clear sentence about parameter tuning
 - Focus on adjusting hyperparameters, thresholds, sizes, learning rates
 - All new rows should have empty performance and status fields
+- CRITICAL: You must read the parent algorithm file to understand:
+  * What parameters are actually tunable in the code
+  * What changes made this algorithm successful vs its parent
+  * What parameter ranges make sense given the implementation
 
 Example descriptions:
 - Increase learning rate from 0.001 to 0.01 for faster convergence

@@ -359,9 +379,15 @@ generate_structural_mutation_direct() {
 Current CSV content:
 $(cat "$FULL_CSV_PATH")
 
-Algorithm files you
+Algorithm files you MUST examine for context:
 - Base algorithm: $FULL_ALGORITHM_PATH
-- Evolved algorithms: $FULL_OUTPUT_DIR/
+- Evolved algorithms: $FULL_OUTPUT_DIR/evolution_*.py (examine ALL to see what's been tried)
+
+IMPORTANT: Before generating ideas, you should:
+1. Read the base algorithm to understand the codebase structure and possibilities
+2. Read ALL existing evolution_*.py files to see what modifications have been attempted
+3. Analyze the CSV to see which approaches worked (high scores) and which failed
+4. Avoid repeating failed approaches unless trying them with significant modifications
 
 Successful algorithms to build on:
 $top_performers

@@ -376,6 +402,10 @@ Requirements for new CSV rows:
 - Each description should be one clear sentence about architectural changes
 - Keep core insights but change implementation approach
 - All new rows should have empty performance and status fields
+- CRITICAL: You must read the parent algorithm file to understand:
+  * What structural elements can be modified within the codebase constraints
+  * What architectural decisions led to this algorithm's success
+  * Which components are essential vs which can be replaced
 
 Example descriptions:
 - Replace linear layers with convolutional layers for spatial feature learning

@@ -402,9 +432,15 @@ generate_crossover_direct() {
 Current CSV content:
 $(cat "$FULL_CSV_PATH")
 
-Algorithm files you
+Algorithm files you MUST examine for context:
 - Base algorithm: $FULL_ALGORITHM_PATH
-- Evolved algorithms: $FULL_OUTPUT_DIR/
+- Evolved algorithms: $FULL_OUTPUT_DIR/evolution_*.py (examine ALL to see what's been tried)
+
+IMPORTANT: Before generating ideas, you should:
+1. Read the base algorithm to understand the codebase structure and possibilities
+2. Read ALL existing evolution_*.py files to see what modifications have been attempted
+3. Analyze the CSV to see which approaches worked (high scores) and which failed
+4. Avoid repeating failed approaches unless trying them with significant modifications
 
 Top performers to combine:
 $top_performers

@@ -419,6 +455,10 @@ Requirements for new CSV rows:
 - Each description should be one clear sentence combining elements from different algorithms
 - Be specific about what elements to merge
 - All new rows should have empty performance and status fields
+- CRITICAL: You must read the relevant algorithm files to:
+  * Identify the specific improvements that made each algorithm successful
+  * Understand which components are compatible for merging
+  * Ensure the combined approach is technically feasible in the codebase
 
 Example descriptions:
 - Combine ensemble voting from algorithm 3 with feature selection from algorithm 5

@@ -461,9 +501,15 @@ ideate_ai_legacy() {
 Current CSV content:
 $(cat "$FULL_CSV_PATH")
 
-Algorithm files you
+Algorithm files you MUST examine for context:
 - Base algorithm: $FULL_ALGORITHM_PATH
-- Evolved algorithms: $FULL_OUTPUT_DIR/
+- Evolved algorithms: $FULL_OUTPUT_DIR/evolution_*.py (examine ALL to see what's been tried)
+
+IMPORTANT: Before generating ideas, you should:
+1. Read the base algorithm to understand the codebase structure and possibilities
+2. Read ALL existing evolution_*.py files to see what modifications have been attempted
+3. Analyze the CSV to see which approaches worked (high scores) and which failed
+4. Avoid repeating failed approaches unless trying them with significant modifications
 
 Project Brief:
 $(cat "$FULL_BRIEF_PATH")"
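For orientation, the rows these prompts ask the model to append follow the five-column layout that the new csv_helper.py (shown further down) reads as id, basedOnId, description, performance, status, with the last two fields left empty. A hypothetical appended row, reusing one of the example descriptions above (the numeric IDs here are illustrative only, not taken from this diff):

    12,3,"Use ensemble of 3 random forests with different feature subsets",,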
package/bin/claude-evolve-run
CHANGED

@@ -145,55 +145,20 @@ if [[ ! -f "$FULL_ALGORITHM_PATH" ]]; then
   exit 1
 fi
 
-# Find oldest pending row (
+# Find oldest pending row (using CSV helper)
 find_empty_row() {
-
-  local csv_id csv_based_on csv_desc csv_perf csv_status
-  while IFS=, read -r csv_id csv_based_on csv_desc csv_perf csv_status; do
-    # Look for rows with pending status or empty status (but not complete/failed/running/timeout)
-    # Treat blank status as pending
-    if [[ $csv_status == "pending" || -z $csv_status || $csv_status == '""' ]]; then
-      # Skip if status is explicitly complete, failed, running, or timeout
-      if [[ $csv_status == "complete" || $csv_status == "failed" || $csv_status == "running" || $csv_status == "timeout" ]]; then
-        ((row_num++))
-        continue
-      fi
-      echo $row_num
-      return 0
-    fi
-    ((row_num++))
-  done < <(tail -n +2 "$FULL_CSV_PATH")
-  return 1
+  "$PYTHON_CMD" "$SCRIPT_DIR/../lib/csv_helper.py" find_pending "$FULL_CSV_PATH"
 }
 
-# Get CSV row
-get_csv_row() {
-  sed -n "${1}p" "$FULL_CSV_PATH"
-}
+# Get CSV row - replaced by csv_helper.py
 
-# Update CSV row (
+# Update CSV row (using CSV helper)
 update_csv_row() {
   local row_num="$1"
   local performance="$2"
   local status="$3"
-
-
-  local temp_file="${FULL_CSV_PATH}.tmp"
-  local current_row=1
-  local csv_id csv_based_on csv_desc csv_perf csv_stat
-
-  while IFS=, read -r csv_id csv_based_on csv_desc csv_perf csv_stat; do
-    if [[ $current_row -eq $row_num ]]; then
-      # Update this row
-      echo "$csv_id,$csv_based_on,$csv_desc,$performance,$status"
-    else
-      # Keep original row
-      echo "$csv_id,$csv_based_on,$csv_desc,$csv_perf,$csv_stat"
-    fi
-    ((current_row++))
-  done <"$FULL_CSV_PATH" >"$temp_file"
-
-  mv "$temp_file" "$FULL_CSV_PATH"
+
+  "$PYTHON_CMD" "$SCRIPT_DIR/../lib/csv_helper.py" update_row "$FULL_CSV_PATH" "$row_num" "$performance" "$status"
 }
 
 # Auto-recovery mechanism for common failures

@@ -298,21 +263,18 @@ while true; do
   # Create log file for this iteration
   LOGFILE="logs/claude-$(date +%Y%m%d_%H%M%S).txt"
 
-  # Get row data
-
-
+  # Get row data using CSV helper
+  eval "$("$PYTHON_CMD" "$SCRIPT_DIR/../lib/csv_helper.py" get_row "$FULL_CSV_PATH" "$row_num")"
+
+  # Variables are now set: id, basedOnId, description, performance, status
+  based_on_id="$basedOnId"  # Convert to expected variable name
 
   # Check if ID is empty
   if [[ -z $id ]]; then
     echo "[ERROR] Empty ID found at row $row_num. CSV may be malformed." >&2
-    echo "[ERROR] Row data: $row_data" >&2
     exit 1
   fi
 
-  # Clean up description (remove quotes)
-  description=${description#\"}
-  description=${description%\"}
-
   echo "[INFO] Processing candidate ID: $id"
   echo "[INFO] Description: $description"
   echo "[INFO] Based on ID: $based_on_id"
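The get_row subcommand prints shell variable assignments (id="...", basedOnId="...", and so on), which is why the run script wraps the call in eval. A minimal sketch of the same pattern used standalone; the python3 invocation and the evolution.csv path are assumptions for illustration, not taken from this diff:

    # Hypothetical standalone walk-through of the three csv_helper.py subcommands.
    row_num=$(python3 lib/csv_helper.py find_pending evolution.csv) || exit 0  # nothing pending
    eval "$(python3 lib/csv_helper.py get_row evolution.csv "$row_num")"
    echo "Next candidate: $id - $description (based on: $basedOnId)"
    python3 lib/csv_helper.py update_row evolution.csv "$row_num" "" running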
package/lib/csv_helper.py
ADDED

@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+"""
+CSV helper for claude-evolve to properly handle CSV parsing with quoted fields.
+"""
+import csv
+import sys
+import json
+
+def find_pending_row(csv_path):
+    """Find the first pending row in the CSV."""
+    with open(csv_path, 'r') as f:
+        reader = csv.reader(f)
+        next(reader)  # Skip header
+        for row_num, row in enumerate(reader, start=2):
+            # Ensure row has at least 5 fields
+            while len(row) < 5:
+                row.append('')
+
+            status = row[4].strip()
+            # Check if status is pending or empty
+            if status == 'pending' or status == '':
+                return row_num
+    return None
+
+def get_row_data(csv_path, row_num):
+    """Get data from a specific row."""
+    with open(csv_path, 'r') as f:
+        reader = csv.reader(f)
+        for i, row in enumerate(reader, start=1):
+            if i == row_num:
+                # Ensure row has at least 5 fields
+                while len(row) < 5:
+                    row.append('')
+                return {
+                    'id': row[0],
+                    'basedOnId': row[1],
+                    'description': row[2],
+                    'performance': row[3],
+                    'status': row[4]
+                }
+    return None
+
+def update_row(csv_path, row_num, performance, status):
+    """Update a specific row in the CSV."""
+    rows = []
+    with open(csv_path, 'r') as f:
+        reader = csv.reader(f)
+        rows = list(reader)
+
+    # Update the specific row
+    if row_num <= len(rows):
+        row = rows[row_num - 1]
+        # Ensure row has at least 5 fields
+        while len(row) < 5:
+            row.append('')
+        row[3] = performance  # performance field
+        row[4] = status  # status field
+
+    # Write back
+    with open(csv_path, 'w', newline='') as f:
+        writer = csv.writer(f)
+        writer.writerows(rows)
+
+if __name__ == '__main__':
+    if len(sys.argv) < 3:
+        print("Usage: csv_helper.py <command> <csv_path> [args...]", file=sys.stderr)
+        sys.exit(1)
+
+    command = sys.argv[1]
+    csv_path = sys.argv[2]
+
+    try:
+        if command == 'find_pending':
+            row_num = find_pending_row(csv_path)
+            if row_num:
+                print(row_num)
+                sys.exit(0)
+            else:
+                sys.exit(1)
+
+        elif command == 'get_row':
+            if len(sys.argv) < 4:
+                print("Usage: csv_helper.py get_row <csv_path> <row_num>", file=sys.stderr)
+                sys.exit(1)
+            row_num = int(sys.argv[3])
+            data = get_row_data(csv_path, row_num)
+            if data:
+                # Output as shell variable assignments
+                for key, value in data.items():
+                    # Escape special characters for shell
+                    value = value.replace('\\', '\\\\')
+                    value = value.replace('"', '\\"')
+                    value = value.replace('$', '\\$')
+                    value = value.replace('`', '\\`')
+                    print(f'{key}="{value}"')
+                sys.exit(0)
+            else:
+                sys.exit(1)
+
+        elif command == 'update_row':
+            if len(sys.argv) < 6:
+                print("Usage: csv_helper.py update_row <csv_path> <row_num> <performance> <status>", file=sys.stderr)
+                sys.exit(1)
+            row_num = int(sys.argv[3])
+            performance = sys.argv[4]
+            status = sys.argv[5]
+            update_row(csv_path, row_num, performance, status)
+            sys.exit(0)
+
+        else:
+            print(f"Unknown command: {command}", file=sys.stderr)
+            sys.exit(1)
+
+    except Exception as e:
+        print(f"Error: {e}", file=sys.stderr)
+        sys.exit(1)
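Descriptions generated by the ideation prompts can contain commas, which the old IFS=, parsing in claude-evolve-run split apart; csv.reader keeps such a quoted field intact. A small hedged check (the file path, CSV content, and python3 invocation below are made up for illustration):

    # Hypothetical demo that a quoted, comma-containing description survives get_row.
    printf 'id,basedOnId,description,performance,status\n' > /tmp/demo.csv
    printf '1,,"Combine bagging, boosting, and stacking",,\n' >> /tmp/demo.csv
    eval "$(python3 lib/csv_helper.py get_row /tmp/demo.csv 2)"
    echo "$description"   # -> Combine bagging, boosting, and stacking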