claude-evolve 1.8.11 → 1.8.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/claude-evolve-ideate +83 -41
- package/package.json +1 -1
package/bin/claude-evolve-ideate
CHANGED
@@ -362,6 +362,12 @@ print(max_id + 1)
 "
 }

+# AIDEV-NOTE: This function had a critical race condition bug that caused wrong rows to be updated
+# The bug occurred when parallel processes modified the main CSV between temp CSV creation and append.
+# FIX: Now requires original_main_csv_lines parameter (6th arg) to track the exact line count at copy time.
+# This ensures we always append the correct new rows from temp CSV, regardless of concurrent modifications.
+# Without this fix, the system would update wrong IDs (e.g., claim to add gen81 but update gen80 instead).
+#
 # Validate that AI directly modified the CSV file
 validate_direct_csv_modification() {
   local temp_csv="$1"
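
To see why the captured line count matters, here is a minimal standalone sketch of the race the AIDEV-NOTE above describes; the file contents and IDs are illustrative only, not taken from the package:

    #!/usr/bin/env bash
    # Illustrative sketch only (not part of the package).
    set -euo pipefail

    main_csv=$(mktemp)   # stands in for the evolution CSV
    temp_csv=$(mktemp)
    printf 'id,description\ngen80,old idea\n' > "$main_csv"

    # Process A copies the CSV and records its size at copy time.
    cp "$main_csv" "$temp_csv"
    lines_at_copy=$(wc -l < "$main_csv")            # 2 lines: header + gen80

    # The AI appends a new row to the temp copy.
    printf 'gen81,new idea\n' >> "$temp_csv"

    # Meanwhile a parallel process appends a row to the main CSV.
    printf 'gen82,parallel idea\n' >> "$main_csv"

    # Old behavior: recount the main CSV now (3 lines) and skip that many lines
    # of the temp copy -- the new gen81 row is silently skipped.
    tail -n +"$(( $(wc -l < "$main_csv") + 1 ))" "$temp_csv"   # prints nothing

    # Fixed behavior: skip only the lines that existed at copy time, so exactly
    # the new row (gen81) is appended.
    tail -n +"$(( lines_at_copy + 1 ))" "$temp_csv" >> "$main_csv"
    cat "$main_csv"
    rm -f "$main_csv" "$temp_csv"

Recounting the main CSV at append time goes wrong as soon as another process has touched the file; the count taken at copy time always selects exactly the rows the AI added to the temp copy.
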
@@ -369,6 +375,7 @@ validate_direct_csv_modification() {
   local idea_type="$3"
   local ai_model="${4:-}" # AI model that generated the ideas
   local expected_ids="${5:-}" # Optional: comma or space separated list of expected IDs
+  local original_main_csv_lines="${6:-}" # CRITICAL: Line count of main CSV when temp CSV was created

   # Check if the file was actually modified
   if [[ ! -f "$temp_csv" ]]; then
@@ -376,32 +383,37 @@ validate_direct_csv_modification() {
     return 1
   fi

-  # Get the count before modification from the temp CSV (which was copied from original before AI ran)
-  # We need to track this before the AI runs by reading from the beginning state
-  # First, get a fresh count from the current main CSV (which reflects any previous operations in this session)
-  local current_original_count
-  current_original_count=$(grep -v '^[[:space:]]*$' "$FULL_CSV_PATH" | tail -n +2 | wc -l | tr -d '[:space:]')
-
   # Count data rows in the modified temp CSV
   local new_count
   new_count=$(grep -v '^[[:space:]]*$' "$temp_csv" | tail -n +2 | wc -l | tr -d '[:space:]')

+  # If original line count wasn't provided, fall back to current main CSV count (old behavior)
+  # This preserves backward compatibility but may have race conditions
+  if [[ -z "$original_main_csv_lines" ]]; then
+    echo "[WARN] No original line count provided - using current main CSV count (may cause race conditions)" >&2
+    original_main_csv_lines=$(wc -l < "$FULL_CSV_PATH" | tr -d '[:space:]')
+  fi
+
+  # Calculate how many data rows the temp CSV started with (before stubs were added)
+  # This should match the original main CSV line count (including header)
+  local original_data_rows=$((original_main_csv_lines - 1)) # Subtract header
+
+  # Calculate how many rows were actually added to temp CSV
+  local added_count=$((new_count - original_data_rows))

   # Check if AI overwrote the file instead of appending
-  if [[ $new_count -lt $current_original_count ]]; then
-    echo "[ERROR] AI overwrote the CSV file instead of appending ($new_count < $current_original_count)" >&2
+  if [[ $new_count -lt $original_data_rows ]]; then
+    echo "[ERROR] AI overwrote the CSV file instead of appending ($new_count < $original_data_rows)" >&2
     head -10 "$temp_csv" >&2
     return 1
   fi

   # Check if no changes were made
-  if [[ $new_count -eq $current_original_count ]]; then
-    echo "[ERROR] CSV file wasn't modified - same number of data rows ($new_count = $current_original_count)" >&2
+  if [[ $new_count -eq $original_data_rows ]]; then
+    echo "[ERROR] CSV file wasn't modified - same number of data rows ($new_count = $original_data_rows)" >&2
     head -10 "$temp_csv" >&2
     return 1
   fi
-
-  local added_count=$((new_count - current_original_count))
   if [[ $added_count -ne $expected_count ]]; then
     echo "[ERROR] Expected to add $expected_count ideas but only added $added_count" >&2
     echo "[ERROR] Ideation failed - rejecting partial results to prevent endless loops" >&2
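
The row arithmetic in this hunk is easiest to check with concrete numbers; the values below are illustrative only:

    # Illustrative numbers: main CSV copied at 81 lines (header + 80 data rows),
    # and the temp copy ends up with 85 data rows after the AI fills in the stubs.
    original_main_csv_lines=81
    new_count=85

    original_data_rows=$((original_main_csv_lines - 1))  # 80 data rows existed at copy time
    added_count=$((new_count - original_data_rows))      # 5 rows were added to the temp copy

    echo "$original_data_rows existing rows, $added_count added"
    # The validation then insists that added_count equals the expected idea count;
    # a smaller new_count means the file was overwritten, an equal one means no change.
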
@@ -434,43 +446,47 @@ validate_direct_csv_modification() {

   # Use proper locking to safely update the CSV
   echo "[INFO] Acquiring CSV lock to apply changes..."
-
+
   # Set the lockfile path
   CSV_LOCKFILE="$FULL_EVOLUTION_DIR/.evolution.csv.lock"
-
+
   if ! acquire_csv_lock; then
     echo "[ERROR] Failed to acquire CSV lock for update" >&2
     rm -f "$temp_csv"
     return 1
   fi
-
-  #
-
-
-
-  tail -n +$((
-
+
+  # CRITICAL FIX: Use the original line count (when temp CSV was created) to determine which lines to append
+  # This prevents race conditions where other processes modify the main CSV between temp CSV creation and append
+  # Append only the NEW lines from temp CSV (those added after the original content)
+  echo "[DEBUG] Appending last $added_count rows from temp CSV (from line $((original_main_csv_lines + 1)) onwards)" >&2
+  tail -n +$((original_main_csv_lines + 1)) "$temp_csv" >> "$FULL_CSV_PATH"
+
+  # Get the IDs that were actually added by reading them from temp CSV (not main CSV)
+  # This avoids race conditions where other processes add rows to main CSV
+  local new_ids
+  new_ids=$(tail -n $added_count "$temp_csv" | grep -v "^id," | cut -d',' -f1 | tr -d '"')
+  echo "[DEBUG] IDs being added: $new_ids" >&2
+
   # Clean up temp file
   rm -f "$temp_csv"
-
+
   # Update idea-LLM field for newly added rows if model is known
   if [[ -n "$ai_model" ]]; then
     echo "[INFO] Recording that $ai_model generated the ideas" >&2
-
-    local new_ids
-    new_ids=$(tail -n $added_count "$FULL_CSV_PATH" | grep -v "^id," | cut -d',' -f1 | tr -d '"')
-
+
     # Update each new row with the model that generated it
     for id in $new_ids; do
       if [[ -n "$id" && "$id" != "id" ]]; then
+        echo "[DEBUG] Updating field for $id" >&2
         "$PYTHON_CMD" "$SCRIPT_DIR/../lib/evolution_csv.py" "$FULL_CSV_PATH" field "$id" "idea-LLM" "$ai_model" || echo "[WARN] Failed to update $id" >&2
       fi
     done
   fi
-
+
   # Release the lock
   release_csv_lock
-
+
   echo "[INFO] Successfully added $added_count $idea_type ideas to CSV"
   return 0
 }
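
The two `tail` calls added above carry the fix; a minimal sketch of the same selection logic against a throwaway file (names and IDs are illustrative):

    #!/usr/bin/env bash
    # Illustrative sketch only (not part of the package).
    set -euo pipefail

    temp_csv=$(mktemp)
    cat > "$temp_csv" <<'EOF'
    id,description
    gen79,existing idea
    gen80,existing idea
    gen81,new idea one
    gen82,new idea two
    EOF

    original_main_csv_lines=3   # header + 2 data rows existed when the copy was made
    added_count=2               # rows appended to the temp copy afterwards

    # Everything after the original content is exactly the set of new rows.
    tail -n +$((original_main_csv_lines + 1)) "$temp_csv"   # prints the gen81 and gen82 rows

    # The new IDs are read back from the temp copy, not from the (possibly
    # concurrently growing) main CSV.
    new_ids=$(tail -n "$added_count" "$temp_csv" | grep -v '^id,' | cut -d',' -f1 | tr -d '"')
    echo "new ids: $new_ids"

    rm -f "$temp_csv"
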
@@ -1001,6 +1017,12 @@ generate_novel_ideas_direct() {
   local temp_csv="$FULL_EVOLUTION_DIR/temp-csv-$$.csv"
   cp "$FULL_CSV_PATH" "$temp_csv"

+  # CRITICAL: Capture the original line count immediately after copying
+  # This is needed to correctly append rows later, preventing race conditions
+  local original_csv_lines
+  original_csv_lines=$(wc -l < "$temp_csv" | tr -d '[:space:]')
+  echo "[DEBUG] Original CSV has $original_csv_lines lines (including header)" >&2
+
   # Pre-populate the CSV with stub rows containing the correct IDs
   # This ensures the AI can't possibly use wrong IDs - it just fills in descriptions
   echo "[INFO] Pre-populating CSV with stub rows: $required_ids_str"
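
Each generator now follows the same copy-then-measure-then-validate sequence; a condensed, runnable sketch of that call shape is below (the validator here is a stub, and all paths and IDs are placeholders, not the package's real implementation):

    #!/usr/bin/env bash
    # Illustrative sketch only; the real validate_direct_csv_modification does the
    # locking, appending, and ID checks shown in the hunks above.
    set -euo pipefail

    FULL_CSV_PATH=$(mktemp)
    printf 'id,description\ngen80,existing idea\n' > "$FULL_CSV_PATH"

    validate_direct_csv_modification() {  # stub standing in for the real validator
      local temp_csv=$1 count=$2 idea_type=$3 ai_model=$4 expected_ids=$5 original_lines=$6
      echo "validating $count $idea_type idea(s) against a $original_lines-line snapshot"
    }

    temp_csv=$(mktemp)
    cp "$FULL_CSV_PATH" "$temp_csv"
    original_csv_lines=$(wc -l < "$temp_csv" | tr -d '[:space:]')  # measured at copy time

    printf 'gen81,new idea\n' >> "$temp_csv"   # stand-in for the stub rows + AI step

    validate_direct_csv_modification "$temp_csv" 1 "novel" "model-x" "gen81" "$original_csv_lines"
    rm -f "$FULL_CSV_PATH" "$temp_csv"
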
@@ -1098,10 +1120,10 @@ CRITICAL: Do NOT use any git commands (git add, git commit, git reset, etc.). On

   # Restore working directory
   cd "$original_pwd"
-

   # Validate that the CSV file was actually modified with correct IDs
-
+  # Pass original_csv_lines to prevent race conditions
+  if ! validate_direct_csv_modification "$temp_csv" "$count" "novel" "$ai_response" "$required_ids_str" "$original_csv_lines"; then
     rm -f "$temp_csv"
     return 1
   fi
@@ -1135,6 +1157,11 @@ generate_hill_climbing_direct() {
   local temp_csv="$FULL_EVOLUTION_DIR/temp-csv-$$.csv"
   cp "$FULL_CSV_PATH" "$temp_csv"

+  # CRITICAL: Capture the original line count immediately after copying
+  local original_csv_lines
+  original_csv_lines=$(wc -l < "$temp_csv" | tr -d '[:space:]')
+  echo "[DEBUG] Original CSV has $original_csv_lines lines (including header)" >&2
+
   # Extract just the IDs from top performers for clarity (needed before pre-populating)
   local valid_parent_ids
   valid_parent_ids=$(echo "$top_performers" | cut -d',' -f1 | paste -sd ',' -)
@@ -1230,10 +1257,10 @@ CRITICAL INSTRUCTIONS:

   # Restore working directory
   cd "$original_pwd"
-

   # Validate that the CSV file was actually modified with correct IDs
-
+  # Pass original_csv_lines to prevent race conditions
+  if ! validate_direct_csv_modification "$temp_csv" "$count" "hill-climbing" "$ai_response" "$required_ids_str" "$original_csv_lines"; then
     rm -f "$temp_csv"
     return 1
   fi
@@ -1267,6 +1294,11 @@ generate_structural_mutation_direct() {
   local temp_csv="$FULL_EVOLUTION_DIR/temp-csv-$$.csv"
   cp "$FULL_CSV_PATH" "$temp_csv"

+  # CRITICAL: Capture the original line count immediately after copying
+  local original_csv_lines
+  original_csv_lines=$(wc -l < "$temp_csv" | tr -d '[:space:]')
+  echo "[DEBUG] Original CSV has $original_csv_lines lines (including header)" >&2
+
   # Extract just the IDs from top performers for clarity (needed before pre-populating)
   local valid_parent_ids
   valid_parent_ids=$(echo "$top_performers" | cut -d',' -f1 | paste -sd ',' -)
@@ -1352,10 +1384,10 @@ CRITICAL INSTRUCTIONS:

   # Restore working directory
   cd "$original_pwd"
-

   # Validate that the CSV file was actually modified with correct IDs
-
+  # Pass original_csv_lines to prevent race conditions
+  if ! validate_direct_csv_modification "$temp_csv" "$count" "structural" "$ai_response" "$required_ids_str" "$original_csv_lines"; then
     rm -f "$temp_csv"
     return 1
   fi
@@ -1389,6 +1421,11 @@ generate_crossover_direct() {
   local temp_csv="$FULL_EVOLUTION_DIR/temp-csv-$$.csv"
   cp "$FULL_CSV_PATH" "$temp_csv"

+  # CRITICAL: Capture the original line count immediately after copying
+  local original_csv_lines
+  original_csv_lines=$(wc -l < "$temp_csv" | tr -d '[:space:]')
+  echo "[DEBUG] Original CSV has $original_csv_lines lines (including header)" >&2
+
   # Extract just the IDs from top performers for clarity (needed before pre-populating)
   local valid_parent_ids
   valid_parent_ids=$(echo "$top_performers" | cut -d',' -f1 | paste -sd ',' -)
@@ -1474,10 +1511,10 @@ CRITICAL INSTRUCTIONS:

   # Restore working directory
   cd "$original_pwd"
-

   # Validate that the CSV file was actually modified with correct IDs
-
+  # Pass original_csv_lines to prevent race conditions
+  if ! validate_direct_csv_modification "$temp_csv" "$count" "crossover" "$ai_response" "$required_ids_str" "$original_csv_lines"; then
     rm -f "$temp_csv"
     return 1
   fi
@@ -1496,7 +1533,12 @@ ideate_ai_legacy() {
   # Create temporary CSV copy in evolution directory (so AI can access it)
   local temp_csv="$FULL_EVOLUTION_DIR/temp-csv-$$.csv"
   cp "$FULL_CSV_PATH" "$temp_csv"
-
+
+  # CRITICAL: Capture the original line count immediately after copying
+  local original_csv_lines
+  original_csv_lines=$(wc -l < "$temp_csv" | tr -d '[:space:]')
+  echo "[DEBUG] Original CSV has $original_csv_lines lines (including header)" >&2
+
   echo "[INFO] Generating $TOTAL_IDEAS ideas (legacy mode)..."

   # Get top performers for context
@@ -1580,14 +1622,14 @@ CRITICAL: Do NOT use any git commands (git add, git commit, git reset, etc.). On

   # Restore working directory
   cd "$original_pwd"
-
-
+
   # Validate that the CSV file was actually modified
-
+  # Pass original_csv_lines to prevent race conditions
+  if ! validate_direct_csv_modification "$temp_csv" "$TOTAL_IDEAS" "mixed" "$ai_response" "" "$original_csv_lines"; then
     rm -f "$temp_csv"
     return 1
   fi
-
+
   echo "[INFO] Legacy ideas generated"
   return 0
 }