claude-evolve 1.7.13 → 1.7.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/claude-evolve-ideate +159 -43
- package/lib/ai-cli.sh +3 -2
- package/package.json +1 -1
package/bin/claude-evolve-ideate
CHANGED
@@ -706,7 +706,7 @@ get_next_id() {
     echo "gen${generation}-001"
     return
   fi
-
+
   # Use Python for proper CSV parsing
   local max_id
   max_id=$("$PYTHON_CMD" -c "
@@ -726,11 +726,53 @@ with open('$FULL_CSV_PATH', 'r') as f:
                 max_id = max(max_id, id_num)
 print(max_id)
 ")
-
+
   # Format next ID with generation and 3-digit number
   printf "gen%s-%03d" "$generation" $((max_id + 1))
 }
 
+# Get the next N available IDs for current generation as a comma-separated list
+get_next_ids() {
+  local generation="$1"
+  local count="$2"
+
+  # Get the starting ID number
+  local start_id
+  if [[ ! -f "$FULL_CSV_PATH" ]]; then
+    start_id=1
+  else
+    # Use Python for proper CSV parsing
+    start_id=$("$PYTHON_CMD" -c "
+import csv
+import re
+max_id = 0
+pattern = re.compile(r'^gen${generation}-(\d+)$')
+with open('$FULL_CSV_PATH', 'r') as f:
+    reader = csv.reader(f)
+    next(reader, None)  # Skip header
+    for row in reader:
+        if row and len(row) > 0:
+            id_field = row[0].strip()
+            match = pattern.match(id_field)
+            if match:
+                id_num = int(match.group(1))
+                max_id = max(max_id, id_num)
+print(max_id + 1)
+")
+  fi
+
+  # Generate the list of IDs
+  local ids=()
+  for ((i=0; i<count; i++)); do
+    local id_num=$((start_id + i))
+    ids+=("$(printf "gen%s-%03d" "$generation" "$id_num")")
+  done
+
+  # Join with commas
+  local IFS=','
+  echo "${ids[*]}"
+}
+
 
 # Get top performers for parent selection (absolute + top novel candidates)
 get_top_performers() {
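The new get_next_ids() helper scans the CSV once, finds the highest existing ID for the generation, and emits a comma-joined batch starting right after it. A minimal standalone sketch of the batch-formatting step, with hypothetical inputs (generation 3, highest existing ID gen3-007, four IDs requested):

    #!/usr/bin/env bash
    # Illustrative sketch only; generation, start_id, and count are assumed values.
    generation=3
    start_id=8      # assumed: max existing ID for this generation is gen3-007
    count=4
    ids=()
    for ((i=0; i<count; i++)); do
      ids+=("$(printf "gen%s-%03d" "$generation" $((start_id + i)))")
    done
    IFS=','
    echo "${ids[*]}"   # prints: gen3-008,gen3-009,gen3-010,gen3-011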
@@ -884,20 +926,35 @@ ideate_ai_strategies() {
 # Generate novel exploration ideas using direct CSV modification
 generate_novel_ideas_direct() {
   local count="$1"
-
+
+  # Get the next available ID BEFORE creating temp CSV
+  # This ensures each strategy gets unique IDs even in parallel runs
+  local next_id
+  next_id=$(get_next_id "$CURRENT_GENERATION")
+  echo "[INFO] Next available ID for novel ideas: $next_id" >&2
+
+  # Generate the list of IDs this strategy should use
+  local next_id_num
+  next_id_num=$(echo "$next_id" | grep -o '[0-9]*$')
+  local required_ids=()
+  for ((i=0; i<count; i++)); do
+    required_ids+=("$(printf "gen%s-%03d" "$CURRENT_GENERATION" $((next_id_num + i)))")
+  done
+  local required_ids_str="${required_ids[*]}"
+
   # Create temporary CSV copy in evolution directory (so AI can access it)
   local temp_csv="$FULL_EVOLUTION_DIR/temp-csv-$$.csv"
   cp "$FULL_CSV_PATH" "$temp_csv"
-
-  echo "[INFO] Generating $count novel exploration ideas
+
+  echo "[INFO] Generating $count novel exploration ideas with IDs: $required_ids_str"
   local data_rows=$(grep -v '^[[:space:]]*$' "$FULL_CSV_PATH" | tail -n +2 | wc -l)
-
+
   # Use relative paths and change to evolution directory so AI can access files
   local temp_csv_basename=$(basename "$temp_csv")
-
+
   # Get existing Python files for this generation to avoid ID collisions
   local existing_py_files=$(get_existing_py_files_for_generation "$CURRENT_GENERATION")
-
+
   local prompt="I need you to use your file editing capabilities to APPEND exactly $count novel algorithmic ideas to the CSV file: $temp_csv_basename
 
 Current evolution context:
@@ -907,21 +964,23 @@ Current evolution context:
 
 IMPORTANT: DO NOT read algorithm.py or any evolution_*.py files. Focus on creative ideation based on the brief and CSV context only. Reading code files wastes tokens and time.
 
+CRITICAL ID ASSIGNMENT:
+You MUST use EXACTLY these IDs in order: $required_ids_str
+These IDs have been pre-calculated to avoid collisions with parallel evolution runs.
+DO NOT try to find the next ID yourself - use the IDs provided above.
+
 CRITICAL INSTRUCTIONS:
 1. Use the Read tool to examine the current CSV file
 IMPORTANT: If the CSV file is large (>200 lines), read it in chunks using the offset and limit parameters to avoid context overload
 Example: Read(file_path='temp-csv-123.csv', offset=0, limit=100) then Read(offset=100, limit=100), etc.
 2. DO NOT DELETE OR REPLACE ANY EXISTING ROWS - YOU MUST PRESERVE ALL EXISTING DATA
-3.
-4.
-
+3. Use EXACTLY the IDs specified above: $required_ids_str
+4. DO NOT calculate or infer IDs - use the exact IDs provided"
+
   if [[ -n "$existing_py_files" ]]; then
     prompt+="
-5. IMPORTANT: The following IDs already have Python files
-
-  else
-    prompt+="
-5. No existing Python files found for generation $CURRENT_GENERATION"
+5. IMPORTANT: The following IDs already have Python files: $existing_py_files
+(This is informational only - use the IDs specified above)"
   fi
 
   prompt+="
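In generate_novel_ideas_direct (and the three strategy functions below), the numeric suffix of get_next_id's result is recovered with grep -o '[0-9]*$' and expanded into the required_ids batch that the prompt pins the AI to. A small sketch of that extraction with a made-up ID; note the batch string ends up space-joined, since IFS is left at its default:

    # Illustrative sketch only; next_id, CURRENT_GENERATION, and count are assumed values.
    CURRENT_GENERATION=2
    count=3
    next_id="gen2-115"
    next_id_num=$(echo "$next_id" | grep -o '[0-9]*$')   # trailing digit run: 115
    required_ids=()
    for ((i=0; i<count; i++)); do
      required_ids+=("$(printf "gen%s-%03d" "$CURRENT_GENERATION" $((next_id_num + i)))")
    done
    echo "${required_ids[*]}"   # prints: gen2-115 gen2-116 gen2-117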
@@ -980,24 +1039,38 @@ CRITICAL: Do NOT use any git commands (git add, git commit, git reset, etc.). On
 generate_hill_climbing_direct() {
   local count="$1"
   local top_performers="$2"
-
+
+  # Get the next available ID BEFORE creating temp CSV
+  local next_id
+  next_id=$(get_next_id "$CURRENT_GENERATION")
+  echo "[INFO] Next available ID for hill climbing: $next_id" >&2
+
+  # Generate the list of IDs this strategy should use
+  local next_id_num
+  next_id_num=$(echo "$next_id" | grep -o '[0-9]*$')
+  local required_ids=()
+  for ((i=0; i<count; i++)); do
+    required_ids+=("$(printf "gen%s-%03d" "$CURRENT_GENERATION" $((next_id_num + i)))")
+  done
+  local required_ids_str="${required_ids[*]}"
+
   # Create temporary CSV copy in evolution directory (so AI can access it)
   local temp_csv="$FULL_EVOLUTION_DIR/temp-csv-$$.csv"
   cp "$FULL_CSV_PATH" "$temp_csv"
-
-  echo "[INFO] Generating $count hill climbing ideas
+
+  echo "[INFO] Generating $count hill climbing ideas with IDs: $required_ids_str"
   local data_rows=$(grep -v '^[[:space:]]*$' "$FULL_CSV_PATH" | tail -n +2 | wc -l)
-
+
   # Get existing Python files for this generation to avoid ID collisions
   local existing_py_files=$(get_existing_py_files_for_generation "$CURRENT_GENERATION")
-
+
   # Extract just the IDs from top performers for clarity
   local valid_parent_ids
   valid_parent_ids=$(echo "$top_performers" | cut -d',' -f1 | paste -sd ',' -)
-
+
   # Use relative paths and change to evolution directory so AI can access files
   local temp_csv_basename=$(basename "$temp_csv")
-
+
   local prompt="I need you to use your file editing capabilities to APPEND exactly $count parameter tuning ideas to the CSV file: $temp_csv_basename
 
 IMPORTANT: You MUST use one of these exact parent IDs: $valid_parent_ids
@@ -1019,13 +1092,18 @@ If you must read source files:
 
 Most of the time, you can infer parameters from descriptions like "RSI with threshold 30" or "MA period 20".
 
+CRITICAL ID ASSIGNMENT:
+You MUST use EXACTLY these IDs in order: $required_ids_str
+These IDs have been pre-calculated to avoid collisions with parallel evolution runs.
+DO NOT try to find the next ID yourself - use the IDs provided above.
+
 CRITICAL INSTRUCTIONS:
 1. Use the Read tool to examine the current CSV file
 IMPORTANT: If the CSV file is large (>200 lines), read it in chunks using the offset and limit parameters to avoid context overload
 Example: Read(file_path='temp-csv-123.csv', offset=0, limit=100) then Read(offset=100, limit=100), etc.
 2. DO NOT DELETE OR REPLACE ANY EXISTING ROWS - YOU MUST PRESERVE ALL EXISTING DATA
-3.
-4.
+3. Use EXACTLY the IDs specified above: $required_ids_str
+4. DO NOT calculate or infer IDs - use the exact IDs provided
 5. Use the Edit or MultiEdit tool to APPEND exactly $count new rows AT THE END of the CSV file
 6. For each idea, create a row with: id,parent_id,description,,pending
 7. Each parent_id MUST be one of: $valid_parent_ids
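The parameter-tuning, structural-mutation, and crossover strategies also flatten the top-performer rows into a parent ID list with cut and paste. A rough sketch with two invented performer rows:

    # Illustrative sketch only; the two performer rows are invented.
    top_performers=$'gen1-004,,Breakout entry with ATR trailing stop,1.82,complete\ngen1-009,gen1-004,Breakout with tighter ATR multiplier,1.95,complete'
    valid_parent_ids=$(echo "$top_performers" | cut -d',' -f1 | paste -sd ',' -)
    echo "$valid_parent_ids"   # prints: gen1-004,gen1-009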
@@ -1077,24 +1155,38 @@ CRITICAL: Do NOT use any git commands (git add, git commit, git reset, etc.). On
 generate_structural_mutation_direct() {
   local count="$1"
   local top_performers="$2"
-
+
+  # Get the next available ID BEFORE creating temp CSV
+  local next_id
+  next_id=$(get_next_id "$CURRENT_GENERATION")
+  echo "[INFO] Next available ID for structural mutation: $next_id" >&2
+
+  # Generate the list of IDs this strategy should use
+  local next_id_num
+  next_id_num=$(echo "$next_id" | grep -o '[0-9]*$')
+  local required_ids=()
+  for ((i=0; i<count; i++)); do
+    required_ids+=("$(printf "gen%s-%03d" "$CURRENT_GENERATION" $((next_id_num + i)))")
+  done
+  local required_ids_str="${required_ids[*]}"
+
   # Create temporary CSV copy in evolution directory (so AI can access it)
   local temp_csv="$FULL_EVOLUTION_DIR/temp-csv-$$.csv"
   cp "$FULL_CSV_PATH" "$temp_csv"
-
-  echo "[INFO] Generating $count structural mutation ideas
+
+  echo "[INFO] Generating $count structural mutation ideas with IDs: $required_ids_str"
   local data_rows=$(grep -v '^[[:space:]]*$' "$FULL_CSV_PATH" | tail -n +2 | wc -l)
-
+
   # Get existing Python files for this generation to avoid ID collisions
   local existing_py_files=$(get_existing_py_files_for_generation "$CURRENT_GENERATION")
-
+
   # Extract just the IDs from top performers for clarity
   local valid_parent_ids
   valid_parent_ids=$(echo "$top_performers" | cut -d',' -f1 | paste -sd ',' -)
-
+
   # Use relative paths and change to evolution directory so AI can access files
   local temp_csv_basename=$(basename "$temp_csv")
-
+
   local prompt="I need you to use your file editing capabilities to APPEND exactly $count structural modification ideas to the CSV file: $temp_csv_basename
 
 IMPORTANT: You MUST use one of these exact parent IDs: $valid_parent_ids
@@ -1108,13 +1200,18 @@ IMPORTANT: DO NOT read evolution_*.py files. Generate structural ideas based ONL
 - Your knowledge of common algorithmic structures and patterns
 Reading code files wastes tokens and time. Focus on high-level architectural ideas based on the descriptions.
 
+CRITICAL ID ASSIGNMENT:
+You MUST use EXACTLY these IDs in order: $required_ids_str
+These IDs have been pre-calculated to avoid collisions with parallel evolution runs.
+DO NOT try to find the next ID yourself - use the IDs provided above.
+
 CRITICAL INSTRUCTIONS:
 1. Use the Read tool to examine the current CSV file
 IMPORTANT: If the CSV file is large (>200 lines), read it in chunks using the offset and limit parameters to avoid context overload
 Example: Read(file_path='temp-csv-123.csv', offset=0, limit=100) then Read(offset=100, limit=100), etc.
 2. DO NOT DELETE OR REPLACE ANY EXISTING ROWS - YOU MUST PRESERVE ALL EXISTING DATA
-3.
-4.
+3. Use EXACTLY the IDs specified above: $required_ids_str
+4. DO NOT calculate or infer IDs - use the exact IDs provided
 5. Use the Edit or MultiEdit tool to APPEND exactly $count new rows AT THE END of the CSV file
 6. For each idea, create a row with: id,parent_id,description,,pending
 7. Each parent_id MUST be one of: $valid_parent_ids
@@ -1166,24 +1263,38 @@ CRITICAL: Do NOT use any git commands (git add, git commit, git reset, etc.). On
 generate_crossover_direct() {
   local count="$1"
   local top_performers="$2"
-
+
+  # Get the next available ID BEFORE creating temp CSV
+  local next_id
+  next_id=$(get_next_id "$CURRENT_GENERATION")
+  echo "[INFO] Next available ID for crossover: $next_id" >&2
+
+  # Generate the list of IDs this strategy should use
+  local next_id_num
+  next_id_num=$(echo "$next_id" | grep -o '[0-9]*$')
+  local required_ids=()
+  for ((i=0; i<count; i++)); do
+    required_ids+=("$(printf "gen%s-%03d" "$CURRENT_GENERATION" $((next_id_num + i)))")
+  done
+  local required_ids_str="${required_ids[*]}"
+
   # Create temporary CSV copy in evolution directory (so AI can access it)
   local temp_csv="$FULL_EVOLUTION_DIR/temp-csv-$$.csv"
   cp "$FULL_CSV_PATH" "$temp_csv"
-
-  echo "[INFO] Generating $count crossover hybrid ideas
+
+  echo "[INFO] Generating $count crossover hybrid ideas with IDs: $required_ids_str"
   local data_rows=$(grep -v '^[[:space:]]*$' "$FULL_CSV_PATH" | tail -n +2 | wc -l)
-
+
   # Get existing Python files for this generation to avoid ID collisions
   local existing_py_files=$(get_existing_py_files_for_generation "$CURRENT_GENERATION")
-
+
   # Extract just the IDs from top performers for clarity
   local valid_parent_ids
   valid_parent_ids=$(echo "$top_performers" | cut -d',' -f1 | paste -sd ',' -)
-
+
   # Use relative paths and change to evolution directory so AI can access files
   local temp_csv_basename=$(basename "$temp_csv")
-
+
   local prompt="I need you to use your file editing capabilities to APPEND exactly $count hybrid combination ideas to the CSV file: $temp_csv_basename
 
 IMPORTANT: You MUST use ONLY these exact parent IDs: $valid_parent_ids
@@ -1197,13 +1308,18 @@ IMPORTANT: DO NOT read evolution_*.py files. Generate crossover ideas based ONLY
 - Your knowledge of how different algorithmic approaches can be combined
 Reading code files wastes tokens and time. Focus on combining the described features creatively.
 
+CRITICAL ID ASSIGNMENT:
+You MUST use EXACTLY these IDs in order: $required_ids_str
+These IDs have been pre-calculated to avoid collisions with parallel evolution runs.
+DO NOT try to find the next ID yourself - use the IDs provided above.
+
 CRITICAL INSTRUCTIONS:
 1. Use the Read tool to examine the current CSV file
 IMPORTANT: If the CSV file is large (>200 lines), read it in chunks using the offset and limit parameters to avoid context overload
 Example: Read(file_path='temp-csv-123.csv', offset=0, limit=100) then Read(offset=100, limit=100), etc.
 2. DO NOT DELETE OR REPLACE ANY EXISTING ROWS - YOU MUST PRESERVE ALL EXISTING DATA
-3.
-4.
+3. Use EXACTLY the IDs specified above: $required_ids_str
+4. DO NOT calculate or infer IDs - use the exact IDs provided
 5. Use the Edit or MultiEdit tool to APPEND exactly $count new rows AT THE END of the CSV file
 6. For each idea, create a row with: id,parent_id,description,,pending
 7. Each parent_id MUST be one of: $valid_parent_ids (choose the primary parent)
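Every prompt variant asks the AI to append rows of the form id,parent_id,description,,pending, leaving the score column empty and the status pending. A hedged sketch of what a single appended row could look like (row contents and temp CSV name are invented for illustration):

    # Illustrative only; not actual package data.
    echo 'gen2-118,gen2-115,Crossover of ATR breakout and RSI mean reversion,,pending' >> temp-csv-12345.csv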
package/lib/ai-cli.sh
CHANGED
@@ -68,7 +68,7 @@ $prompt"
     gemini-pro)
       local ai_output
       # Gemini needs longer timeout as it streams output while working (20 minutes)
-      ai_output=$(timeout
+      ai_output=$(timeout 1800 gemini -y -m gemini-2.5-pro -p "$prompt" 2>&1)
       local ai_exit_code=$?
       ;;
     gemini-flash)
@@ -93,8 +93,9 @@ $prompt"
       local ai_exit_code=$?
       ;;
     glm-zai)
+      # GLM -- can be slow sometimes
       local ai_output
-      ai_output=$(timeout
+      ai_output=$(timeout 1800 opencode -m zai-coding-plan/glm-4.6 run "$prompt" 2>&1)
       local ai_exit_code=$?
       ;;
     deepseek-openrouter)
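Both updated branches in ai-cli.sh follow the same shape: wrap the CLI call in an 1800-second timeout, capture stdout and stderr together, and record the exit status immediately. A generic sketch of that pattern, with a placeholder command rather than one of the real CLIs:

    #!/usr/bin/env bash
    # Placeholder command for illustration; only the timeout/capture pattern mirrors the diff.
    prompt="example prompt"
    ai_output=$(timeout 1800 some-ai-cli --prompt "$prompt" 2>&1)
    ai_exit_code=$?
    if [[ $ai_exit_code -eq 124 ]]; then
      # GNU timeout exits with 124 when the command was killed for running too long
      echo "[ERROR] AI call timed out after 1800s" >&2
    fi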