claude-evolve 1.3.40 → 1.3.42

This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registry. It is provided for informational purposes only.
@@ -18,6 +18,59 @@ cleanup_temp() {
  # Set trap to clean up temp files on exit
  trap cleanup_temp EXIT INT TERM

+ # Function to handle failures with retry logic
+ handle_failure() {
+ local candidate_id="$1"
+ local current_status="$2"
+ local performance="${3:-0}"
+
+ # If this is already a retry, increment the retry count
+ if [[ $current_status =~ ^failed-retry([0-9]+)$ ]]; then
+ local retry_num=${BASH_REMATCH[1]}
+ local new_retry_num=$((retry_num + 1))
+
+ if [[ $new_retry_num -le $MAX_RETRIES ]]; then
+ local new_status="failed-retry${new_retry_num}"
+ update_csv_row_with_lock "$candidate_id" "status" "$new_status"
+ update_csv_row_with_lock "$candidate_id" "performance" "$performance"
+ echo "[WORKER-$$] ✗ Retry $retry_num failed, marked as $new_status"
+ exit 1
+ else
+ update_csv_row_with_lock "$candidate_id" "status" "failed"
+ update_csv_row_with_lock "$candidate_id" "performance" "$performance"
+ echo "[WORKER-$$] ✗ Max retries ($MAX_RETRIES) exceeded, marking as permanently failed"
+ exit 1
+ fi
+ elif [[ $current_status == "failed" ]]; then
+ # Initial failure, convert to retry1 if retries are enabled
+ if [[ $MAX_RETRIES -gt 0 ]]; then
+ update_csv_row_with_lock "$candidate_id" "status" "failed-retry1"
+ update_csv_row_with_lock "$candidate_id" "performance" "$performance"
+ echo "[WORKER-$$] ✗ Initial failure, marked as failed-retry1 for retry"
+ exit 1
+ else
+ update_csv_row_with_lock "$candidate_id" "status" "failed"
+ update_csv_row_with_lock "$candidate_id" "performance" "$performance"
+ echo "[WORKER-$$] ✗ Failed (retries disabled)"
+ # Use exit code 1 - systemic issue since retries are disabled
+ exit 1
+ fi
+ else
+ # Not a failure scenario, convert to retry1 if retries enabled
+ if [[ $MAX_RETRIES -gt 0 ]]; then
+ update_csv_row_with_lock "$candidate_id" "status" "failed-retry1"
+ update_csv_row_with_lock "$candidate_id" "performance" "$performance"
+ echo "[WORKER-$$] ✗ Evaluation failed, marked as failed-retry1 for retry"
+ exit 1
+ else
+ update_csv_row_with_lock "$candidate_id" "status" "failed"
+ update_csv_row_with_lock "$candidate_id" "performance" "$performance"
+ echo "[WORKER-$$] ✗ Evaluation failed (retries disabled)"
+ exit 1
+ fi
+ fi
+ }
+
  # Load configuration
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  # shellcheck source=../lib/config.sh
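The new handle_failure helper drives a status ladder in the CSV: an initial failure becomes failed-retry1, each further failure bumps the counter, and once the counter would exceed MAX_RETRIES the candidate is marked permanently failed. A minimal standalone sketch of that transition (not taken from the package; next_status is a hypothetical helper, and MAX_RETRIES=3 is only the documented default):

    #!/usr/bin/env bash
    # Sketch: how a candidate's status string advances on repeated failures.
    MAX_RETRIES=3
    next_status() {
      local current="$1"
      if [[ $current =~ ^failed-retry([0-9]+)$ ]]; then
        local n=$((BASH_REMATCH[1] + 1))
        if ((n <= MAX_RETRIES)); then echo "failed-retry${n}"; else echo "failed"; fi
      elif ((MAX_RETRIES > 0)); then
        echo "failed-retry1"   # first failure becomes a retry candidate
      else
        echo "failed"          # retries disabled
      fi
    }
    next_status "pending"        # -> failed-retry1
    next_status "failed-retry1"  # -> failed-retry2
    next_status "failed-retry3"  # -> failed (permanent, with MAX_RETRIES=3)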
@@ -59,14 +112,25 @@ done

  # If no ID provided, find next pending
  if [[ -z $candidate_id ]]; then
- candidate_id=$(find_next_pending_with_lock)
- if [[ -z $candidate_id ]]; then
+ candidate_result=$(find_next_pending_with_lock)
+ if [[ -z $candidate_result ]]; then
  echo "[DEBUG] No pending candidates found" >&2
  exit 0
  fi
+
+ # Parse candidate_id|original_status format
+ if [[ $candidate_result == *"|"* ]]; then
+ candidate_id="${candidate_result%|*}" # Everything before |
+ original_candidate_status="${candidate_result#*|}" # Everything after |
+ else
+ # Fallback for old format (shouldn't happen)
+ candidate_id="$candidate_result"
+ original_candidate_status=""
+ fi
  else
  # Mark specified candidate as running
  update_csv_row_with_lock "$candidate_id" "status" "running"
+ original_candidate_status="" # Unknown for manually specified candidates
  fi

  echo "[WORKER-$$] Processing candidate ID: $candidate_id"
@@ -124,6 +188,23 @@ fi
  echo "[WORKER-$$] Description: $description"
  echo "[WORKER-$$] Based on ID: $based_on_id"

+ # AIDEV-NOTE: Retry logic - detect if this is a retry attempt
+ is_retry=false
+ retry_count=0
+ # Use original_candidate_status for retry detection (if available), otherwise fall back to CSV status
+ retry_status="$original_candidate_status"
+ if [[ -z "$retry_status" ]]; then
+ retry_status="$status"
+ fi
+
+ if [[ $retry_status =~ ^failed-retry([0-9]+)$ ]]; then
+ is_retry=true
+ retry_count=${BASH_REMATCH[1]}
+ echo "[WORKER-$$] 🔄 Processing retry attempt #$retry_count"
+ elif [[ $retry_status == "failed" ]]; then
+ echo "[WORKER-$$] ⚠️ Initial failure detected - this should be converted to failed-retry1 to enable retries"
+ fi
+
  # AIDEV-NOTE: Using common evolution processor logic for consistent handling
  # Determine parent algorithm
  if [[ -z $based_on_id || $based_on_id == "0" || $based_on_id == '""' ]]; then
@@ -139,7 +220,7 @@ else

  if [[ ! -f $parent_file ]]; then
  echo "[ERROR] Parent algorithm not found: $parent_file" >&2
- update_csv_row_with_lock "$candidate_id" "status" "failed"
+ handle_failure "$candidate_id" "$retry_status" "0"
  exit 1
  fi
  fi
@@ -155,11 +236,28 @@ fi
  temp_file="${output_file}.tmp$$"

  # Check if processing should be skipped using common logic
+ # Set environment variable for retry detection
+ if [[ $is_retry == "true" ]]; then
+ export RETRY_CANDIDATE=true
+ else
+ export RETRY_CANDIDATE=false
+ fi
+
  eval "$("$PYTHON_CMD" "$SCRIPT_DIR/../lib/evolution_processor.py" "$id" "$based_on_id" "$FULL_OUTPUT_DIR" "$ROOT_DIR" "$parent_file" "$output_file")"

  # Handle copy operation to temp file
  if [[ "$skip_copy" == "True" ]]; then
  echo "[WORKER-$$] ⚠️ Skipping copy - $reason"
+ elif [[ $is_retry == "true" ]]; then
+ # For retries, edit the existing failed algorithm in-place
+ if [[ -f "$output_file" ]]; then
+ cp "$output_file" "$temp_file"
+ echo "[WORKER-$$] 🔄 Copied existing algorithm for retry: $temp_file"
+ else
+ # Fallback to parent if existing file doesn't exist
+ cp "$parent_file" "$temp_file"
+ echo "[WORKER-$$] ⚠️ Existing algorithm not found, using parent: $temp_file"
+ fi
  else
  cp "$parent_file" "$temp_file"
  echo "[WORKER-$$] Copied parent to temp file: $temp_file"
@@ -178,15 +276,28 @@ else
  claude_cmd="${CLAUDE_CMD:-claude}"
  if ! command -v "$claude_cmd" >/dev/null 2>&1; then
  echo "[ERROR] Claude CLI not found" >&2
- update_csv_row_with_lock "$candidate_id" "status" "failed"
+ handle_failure "$candidate_id" "$retry_status" "0"
  exit 1
  fi

  CLAUDE_MODEL="sonnet"
  echo "[WORKER-$$] Using Claude $CLAUDE_MODEL for mutation"

- # Create mutation prompt
- prompt="Edit the file $temp_file to implement this specific change: $description
+ # Create mutation prompt (different for retries vs initial attempts)
+ if [[ $is_retry == "true" ]]; then
+ prompt="Fix the bugs in the file $temp_file. This algorithm was attempting to implement: $description
+
+ The algorithm failed during evaluation. Please:
+ - Analyze the code for potential bugs (syntax errors, logical issues, missing imports, etc.)
+ - Fix any problems you find
+ - Ensure the code runs without errors
+ - Make sure it still implements the intended change: $description
+ - Add appropriate error handling and validation
+ - If possible, suggest a simple way to test this fix
+
+ This is retry attempt #$retry_count. Focus on making the code robust and correct."
+ else
+ prompt="Edit the file $temp_file to implement this specific change: $description

  Requirements:
  - Edit the file directly (don't just provide comments or suggestions)
@@ -196,6 +307,7 @@ Requirements:
  - Add proper error handling if needed

  The file currently contains the parent algorithm. Modify it according to the description above."
+ fi

  # Log prompt
  {
@@ -213,7 +325,9 @@ The file currently contains the parent algorithm. Modify it according to the des

  # Check for rate limit (multiple possible messages)
  if echo "$claude_output" | grep -q -E "(usage limit|rate limit|limit reached|too many requests)"; then
- echo "[ERROR] Claude API rate limit reached" >&2
+ echo "⚠️ Claude API rate limit reached" >&2
+ echo "⚠️ Claude output:" >&2
+ echo "$claude_output" >&2
  # Clean up the temp file
  if [[ -f "$temp_file" ]]; then
  rm "$temp_file"
@@ -225,30 +339,29 @@ The file currently contains the parent algorithm. Modify it according to the des
  fi

  if [[ $claude_exit_code -ne 0 ]]; then
- echo "[ERROR] Claude failed to mutate algorithm" >&2
+ echo "⚠️ Claude failed to mutate algorithm (exit code: $claude_exit_code)" >&2
+ echo "⚠️ Claude output:" >&2
+ echo "$claude_output" >&2
  # Clean up the temp file
  if [[ -f "$temp_file" ]]; then
  rm "$temp_file"
  echo "[WORKER-$$] Cleaned up temp file due to Claude failure" >&2
  fi
- update_csv_row_with_lock "$candidate_id" "status" "failed"
+ handle_failure "$candidate_id" "$retry_status" "0"
  exit 1
  fi

  # Verify that Claude actually modified the file
  if [[ -f "$temp_file" && -f "$parent_file" ]]; then
  if cmp -s "$temp_file" "$parent_file"; then
- echo "" >&2
- echo "🚨🚨🚨 RED ALERT: UNCHANGED ALGORITHM DETECTED 🚨🚨🚨" >&2
- echo "ERROR: Temp file is IDENTICAL to parent algorithm!" >&2
- echo "ERROR: Claude failed to make any changes" >&2
- echo "ERROR: Marking as failed - no evaluation will run" >&2
- echo "🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨" >&2
- echo "" >&2
+ echo "⚠️ Unchanged algorithm detected - Claude didn't modify the file" >&2
+ echo "⚠️ Description was: $description" >&2
+ echo "⚠️ Claude's response:" >&2
+ echo "$claude_output" >&2

  # Clean up temp file and mark as failed
  rm "$temp_file"
- update_csv_row_with_lock "$candidate_id" "status" "failed"
+ handle_failure "$candidate_id" "$retry_status" "0"
  exit 1
  else
  # Changes were made - move temp file to final location
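The unchanged-output guard relies on cmp -s, which compares two files byte for byte and exits 0 only when they are identical. A small standalone illustration of that check (file names here are made up, not the package's):

    cp algorithm.py candidate.py          # hypothetical files for illustration
    if cmp -s candidate.py algorithm.py; then
      echo "no changes were made to the candidate" >&2
    else
      echo "candidate differs from its parent"
    fi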
@@ -269,28 +382,41 @@ echo "[WORKER-$$] Running evaluation..."
  eval_output=""
  eval_exit_code=0

+ # Stream evaluator output in real-time while capturing it
+ eval_tempfile=$(mktemp)
  if [[ -n $timeout_seconds ]]; then
  echo "[WORKER-$$] Evaluation timeout: ${timeout_seconds}s"
- # For Modal compatibility, don't capture stderr
- if eval_output=$(EXPERIMENT_ID="$id" timeout "$timeout_seconds" "$PYTHON_CMD" "$FULL_EVALUATOR_PATH" "$output_file"); then
- eval_exit_code=0
+ # Stream output to both log and temp file
+ if EXPERIMENT_ID="$id" timeout "$timeout_seconds" "$PYTHON_CMD" "$FULL_EVALUATOR_PATH" "$output_file" 2>&1 | tee "$eval_tempfile" | while IFS= read -r line; do
+ echo "[EVALUATOR] $line" >> "$LOGFILE"
+ echo "[EVALUATOR] $line" >&2
+ done; then
+ eval_exit_code=${PIPESTATUS[0]}
  else
  eval_exit_code=$?
  if [[ $eval_exit_code -eq 124 ]]; then
  echo "[ERROR] Evaluation timed out" >&2
  update_csv_row_with_lock "$candidate_id" "status" "timeout"
+ rm -f "$eval_tempfile"
  exit 1
  fi
  fi
  else
- # For Modal compatibility, don't capture stderr
- if eval_output=$(EXPERIMENT_ID="$id" "$PYTHON_CMD" "$FULL_EVALUATOR_PATH" "$output_file"); then
- eval_exit_code=0
+ # Stream output to both log and temp file
+ if EXPERIMENT_ID="$id" "$PYTHON_CMD" "$FULL_EVALUATOR_PATH" "$output_file" 2>&1 | tee "$eval_tempfile" | while IFS= read -r line; do
+ echo "[EVALUATOR] $line" >> "$LOGFILE"
+ echo "[EVALUATOR] $line" >&2
+ done; then
+ eval_exit_code=${PIPESTATUS[0]}
  else
  eval_exit_code=$?
  fi
  fi

+ # Read the complete output from temp file
+ eval_output=$(cat "$eval_tempfile")
+ rm -f "$eval_tempfile"
+
  # Log evaluator output
  {
  echo "=== WORKER $$ - EVALUATOR OUTPUT ==="
@@ -305,9 +431,7 @@ if [[ $eval_exit_code -eq 0 ]]; then
  if [[ $eval_output =~ ^[[:space:]]*-?[0-9]+\.?[0-9]*[[:space:]]*$ ]]; then
  score=$(echo "$eval_output" | tr -d ' ')
  if [[ $(echo "$score == 0" | bc -l) == "1" ]]; then
- update_csv_row_with_lock "$candidate_id" "status" "failed"
- update_csv_row_with_lock "$candidate_id" "performance" "$score"
- echo "[WORKER-$$] ✗ Evaluation failed with score 0"
+ handle_failure "$candidate_id" "$retry_status" "$score"
  exit 1
  else
  update_csv_row_with_lock "$candidate_id" "performance" "$score"
@@ -347,9 +471,7 @@ if [[ $eval_exit_code -eq 0 ]]; then
  if score=$(echo "$eval_output" | grep -o '"score"[[:space:]]*:[[:space:]]*[0-9.]*' | cut -d: -f2 | tr -d ' '); then
  if [[ -n $score ]]; then
  if [[ $(echo "$score == 0" | bc -l) == "1" ]]; then
- update_csv_row_with_lock "$candidate_id" "status" "failed"
- update_csv_row_with_lock "$candidate_id" "performance" "$score"
- echo "[WORKER-$$] ✗ Evaluation failed with score 0"
+ handle_failure "$candidate_id" "$retry_status" "$score"
  exit 1
  else
  update_csv_row_with_lock "$candidate_id" "performance" "$score"
@@ -364,9 +486,7 @@ if [[ $eval_exit_code -eq 0 ]]; then
  if score=$(echo "$eval_output" | grep -o '"performance"[[:space:]]*:[[:space:]]*[0-9.]*' | cut -d: -f2 | tr -d ' '); then
  if [[ -n $score ]]; then
  if [[ $(echo "$score == 0" | bc -l) == "1" ]]; then
- update_csv_row_with_lock "$candidate_id" "status" "failed"
- update_csv_row_with_lock "$candidate_id" "performance" "$score"
- echo "[WORKER-$$] ✗ Evaluation failed with score 0"
+ handle_failure "$candidate_id" "$retry_status" "$score"
  exit 1
  else
  update_csv_row_with_lock "$candidate_id" "performance" "$score"
@@ -381,10 +501,10 @@ if [[ $eval_exit_code -eq 0 ]]; then
  echo "[ERROR] Expected: plain number (e.g., 1.23) or JSON with 'score' or 'performance' field" >&2
  echo "[ERROR] Actual evaluator output was:" >&2
  echo "$eval_output" >&2
- update_csv_row_with_lock "$candidate_id" "status" "failed"
+ handle_failure "$candidate_id" "$retry_status" "0"
  exit 1
  else
  echo "[ERROR] Evaluator failed with exit code $eval_exit_code" >&2
- update_csv_row_with_lock "$candidate_id" "status" "failed"
+ handle_failure "$candidate_id" "$retry_status" "0"
  exit 1
  fi
package/lib/config.sh CHANGED
@@ -45,6 +45,9 @@ DEFAULT_LOCK_TIMEOUT=10
  # Default auto ideation value
  DEFAULT_AUTO_IDEATE=true

+ # Default retry value
+ DEFAULT_MAX_RETRIES=3
+
  # Load configuration from config file
  load_config() {
  # Accept config file path as parameter
@@ -76,6 +79,9 @@ load_config() {
  # Set auto ideation default
  AUTO_IDEATE="$DEFAULT_AUTO_IDEATE"

+ # Set retry default
+ MAX_RETRIES="$DEFAULT_MAX_RETRIES"
+
  # Load config if found
  if [[ -f "$config_file" ]]; then
  echo "[DEBUG] Loading configuration from: $config_file" >&2
@@ -151,6 +157,7 @@ load_config() {
  parent_selection) PARENT_SELECTION="$value" ;;
  python_cmd) PYTHON_CMD="$value" ;;
  auto_ideate) AUTO_IDEATE="$value" ;;
+ max_retries) MAX_RETRIES="$value" ;;
  evolution_dir)
  echo "[WARN] evolution_dir in config is ignored - automatically inferred from config file location" >&2
  ;;
@@ -245,4 +252,5 @@ show_config() {
  echo " Max workers: $MAX_WORKERS"
  echo " Lock timeout: $LOCK_TIMEOUT"
  echo " Auto ideate: $AUTO_IDEATE"
+ echo " Max retries: $MAX_RETRIES"
  }
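config.sh now seeds MAX_RETRIES from DEFAULT_MAX_RETRIES and lets a max_retries key in the config file override it, following the same default-then-override pattern as the other settings. A minimal sketch of that pattern (the config filename and the sed-based parsing are simplified stand-ins, not the package's parser):

    DEFAULT_MAX_RETRIES=3
    MAX_RETRIES="$DEFAULT_MAX_RETRIES"            # start from the built-in default
    # Simplified stand-in for the real parser: read "max_retries: N" if present.
    if [[ -f config.yaml ]]; then
      value=$(sed -n 's/^max_retries:[[:space:]]*//p' config.yaml | head -n1)
      [[ -n $value ]] && MAX_RETRIES="$value"     # config file wins over the default
    fi
    echo " Max retries: $MAX_RETRIES"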
package/lib/csv-lock.sh CHANGED
@@ -230,29 +230,39 @@ find_next_pending_with_lock() {
  return 1
  fi

- # Find oldest pending candidate and update to running using Python
+ # Find oldest pending candidate (including retries) and update to running using Python
  local candidate=$("$PYTHON_CMD" -c "
  import csv
  import sys
+ import re
+
+ def is_pending_retry(status):
+ '''Check if status is pending (empty, pending, or retry status).'''
+ if not status or status == 'pending':
+ return True
+ return status.startswith('failed-retry')

  # Read CSV
  with open('$csv_file', 'r') as f:
  reader = csv.reader(f)
  rows = list(reader)

- # Find first pending candidate
+ # Find first pending candidate (including retries)
  candidate_id = None
+ original_status = None
  for i in range(1, len(rows)):
  # If row has fewer than 5 fields, it's pending
  if len(rows[i]) < 5:
  candidate_id = rows[i][0]
+ original_status = '' # Empty status means pending
  # Ensure row has 5 fields before setting status
  while len(rows[i]) < 5:
  rows[i].append('')
  rows[i][4] = 'running' # Update status
  break
- elif len(rows[i]) >= 5 and (rows[i][4] == 'pending' or rows[i][4] == ''):
+ elif len(rows[i]) >= 5 and is_pending_retry(rows[i][4]):
  candidate_id = rows[i][0]
+ original_status = rows[i][4] # Save original status before overwriting
  rows[i][4] = 'running' # Update status
  break

@@ -261,7 +271,7 @@ if candidate_id:
  with open('${csv_file}.tmp', 'w', newline='') as f:
  writer = csv.writer(f)
  writer.writerows(rows)
- print(candidate_id)
+ print(f'{candidate_id}|{original_status}') # Return both ID and original status
  ")

  if [ -n "$candidate" ]; then
package/lib/csv_helper.py CHANGED
@@ -8,6 +8,7 @@ import csv
  import json
  import sys
  import os
+ import re
  from typing import Dict, List, Any


@@ -50,6 +51,40 @@ def ensure_columns(headers: list[str], rows: list[list[str]], new_fields: dict)
  return headers, rows


+ def parse_retry_status(status: str) -> tuple[str, int]:
+ """Parse retry status and return (base_status, retry_count).
+
+ Examples:
+ 'failed' -> ('failed', 0)
+ 'failed-retry1' -> ('failed', 1)
+ 'failed-retry3' -> ('failed', 3)
+ 'complete' -> ('complete', 0)
+ """
+ if not status:
+ return ('', 0)
+
+ match = re.match(r'^(.*)-retry(\d+)$', status)
+ if match:
+ base_status = match.group(1)
+ retry_count = int(match.group(2))
+ return (base_status, retry_count)
+ else:
+ return (status, 0)
+
+
+ def is_retry_candidate(status: str) -> bool:
+ """Check if a status represents a retry candidate."""
+ base_status, _ = parse_retry_status(status)
+ return base_status == 'failed' and status.startswith('failed-retry')
+
+
+ def is_pending_retry(status: str) -> bool:
+ """Check if status is pending (empty, 'pending', or retry status)."""
+ if not status or status == 'pending':
+ return True
+ return is_retry_candidate(status)
+
+
  def update_row_with_fields(headers: list[str], rows: list[list[str]], target_id: str, fields: dict):
  """Update a specific row with multiple fields."""
  # Find column indices
@@ -162,9 +197,12 @@ def main():
  try:
  headers, rows = read_csv(csv_file)

- # Find first row with empty status or status == "pending"
+ # Find first row with empty status, "pending", or retry status
  for i, row in enumerate(rows, start=2): # Start at 2 (1-indexed, skip header)
- if len(row) < 5 or row[4] == '' or row[4] == 'pending':
+ if len(row) < 5:
+ print(i)
+ sys.exit(0)
+ elif len(row) >= 5 and is_pending_retry(row[4]):
  print(i)
  sys.exit(0)

@@ -14,7 +14,8 @@ def should_skip_processing(id_val, based_on_id, parent_file, output_file):
  """
  Determine if evolution processing should be skipped.

- Simple rule: If file exists, skip everything. This handles all edge cases cleanly.
+ Simple rule: If file exists, skip everything UNLESS this is a retry candidate.
+ For retry candidates, we want Claude to process the existing file to fix bugs.

  Returns tuple: (skip_copy, skip_claude, reason)
  """
@@ -23,9 +24,15 @@ def should_skip_processing(id_val, based_on_id, parent_file, output_file):
  return True, True, "Baseline algorithm - no processing needed"

  # File existence check - if file exists, skip both copy and Claude
- # This automatically handles self-parent cases and re-runs
+ # EXCEPT for retry candidates which need Claude to fix the existing file
  if os.path.exists(output_file):
- return True, True, "File already exists - skipping all processing"
+ # Check if this might be a retry candidate by looking for retry status in environment
+ # The worker sets RETRY_CANDIDATE=true for retry processing
+ retry_env = os.environ.get('RETRY_CANDIDATE')
+ if retry_env == 'true':
+ return True, False, "Retry candidate - skip copy but run Claude for bug fixing"
+ else:
+ return True, True, "File already exists - skipping all processing"

  # File doesn't exist - proceed with copy and Claude
  return False, False, None
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "claude-evolve",
- "version": "1.3.40",
+ "version": "1.3.42",
  "bin": {
  "claude-evolve": "./bin/claude-evolve",
  "claude-evolve-main": "./bin/claude-evolve-main",
@@ -42,6 +42,10 @@ python_cmd: "python3"
  # When true, automatically generate new ideas when no pending candidates remain
  auto_ideate: true

+ # Retry configuration
+ # Maximum number of retries for failed candidates before marking as permanently failed
+ max_retries: 3
+
  # Parallel execution configuration
  parallel:
  # Enable parallel execution of evolution candidates