claude-evolve 1.11.8 → 1.11.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/claude-evolve-check ADDED
@@ -0,0 +1,124 @@
+ #!/usr/bin/env bash
+ # claude-evolve-check - Health check for AI model configurations
+ # Tests all configured AI models to verify they're working before starting evolution runs
+
+ set -e
+
+ # Colors for output
+ GREEN='\033[0;32m'
+ YELLOW='\033[0;33m'
+ RED='\033[0;31m'
+ CYAN='\033[0;36m'
+ NC='\033[0m' # No Color
+
+ # Get script directory
+ SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}" 2>/dev/null || echo "${BASH_SOURCE[0]}")")" && pwd)"
+ LIB_DIR="$(dirname "$SCRIPT_DIR")/lib"
+
+ # Source configuration
+ source "$LIB_DIR/config.sh"
+ load_config
+
+ # Source AI CLI for model calls
+ source "$LIB_DIR/ai-cli.sh"
+
+ # Test timeout in seconds (short to fail fast)
+ TEST_TIMEOUT=30
+
+ # Simple test prompt
+ TEST_PROMPT="Say hello in exactly 3 words."
+
+ echo -e "${CYAN}🔍 Claude Evolve - AI Model Health Check${NC}"
+ echo "============================================"
+ echo
+
+ # Collect all unique models into a simple list
+ ALL_MODELS=""
+ for model in $LLM_RUN $LLM_RUN_FALLBACK $LLM_IDEATE $LLM_IDEATE_FALLBACK; do
+   # Check if model already in list
+   if ! echo "$ALL_MODELS" | grep -q -w "$model"; then
+     ALL_MODELS="$ALL_MODELS $model"
+   fi
+ done
+ ALL_MODELS=$(echo "$ALL_MODELS" | xargs) # Trim whitespace
+
+ echo "Models to test: $ALL_MODELS"
+ echo
+
+ # Track results
+ PASSED=0
+ FAILED=0
+ FAILED_MODELS=""
+ TIMEOUT_MODELS=""
+
+ # Test each model
+ for model in $ALL_MODELS; do
+   echo -n "Testing $model... "
+
+   # Run the test with timeout
+   START_TIME=$(date +%s)
+
+   # Call the model directly using the configured command
+   set +e
+   OUTPUT=$(timeout -k 5 $TEST_TIMEOUT bash -c "
+     source '$LIB_DIR/config.sh'
+     load_config
+     source '$LIB_DIR/ai-cli.sh'
+     call_ai_model_configured '$model' '$TEST_PROMPT' 2>/dev/null
+   " 2>&1)
+   EXIT_CODE=$?
+   set -e
+
+   END_TIME=$(date +%s)
+   DURATION=$((END_TIME - START_TIME))
+
+   # Check result
+   if [[ $EXIT_CODE -eq 0 ]] && [[ -n "$OUTPUT" ]] && [[ ${#OUTPUT} -gt 10 ]]; then
+     echo -e "${GREEN}✓ OK${NC} (${DURATION}s, ${#OUTPUT} chars)"
+     ((PASSED++))
+   elif [[ $EXIT_CODE -eq 124 ]]; then
+     echo -e "${RED}✗ TIMEOUT${NC} (${TEST_TIMEOUT}s - likely permission dialog or hanging)"
+     TIMEOUT_MODELS="$TIMEOUT_MODELS $model"
+     ((FAILED++))
+   else
+     echo -e "${RED}✗ FAILED${NC} (exit=$EXIT_CODE, ${#OUTPUT} chars)"
+     if [[ -n "$OUTPUT" ]] && [[ ${#OUTPUT} -lt 200 ]]; then
+       echo "  Output: $OUTPUT"
+     fi
+     FAILED_MODELS="$FAILED_MODELS $model"
+     ((FAILED++))
+   fi
+ done
+
+ echo
+ echo "============================================"
+ echo -e "${CYAN}Summary${NC}"
+ echo "============================================"
+ echo -e "Passed: ${GREEN}$PASSED${NC}"
+ echo -e "Failed: ${RED}$FAILED${NC}"
+ echo
+
+ # Show model lists with status
+ echo "Model Configuration:"
+ echo -e "  LLM_RUN:             $LLM_RUN"
+ echo -e "  LLM_RUN_FALLBACK:    $LLM_RUN_FALLBACK"
+ echo -e "  LLM_IDEATE:          $LLM_IDEATE"
+ echo -e "  LLM_IDEATE_FALLBACK: $LLM_IDEATE_FALLBACK"
+ echo
+
+ # Recommendations
+ if [[ $FAILED -gt 0 ]]; then
+   echo -e "${YELLOW}⚠️  Recommendations:${NC}"
+   for model in $TIMEOUT_MODELS; do
+     echo "  - $model: Check for permission dialogs (try running interactively first)"
+   done
+   for model in $FAILED_MODELS; do
+     echo "  - $model: Check authentication and CLI installation"
+   done
+   echo
+   echo "Consider reordering your model lists to put working models first."
+   exit 1
+ else
+   echo -e "${GREEN}✅ All models are working!${NC}"
+   exit 0
+ fi
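For reference, a typical run of the new script might look like this (model names, timings, and sizes are illustrative; the output format follows the echo statements above):

    $ claude-evolve-check
    🔍 Claude Evolve - AI Model Health Check
    ============================================

    Models to test: glm-5 kimi-k2.5 gemini-3-flash

    Testing glm-5... ✓ OK (4s, 18 chars)
    Testing kimi-k2.5... ✓ OK (6s, 21 chars)
    Testing gemini-3-flash... ✗ TIMEOUT (30s - likely permission dialog or hanging)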
@@ -59,6 +59,7 @@ USAGE:
  
  COMMANDS:
      setup            Initialize evolution workspace
+     check            Test all configured AI models (health check)
      ideate           Generate new algorithm ideas
      run              Execute evolution candidates
      analyze          Analyze evolution results
@@ -95,14 +96,15 @@ show_menu() {
      echo "What would you like to do?"
      echo
      echo "  1) setup           - Initialize evolution workspace"
-     echo "  2) ideate          - Generate new algorithm ideas"
-     echo "  3) run             - Execute evolution candidates"
-     echo "  4) analyze         - Analyze evolution results"
-     echo "  5) edit            - Manage candidate statuses by generation"
-     echo "  6) status          - Show evolution progress and current leader"
-     echo "  7) autostatus      - Auto-updating status display (real-time)"
-     echo "  8) clean-corrupted - Remove corrupted records from CSV"
-     echo "  9) config          - Manage configuration settings"
+     echo "  2) check           - Test all configured AI models"
+     echo "  3) ideate          - Generate new algorithm ideas"
+     echo "  4) run             - Execute evolution candidates"
+     echo "  5) analyze         - Analyze evolution results"
+     echo "  6) edit            - Manage candidate statuses by generation"
+     echo "  7) status          - Show evolution progress and current leader"
+     echo "  8) autostatus      - Auto-updating status display (real-time)"
+     echo "  9) clean-corrupted - Remove corrupted records from CSV"
+     echo "  c) config          - Manage configuration settings"
      echo "  k) killall         - Kill all running evolution processes"
      echo "  h) help            - Show help message"
      echo "  0) exit            - Exit"
@@ -156,18 +158,19 @@ check_for_updates
  # Main logic
  if [[ $# -eq 0 ]]; then
      show_menu
-     read -r -p "Enter your choice (1-9, k, h, 0): " choice
+     read -r -p "Enter your choice (1-9, c, k, h, 0): " choice
  
      case $choice in
          1) exec "$SCRIPT_DIR/claude-evolve-setup" ;;
-         2) exec "$SCRIPT_DIR/claude-evolve-ideate" ;;
-         3) exec "$SCRIPT_DIR/claude-evolve-run" ;;
-         4) exec "$SCRIPT_DIR/claude-evolve-analyze" ;;
-         5) exec "$SCRIPT_DIR/claude-evolve-edit" ;;
-         6) exec "$SCRIPT_DIR/claude-evolve-status" ;;
-         7) exec "$SCRIPT_DIR/claude-evolve-autostatus" ;;
-         8) exec "$SCRIPT_DIR/claude-evolve-clean-corrupted" ;;
-         9) exec "$SCRIPT_DIR/claude-evolve-config" ;;
+         2) exec "$SCRIPT_DIR/claude-evolve-check" ;;
+         3) exec "$SCRIPT_DIR/claude-evolve-ideate" ;;
+         4) exec "$SCRIPT_DIR/claude-evolve-run" ;;
+         5) exec "$SCRIPT_DIR/claude-evolve-analyze" ;;
+         6) exec "$SCRIPT_DIR/claude-evolve-edit" ;;
+         7) exec "$SCRIPT_DIR/claude-evolve-status" ;;
+         8) exec "$SCRIPT_DIR/claude-evolve-autostatus" ;;
+         9) exec "$SCRIPT_DIR/claude-evolve-clean-corrupted" ;;
+         c|C) exec "$SCRIPT_DIR/claude-evolve-config" ;;
          k|K) exec "$SCRIPT_DIR/claude-evolve-killall" ;;
          h|H) show_help ;;
          0)
@@ -175,7 +178,7 @@ if [[ $# -eq 0 ]]; then
              exit 0
              ;;
          *)
-             echo -e "${RED}Invalid choice. Please select 1-9, k, h, or 0.${NC}"
+             echo -e "${RED}Invalid choice. Please select 1-9, c, k, h, or 0.${NC}"
              exit 1
              ;;
      esac
@@ -192,6 +195,10 @@ setup)
      shift
      exec "$SCRIPT_DIR/claude-evolve-setup" "$@"
      ;;
+ check)
+     shift
+     exec "$SCRIPT_DIR/claude-evolve-check" "$@"
+     ;;
  ideate)
      shift
      exec "$SCRIPT_DIR/claude-evolve-ideate" "$@"
package/lib/ai-cli.sh CHANGED
@@ -151,12 +151,26 @@ $prompt"
          ai_output=$(timeout -k 30 600 opencode -m openrouter/z-ai/glm-4.7 run "$prompt" 2>&1)
          local ai_exit_code=$?
          ;;
+     glm-5)
+         local ai_output
+         # GLM-5: 744B MoE model, very cheap ($0.80/$2.56 per 1M tokens), 200K context
+         # Released Feb 2026 - scores 77.8% SWE-bench, MIT license
+         ai_output=$(timeout -k 30 600 opencode -m openrouter/z-ai/glm-5 run "$prompt" 2>&1)
+         local ai_exit_code=$?
+         ;;
      glm-zai)
-         # GLM -- can be slow sometimes
+         # GLM 4.7 via Z.AI agentic mode -- can be slow sometimes
          local ai_output
          ai_output=$(timeout -k 30 1800 opencode -m zai-coding-plan/glm-4.7 run "$prompt" 2>&1)
          local ai_exit_code=$?
          ;;
+     glm-5-zai)
+         # GLM-5 via Z.AI agentic mode - supports file editing for ideation
+         # 744B MoE, strong reasoning, can edit files
+         local ai_output
+         ai_output=$(timeout -k 30 1800 opencode -m zai-coding-plan/glm-5 run "$prompt" 2>&1)
+         local ai_exit_code=$?
+         ;;
      deepseek-openrouter)
          local ai_output
          ai_output=$(timeout -k 30 600 opencode -m openrouter/deepseek/deepseek-v3.2 run "$prompt" 2>&1)
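The case statement above is the extension point for backends; a hypothetical entry for another OpenRouter-hosted model would follow the same shape (the name my-model and slug vendor/my-model are placeholders, not part of the package):

    my-model)
        local ai_output
        # Same pattern as the cases above: hard timeout, opencode run, capture exit code
        ai_output=$(timeout -k 30 600 opencode -m openrouter/vendor/my-model run "$prompt" 2>&1)
        local ai_exit_code=$?
        ;;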
@@ -297,13 +311,13 @@ clean_ai_output() {
      echo "$output"
  }
  
- # Get models for a specific command (run or ideate)
+ # Get primary models for a specific command (run or ideate)
  # Usage: get_models_for_command <command>
- # Returns: Array of model names
+ # Returns: Space-separated list of model names
  get_models_for_command() {
      local command="$1"
      local model_list=""
-
+
      case "$command" in
          run)
              model_list="$LLM_RUN"
@@ -316,8 +330,30 @@ get_models_for_command() {
              return 1
              ;;
      esac
-
-     # Convert space-separated list to array
+
+     echo "$model_list"
+ }
+
+ # Get fallback models for a specific command (run or ideate)
+ # Usage: get_fallback_models_for_command <command>
+ # Returns: Space-separated list of fallback model names
+ get_fallback_models_for_command() {
+     local command="$1"
+     local model_list=""
+
+     case "$command" in
+         run)
+             model_list="$LLM_RUN_FALLBACK"
+             ;;
+         ideate)
+             model_list="$LLM_IDEATE_FALLBACK"
+             ;;
+         *)
+             echo "[ERROR] Unknown command: $command" >&2
+             return 1
+             ;;
+     esac
+
      echo "$model_list"
  }
  
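Shell callers can now chain the two lookups to walk both tiers in order; a minimal sketch, assuming config.sh and ai-cli.sh are sourced the same way claude-evolve-check does above:

    source lib/config.sh && load_config
    source lib/ai-cli.sh
    # Primary tier first, then the fallback tier
    for model in $(get_models_for_command run) $(get_fallback_models_for_command run); do
        echo "would try: $model"
    done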
package/lib/ai_cli.py CHANGED
@@ -166,7 +166,7 @@ def call_ai(
  
  def get_models_for_command(command: str) -> List[str]:
      """
-     Get the list of available models for a command.
+     Get the list of primary models for a command.
  
      Args:
          command: Either "run" or "ideate"
@@ -199,6 +199,41 @@ def get_models_for_command(command: str) -> List[str]:
      return model_list.split()
  
  
+ def get_fallback_models_for_command(command: str) -> List[str]:
+     """
+     Get the list of fallback models for a command.
+ 
+     Args:
+         command: Either "run" or "ideate"
+ 
+     Returns:
+         List of fallback model names
+     """
+     bash_script = f'''
+ source "{SCRIPT_DIR}/config.sh"
+ load_config
+ case "$1" in
+     run) echo "$LLM_RUN_FALLBACK" ;;
+     ideate) echo "$LLM_IDEATE_FALLBACK" ;;
+ esac
+ '''
+ 
+     result = subprocess.run(
+         ["bash", "-c", bash_script, "bash", command],
+         capture_output=True,
+         text=True
+     )
+ 
+     if result.returncode != 0:
+         return []
+ 
+     model_list = result.stdout.strip()
+     if not model_list:
+         return []
+ 
+     return model_list.split()
+ 
+ 
  def call_ai_model(
      prompt: str,
      model_name: str,
@@ -268,6 +303,54 @@ def call_ai_model(
          raise AIError(f"Failed to call AI: {e}")
  
  
+ def _try_models_with_backoff(
+     prompt: str,
+     models: List[str],
+     tier_name: str,
+     working_dir: Optional[str],
+     env_vars: Optional[dict],
+     max_rounds: int,
+     initial_wait: int,
+     max_wait: int
+ ) -> Tuple[Optional[str], Optional[str], dict]:
+     """
+     Try a list of models with round-based retries and exponential backoff.
+ 
+     Returns:
+         Tuple of (output, model_name, last_errors)
+         output/model_name are None if all models failed
+     """
+     wait_time = initial_wait
+     last_errors = {}
+ 
+     for round_num in range(max_rounds):
+         shuffled_models = models.copy()
+         random.shuffle(shuffled_models)
+ 
+         _log(f"{tier_name} round {round_num + 1}/{max_rounds}: trying {len(shuffled_models)} models")
+ 
+         for model in shuffled_models:
+             try:
+                 _log(f"Trying {model}...")
+                 output, model_name = call_ai_model(prompt, model, working_dir, env_vars)
+                 if round_num > 0:
+                     _log(f"Succeeded on round {round_num + 1} with {model}")
+                 else:
+                     _log(f"Success with {model}")
+                 return output, model_name, last_errors
+             except AIError as e:
+                 _log(f"{model} failed: {str(e)[:60]}...")
+                 last_errors[model] = str(e)
+ 
+         # All models failed in this round
+         if round_num < max_rounds - 1:
+             _log(f"All {tier_name} models failed in round {round_num + 1}, waiting {wait_time}s...")
+             time.sleep(wait_time)
+             wait_time = min(wait_time * 2, max_wait)
+ 
+     return None, None, last_errors
+ 
+ 
  def call_ai_with_backoff(
      prompt: str,
      command: str = "ideate",
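The doubling in wait_time = min(wait_time * 2, max_wait) reproduces the schedule the old inline comment described (60 -> 120 -> 240 -> 480, capped). With the defaults initial_wait=60 and max_wait=600, the first six inter-round waits can be verified directly:

    # First six waits between failed rounds under the defaults (initial_wait=60, max_wait=600)
    wait, waits = 60, []
    for _ in range(6):
        waits.append(wait)
        wait = min(wait * 2, 600)
    print(waits)  # [60, 120, 240, 480, 600, 600]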
@@ -275,69 +358,68 @@
      env_vars: Optional[dict] = None,
      max_rounds: int = 10,
      initial_wait: int = 60,
-     max_wait: int = 600
+     max_wait: int = 600,
+     use_fallback: bool = True
  ) -> Tuple[str, str]:
      """
-     Call AI with round-based retries and exponential backoff.
+     Call AI with tiered fallback and round-based retries with exponential backoff.
  
-     AIDEV-NOTE: This is the robust retry mechanism for handling rate limits.
-     - Tries each model in the pool (shuffled order)
-     - If all models fail in a round, waits with exponential backoff
-     - Keeps going until success or max_rounds exhausted
+     AIDEV-NOTE: Tiered fallback system:
+     1. First tries all primary models with backoff
+     2. If primary exhausted and use_fallback=True, tries fallback models
+     3. Fallback models are cheaper/simpler backups (haiku, flash, etc.)
  
      Args:
          prompt: The prompt to send
          command: "run" or "ideate" - determines model pool
          working_dir: Directory for file operations
          env_vars: Additional environment variables
-         max_rounds: Maximum number of full rounds to attempt
+         max_rounds: Maximum number of full rounds per tier
          initial_wait: Initial wait time in seconds after first failed round
          max_wait: Maximum wait time in seconds between rounds
+         use_fallback: Whether to try fallback tier if primary fails
  
      Returns:
          Tuple of (output, model_name)
  
      Raises:
-         AIError: If all rounds exhausted without success
+         AIError: If all tiers exhausted without success
      """
-     models = get_models_for_command(command)
-     if not models:
-         raise AIError(f"No models configured for command: {command}")
+     # Try primary tier first
+     primary_models = get_models_for_command(command)
+     if not primary_models:
+         raise AIError(f"No primary models configured for command: {command}")
+ 
+     output, model_name, primary_errors = _try_models_with_backoff(
+         prompt, primary_models, "Primary",
+         working_dir, env_vars, max_rounds, initial_wait, max_wait
+     )
  
-     wait_time = initial_wait
-     last_errors = {}
+     if output is not None:
+         return output, model_name
  
-     for round_num in range(max_rounds):
-         # Shuffle models each round for fairness
-         shuffled_models = models.copy()
-         random.shuffle(shuffled_models)
+     # Primary exhausted - try fallback if enabled
+     if use_fallback:
+         fallback_models = get_fallback_models_for_command(command)
+         if fallback_models:
+             _log(f"Primary tier exhausted, trying {len(fallback_models)} fallback models...")
  
-         _log(f"Round {round_num + 1}/{max_rounds}: trying {len(shuffled_models)} models")
+             output, model_name, fallback_errors = _try_models_with_backoff(
+                 prompt, fallback_models, "Fallback",
+                 working_dir, env_vars, max_rounds, initial_wait, max_wait
+             )
  
-         for model in shuffled_models:
-             try:
-                 _log(f"Trying {model}...")
-                 output, model_name = call_ai_model(prompt, model, working_dir, env_vars)
-                 if round_num > 0:
-                     _log(f"Succeeded on round {round_num + 1} with {model}")
-                 else:
-                     _log(f"Success with {model}")
+             if output is not None:
                  return output, model_name
-             except AIError as e:
-                 _log(f"{model} failed: {str(e)[:60]}...")
-                 last_errors[model] = str(e)
-                 # Continue to next model
  
-         # All models failed in this round
-         if round_num < max_rounds - 1:
-             _log(f"All models failed in round {round_num + 1}, waiting {wait_time}s...")
-             time.sleep(wait_time)
-             # Exponential backoff: 60 -> 120 -> 240 -> 480 (capped at max_wait)
-             wait_time = min(wait_time * 2, max_wait)
+             # Both tiers exhausted
+             all_errors = {**primary_errors, **fallback_errors}
+             error_summary = "; ".join(f"{m}: {e[:50]}" for m, e in list(all_errors.items())[:3])
+             raise AIError(f"All tiers exhausted ({max_rounds} rounds each). Last errors: {error_summary}")
  
-     # All rounds exhausted
-     error_summary = "; ".join(f"{m}: {e[:50]}" for m, e in list(last_errors.items())[:3])
-     raise AIError(f"All {max_rounds} rounds exhausted. Last errors: {error_summary}")
+     # Primary exhausted, no fallback
+     error_summary = "; ".join(f"{m}: {e[:50]}" for m, e in list(primary_errors.items())[:3])
+     raise AIError(f"Primary tier exhausted ({max_rounds} rounds). Last errors: {error_summary}")
  
  
  def call_ai_for_file_edit(
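From a caller's point of view the tiering is opt-out via the new keyword argument; a minimal usage sketch (assuming ai_cli is importable from the package's lib directory; the prompt text is illustrative):

    from ai_cli import call_ai_with_backoff, AIError

    try:
        # Primary pool first; the cheap fallback pool only if the primary tier is exhausted
        output, model = call_ai_with_backoff("List three ideas.", command="run", use_fallback=True)
    except AIError as exc:
        print(f"All tiers exhausted: {exc}")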
package/lib/config.sh CHANGED
@@ -57,14 +57,20 @@ DEFAULT_MEMORY_LIMIT_MB=12288
  # Workers will exit after processing this many candidates to pick up library updates
  DEFAULT_WORKER_MAX_CANDIDATES=3
  
- # Default LLM CLI configuration
- # Run: Kimi K2.5 is now the primary model (stronger than GLM-4.7 as of Jan 2025)
- DEFAULT_LLM_RUN="kimi-k2.5 kimi-k2.5 kimi-k2.5 glm-zai glm-zai gemini-3-flash codex-oss-local haiku"
+ # Default LLM CLI configuration - tiered fallback system
+ # Primary: Strong models used in normal operation
+ # Fallback: Cheap/backup models used only when primary tier exhausted
+ #
+ # Run: GLM-5 is now the primary model (744B MoE, $0.80/M tokens, 77.8% SWE-bench)
+ DEFAULT_LLM_RUN="glm-5 glm-5 glm-5 kimi-k2.5 kimi-k2.5"
+ DEFAULT_LLM_RUN_FALLBACK="gemini-3-flash codex-oss-local haiku"
+ #
  # Ideate: Only agentic models that can edit files reliably
  # AIDEV-NOTE: Ideation REQUIRES file editing - non-agentic models (opencode run, codex) return text
- # but don't actually edit files. Only use claude/gemini CLI with -y flag or cursor-agent.
+ # but don't actually edit files. Only use claude/gemini CLI, cursor-agent, or zai-coding-plan models.
  # OpenRouter models (via opencode) are chat-only and CANNOT edit files for ideation.
- DEFAULT_LLM_IDEATE="opus-think sonnet-think gemini-pro kimi-coder"
+ DEFAULT_LLM_IDEATE="opus-think sonnet-think glm-5-zai gemini-pro kimi-coder"
+ DEFAULT_LLM_IDEATE_FALLBACK="sonnet glm-zai haiku"
  
  # Load configuration from a YAML file and update variables
  _load_yaml_config() {
@@ -141,10 +147,12 @@ _load_yaml_config() {
              lock_timeout) LOCK_TIMEOUT="$value" ;;
          esac
      elif [[ $in_llm_cli_section == true ]]; then
-         if [[ $key == "run" || $key == "ideate" ]]; then
+         if [[ $key == "run" || $key == "ideate" || $key == "run_fallback" || $key == "ideate_fallback" ]]; then
              case $key in
                  run) LLM_RUN="$value" ;;
+                 run_fallback) LLM_RUN_FALLBACK="$value" ;;
                  ideate) LLM_IDEATE="$value" ;;
+                 ideate_fallback) LLM_IDEATE_FALLBACK="$value" ;;
              esac
          else
              value=$(echo "$value" | sed "s/^'//;s/'$//")
@@ -215,7 +223,9 @@ load_config() {
      WORKER_MAX_CANDIDATES="$DEFAULT_WORKER_MAX_CANDIDATES"
  
      LLM_RUN="$DEFAULT_LLM_RUN"
+     LLM_RUN_FALLBACK="$DEFAULT_LLM_RUN_FALLBACK"
      LLM_IDEATE="$DEFAULT_LLM_IDEATE"
+     LLM_IDEATE_FALLBACK="$DEFAULT_LLM_IDEATE_FALLBACK"
  
      # Determine local config file path relative to EVOLUTION_DIR
      local local_config_file="$EVOLUTION_DIR/config.yaml"
@@ -319,5 +329,7 @@ show_config() {
          fi
      done
      echo "  LLM for run: $LLM_RUN"
+     echo "  LLM for run (fallback): $LLM_RUN_FALLBACK"
      echo "  LLM for ideate: $LLM_IDEATE"
+     echo "  LLM for ideate (fallback): $LLM_IDEATE_FALLBACK"
  }
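On the user side, the two new keys sit beside run and ideate in config.yaml. A fragment might look like this (the section name llm_cli is an assumption inferred from the in_llm_cli_section flag above; the model lists are illustrative):

    llm_cli:
      run: glm-5 kimi-k2.5
      run_fallback: gemini-3-flash haiku
      ideate: opus-think glm-5-zai
      ideate_fallback: sonnet haiku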
@@ -213,13 +213,16 @@ class IdeationStrategy(ABC):
                  return ideas
              else:
                  # AI returned but didn't edit the file - show what it returned
-                 output_preview = output[:500] if output else "(empty)"
-                 if '\n' in output_preview:
-                     # Show first few lines
-                     lines = output_preview.split('\n')[:5]
-                     output_preview = '\n'.join(lines)
-                     if len(output.split('\n')) > 5:
-                         output_preview += f"\n... ({len(output)} chars total)"
+                 if output:
+                     all_lines = output.split('\n')
+                     if len(all_lines) > 5:
+                         # Show last 5 lines (first lines are usually just the git warning banner)
+                         lines = all_lines[-5:]
+                         output_preview = f"... ({len(output)} chars total)\n" + '\n'.join(lines)
+                     else:
+                         output_preview = output[:500]
+                 else:
+                     output_preview = "(empty)"
  
                  if parse_attempt < max_parse_retries - 1:
                      print(f"[IDEATE] {model} didn't edit file (attempt {parse_attempt + 1}/{max_parse_retries})", file=sys.stderr, flush=True)
package/package.json CHANGED
@@ -1,10 +1,11 @@
  {
    "name": "claude-evolve",
-   "version": "1.11.8",
+   "version": "1.11.11",
    "bin": {
      "claude-evolve": "bin/claude-evolve",
      "claude-evolve-main": "bin/claude-evolve-main",
      "claude-evolve-setup": "bin/claude-evolve-setup",
+     "claude-evolve-check": "bin/claude-evolve-check",
      "claude-evolve-ideate": "bin/claude-evolve-ideate",
      "claude-evolve-run": "bin/claude-evolve-run",
      "claude-evolve-worker": "bin/claude-evolve-worker",