claude-evolve 1.6.29 → 1.6.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/ai-cli.sh CHANGED
@@ -55,7 +55,7 @@ $prompt"
  ai_output=$(timeout 500 codex exec -m o3-mini -c model_reasoning_effort="high" --dangerously-bypass-approvals-and-sandbox "$prompt" 2>&1)
  local ai_exit_code=$?
  ;;
- gemini)
+ gemini-pro)
  local ai_output
  # Gemini needs longer timeout as it streams output while working (20 minutes)
  ai_output=$(timeout 1200 gemini -y -m gemini-2.5-pro -p "$prompt" 2>&1)
@@ -82,7 +82,7 @@ $prompt"
  ai_output=$(timeout 300 opencode -m openrouter/z-ai/glm-4.6 run "$prompt" 2>&1)
  local ai_exit_code=$?
  ;;
- deepseek)
+ deepseek-v31)
  local ai_output
  ai_output=$(timeout 300 opencode -m openrouter/deepseek/deepseek-v3.1-terminus run "$prompt" 2>&1)
  local ai_exit_code=$?
@@ -97,73 +97,23 @@ $prompt"
  ai_output=$(timeout 300 opencode -m openrouter/x-ai/grok-4 run "$prompt" 2>&1)
  local ai_exit_code=$?
  ;;
- codex-qwen3)
- # Qwen3-Coder via Codex CLI with Ollama backend (only mildly agentic)
- local ai_output
- ai_output=$(timeout 1200 codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss --model qwen3-coder:30b "$prompt" 2>&1)
- local ai_exit_code=$?
- ;;
  codex-oss)
- # Codex-OSS via Codex CLI with Ollama backend (longer timeout)
+ # Codex-OSS via Codex CLI with Ollama backend
  local ai_output
  ai_output=$(timeout 1200 codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss "$prompt" 2>&1)
  local ai_exit_code=$?
  ;;
- aider-qwen3|qwen3)
- # Qwen3-Coder via Aider + Ollama
- # Extract the target filename from the prompt (e.g., "Modify the algorithm in evolution_gen01-001.py...")
- local target_file
- target_file=$(echo "$prompt" | sed -n 's/.*algorithm in \([^ ]*\.py\).*/\1/p' | head -1)
-
- if [[ -z "$target_file" ]]; then
- echo "[ERROR] Could not extract target filename from prompt" >&2
- return 1
- fi
-
- # Check if file exists in current directory
- if [[ ! -f "$target_file" ]]; then
- echo "[ERROR] Target file not found: $target_file" >&2
- return 1
- fi
-
- # Build context files list (read-only)
- local context_args=""
-
- # Add BRIEF.md if it exists
- if [[ -f "BRIEF.md" ]]; then
- context_args="$context_args --read BRIEF.md"
- fi
-
- # Add base algorithm.py as reference
- if [[ -f "algorithm.py" ]]; then
- context_args="$context_args --read algorithm.py"
- fi
-
- # IMPORTANT: Detect and add parent file for comparison
- # Extract parent ID from target filename (e.g., evolution_gen05-013.py came from gen04-005)
- # The file being edited is already a COPY of the parent, but we want the original
- # parent file as read-only context so the AI can understand what it's building on
- local target_basename=$(basename "$target_file" .py)
- if [[ "$target_basename" =~ ^evolution_gen([0-9]+)-([0-9]+)$ ]]; then
- local current_gen="${BASH_REMATCH[1]}"
- # Look for parent in previous generation or same generation
- # We can't easily determine the exact parent, so include all recent evolution files
- # Actually, the file being edited IS the parent content already (it was copied)
- # So we don't need to add the parent separately
- :
- fi
-
- # Run aider with context files and the target file to edit
+ kimi-k2-cloud)
+ # Kimi K2 via Codex CLI with Ollama cloud backend
  local ai_output
- ai_output=$(timeout 600 aider --yes --no-git --model ollama/qwen3-coder:30b --no-show-model-warnings $context_args --message "$prompt" "$target_file" 2>&1)
+ ai_output=$(timeout 600 codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss -m kimi-k2:1t-cloud "$prompt" 2>&1)
  local ai_exit_code=$?
-
- # Aider modifies the file in place, so we don't need to output anything
- # The file has been edited directly
  ;;
- *)
- echo "[ERROR] Unknown model: $model_name" >&2
- return 1
+ deepseek-v3-cloud)
+ # Deepseek via Codex CLI with Ollama cloud backend
+ local ai_output
+ ai_output=$(timeout 600 codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss -m deepseek-v3.1:671b-cloud "$prompt" 2>&1)
+ local ai_exit_code=$?
  ;;
  esac
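For orientation, the hunks above edit branches of a case dispatch keyed on the model name. The sketch below restates the two new cloud-backed branches inside a hypothetical call_model wrapper; the surrounding function in ai-cli.sh is not shown in this diff, so its real name and signature are assumptions, and the command lines are copied verbatim from the added hunk lines.

# Hypothetical wrapper name; only a sketch of the dispatch pattern shown above.
call_model() {
  local model_name=$1 prompt=$2
  local ai_output ai_exit_code
  case $model_name in
    kimi-k2-cloud)
      # Kimi K2 via Codex CLI with Ollama cloud backend
      ai_output=$(timeout 600 codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss -m kimi-k2:1t-cloud "$prompt" 2>&1)
      ai_exit_code=$?
      ;;
    deepseek-v3-cloud)
      # Deepseek via Codex CLI with Ollama cloud backend
      ai_output=$(timeout 600 codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss -m deepseek-v3.1:671b-cloud "$prompt" 2>&1)
      ai_exit_code=$?
      ;;
    *)
      # Default branch added here only so the sketch is self-contained.
      echo "[ERROR] Unknown model: $model_name" >&2
      return 1
      ;;
  esac
  printf '%s\n' "$ai_output"
  return "$ai_exit_code"
}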
package/lib/config.sh CHANGED
@@ -53,11 +53,10 @@ DEFAULT_MAX_RETRIES=3
  # Set to reasonable limit for ML workloads - about half of available system RAM
  DEFAULT_MEMORY_LIMIT_MB=12288

- # Default LLM CLI configuration - use simple variables instead of arrays
- # Run: 100% local with qwen3 via Codex+Ollama (more reliable than aider)
- DEFAULT_LLM_RUN="codex-qwen3 codex-oss gemini-flash"
+ # Default LLM CLI configuration
+ DEFAULT_LLM_RUN="codex-oss gemini-flash kimi-k2-cloud"
  # Ideate: Commercial models for idea generation + local fallback
- DEFAULT_LLM_IDEATE="gemini sonnet-think gpt5high glm grok-4 codex-qwen3 codex-oss"
+ DEFAULT_LLM_IDEATE="gemini-pro sonnet-think gpt5high glm grok-4 deepseek-v31 codex-qwen3 codex-oss deepseek-v3-cloud"

  # Load configuration from a YAML file and update variables
  _load_yaml_config() {
@@ -214,19 +213,6 @@ load_config() {
  MAX_RETRIES="$DEFAULT_MAX_RETRIES"
  MEMORY_LIMIT_MB="$DEFAULT_MEMORY_LIMIT_MB"

- LLM_CLI_gpt5high='codex exec --profile gpt5high --dangerously-bypass-approvals-and-sandbox "{{PROMPT}}"'
- LLM_CLI_o3high='codex exec --profile o3high --dangerously-bypass-approvals-and-sandbox "{{PROMPT}}"'
- LLM_CLI_codex='codex exec --dangerously-bypass-approvals-and-sandbox "{{PROMPT}}"'
- LLM_CLI_gemini='gemini -y -p "{{PROMPT}}"'
- LLM_CLI_gemini_flash='gemini -y -p "{{PROMPT}}" --model gemini-2.5-flash'
- LLM_CLI_opus='claude --dangerously-skip-permissions --mcp-config "" --model opus -p "{{PROMPT}}"'
- LLM_CLI_opus_think='claude --dangerously-skip-permissions --mcp-config "" --model opus -p "ultrathink\n\n{{PROMPT}}"'
- LLM_CLI_sonnet='claude --dangerously-skip-permissions --mcp-config "" --model sonnet -p "{{PROMPT}}"'
- LLM_CLI_sonnet_think='claude --dangerously-skip-permissions --mcp-config "" --model sonnet -p "ultrathink\n\n{{PROMPT}}"'
- LLM_CLI_cursor_sonnet='cursor-agent sonnet -p "{{PROMPT}}"'
- LLM_CLI_cursor_opus='cursor-agent opus -p "{{PROMPT}}"'
- LLM_CLI_glm='opencode -m openrouter/z-ai/glm-4.6 run "{{PROMPT}}"'
- LLM_CLI_deepseek='opencode -m openrouter/deepseek/deepseek-v3.1-terminus run "{{PROMPT}}"'
  LLM_RUN="$DEFAULT_LLM_RUN"
  LLM_IDEATE="$DEFAULT_LLM_IDEATE"
@@ -0,0 +1,337 @@
+ #!/bin/bash
+ # Configuration loader for claude-evolve
+
+ # Default configuration values
+ DEFAULT_EVOLUTION_DIR="evolution"
+ DEFAULT_ALGORITHM_FILE="algorithm.py"
+ DEFAULT_EVALUATOR_FILE="evaluator.py"
+ DEFAULT_BRIEF_FILE="BRIEF.md"
+ DEFAULT_EVOLUTION_CSV="evolution.csv"
+ DEFAULT_OUTPUT_DIR=""
+ DEFAULT_PARENT_SELECTION="best"
+ # Detect Python command based on platform
+ detect_python_cmd() {
+ # Try python3 first (macOS, Linux)
+ if command -v python3 >/dev/null 2>&1; then
+ echo "python3"
+ # Try python (Windows, some Linux)
+ elif command -v python >/dev/null 2>&1; then
+ # Verify it's Python 3
+ if python -c "import sys; sys.exit(0 if sys.version_info[0] >= 3 else 1)" 2>/dev/null; then
+ echo "python"
+ else
+ echo "python3" # Fallback
+ fi
+ else
+ echo "python3" # Default fallback
+ fi
+ }
+
+ DEFAULT_PYTHON_CMD="$(detect_python_cmd)"
+
+ # Default ideation strategy values
+ DEFAULT_TOTAL_IDEAS=15
+ DEFAULT_NOVEL_EXPLORATION=3
+ DEFAULT_HILL_CLIMBING=5
+ DEFAULT_STRUCTURAL_MUTATION=3
+ DEFAULT_CROSSOVER_HYBRID=4
+ DEFAULT_NUM_ELITES=3
+ DEFAULT_NUM_REVOLUTION=2 # Number of top novel candidates to include
+
+ # Default parallel execution values
+ DEFAULT_PARALLEL_ENABLED=false
+ DEFAULT_MAX_WORKERS=4
+ DEFAULT_LOCK_TIMEOUT=10
+
+ # Default auto ideation value
+ DEFAULT_AUTO_IDEATE=true
+
+ # Default retry value
+ DEFAULT_MAX_RETRIES=3
+
+ # Default memory limit (in MB, 0 means no limit)
+ # Set to reasonable limit for ML workloads - about half of available system RAM
+ DEFAULT_MEMORY_LIMIT_MB=12288
+
+ # Default LLM CLI configuration - use simple variables instead of arrays
+ # Run: 100% local with qwen3 via Codex+Ollama (more reliable than aider)
+ DEFAULT_LLM_RUN="codex-qwen3 codex-oss gemini-flash"
+ # Ideate: Commercial models for idea generation + local fallback
+ DEFAULT_LLM_IDEATE="gemini sonnet-think gpt5high glm grok-4 codex-qwen3 codex-oss"
+
+ # Load configuration from a YAML file and update variables
+ _load_yaml_config() {
+ local config_file="$1"
+ if [[ ! -f "$config_file" ]]; then
+ return 0 # File does not exist, nothing to load
+ fi
+
+ echo "[DEBUG] Loading configuration from: $config_file" >&2
+
+ local in_ideation_section=false
+ local in_parallel_section=false
+ local in_llm_cli_section=false
+ local llm_cli_subsection=""
+
+ while IFS='' read -r line; do
+ [[ $line =~ ^[[:space:]]*# ]] || [[ -z $line ]] && continue
+
+ if [[ ! $line =~ ^([^:]+):(.*)$ ]]; then
+ continue
+ fi
+ local key="${BASH_REMATCH[1]}"
+ local value="${BASH_REMATCH[2]}"
+
+ local is_indented=false
+ [[ $key =~ ^[[:space:]]+ ]] && is_indented=true
+
+ key=$(echo "$key" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
+ value=$(echo "$value" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
+
+ if [[ "${DEBUG_CONFIG:-}" == "true" ]]; then
+ echo "[CONFIG DEBUG] Before comment removal: key='$key' value='$value'" >&2
+ fi
+
+ value=$(echo "$value" | sed 's/[[:space:]]*#.*$//')
+ value=$(echo "$value" | sed 's/^"//;s/"$//')
+
+ if [[ $key == "ideation_strategies" ]]; then
+ in_ideation_section=true
+ in_parallel_section=false
+ in_llm_cli_section=false
+ continue
+ elif [[ $key == "parallel" ]]; then
+ in_parallel_section=true
+ in_ideation_section=false
+ in_llm_cli_section=false
+ continue
+ elif [[ $key == "llm_cli" ]]; then
+ in_llm_cli_section=true
+ in_ideation_section=false
+ in_parallel_section=false
+ llm_cli_subsection=""
+ continue
+ elif [[ $is_indented == false ]] && [[ $in_ideation_section == true || $in_parallel_section == true || $in_llm_cli_section == true ]]; then
+ in_ideation_section=false
+ in_parallel_section=false
+ in_llm_cli_section=false
+ llm_cli_subsection=""
+ fi
+
+ if [[ $in_ideation_section == true ]]; then
+ case $key in
+ total_ideas) TOTAL_IDEAS="$value" ;;
+ novel_exploration) NOVEL_EXPLORATION="$value" ;;
+ hill_climbing) HILL_CLIMBING="$value" ;;
+ structural_mutation) STRUCTURAL_MUTATION="$value" ;;
+ crossover_hybrid) CROSSOVER_HYBRID="$value" ;;
+ num_elites) NUM_ELITES="$value" ;;
+ num_revolution) NUM_REVOLUTION="$value" ;;
+ esac
+ elif [[ $in_parallel_section == true ]]; then
+ case $key in
+ enabled) PARALLEL_ENABLED="$value" ;;
+ max_workers) MAX_WORKERS="$value" ;;
+ lock_timeout) LOCK_TIMEOUT="$value" ;;
+ esac
+ elif [[ $in_llm_cli_section == true ]]; then
+ if [[ $key == "run" || $key == "ideate" ]]; then
+ case $key in
+ run) LLM_RUN="$value" ;;
+ ideate) LLM_IDEATE="$value" ;;
+ esac
+ else
+ value=$(echo "$value" | sed "s/^'//;s/'$//")
+ local var_key=$(echo "$key" | sed 's/-/_/g')
+ if [[ "${DEBUG_CONFIG:-}" == "true" ]]; then
+ echo "[CONFIG DEBUG] Setting LLM_CLI_${var_key} = '$value'" >&2
+ fi
+ eval "LLM_CLI_${var_key}=\"$value\""
+ fi
+ else
+ case $key in
+ algorithm_file) ALGORITHM_FILE="$value" ;;
+ evaluator_file) EVALUATOR_FILE="$value" ;;
+ brief_file) BRIEF_FILE="$value" ;;
+ evolution_csv) EVOLUTION_CSV="$value" ;;
+ output_dir) OUTPUT_DIR="$value" ;;
+ parent_selection) PARENT_SELECTION="$value" ;;
+ python_cmd) PYTHON_CMD="$value" ;;
+ auto_ideate) AUTO_IDEATE="$value" ;;
+ max_retries) MAX_RETRIES="$value" ;;
+ memory_limit_mb) MEMORY_LIMIT_MB="$value" ;;
+ evolution_dir):
+ echo "[WARN] evolution_dir in config is ignored - automatically inferred from config file location" >&2
+ ;;
+ esac
+ fi
+ done < "$config_file"
+ # Keep track of the last config file loaded to infer evolution_dir
+ LAST_CONFIG_FILE_LOADED="$config_file"
+ }
+
+ load_config() {
+ echo "[DEBUG] $1 at start of load_config: '$1'" >&2
+ echo "[DEBUG] DEFAULT_EVOLUTION_DIR: $DEFAULT_EVOLUTION_DIR" >&2
+ # Set defaults first
+ EVOLUTION_DIR="$DEFAULT_EVOLUTION_DIR" # Initialize with default
+ ALGORITHM_FILE="$DEFAULT_ALGORITHM_FILE"
+ EVALUATOR_FILE="$DEFAULT_EVALUATOR_FILE"
+ BRIEF_FILE="$DEFAULT_BRIEF_FILE"
+ EVOLUTION_CSV="$DEFAULT_EVOLUTION_CSV"
+ OUTPUT_DIR="$DEFAULT_OUTPUT_DIR"
+ PARENT_SELECTION="$DEFAULT_PARENT_SELECTION"
+ PYTHON_CMD="$DEFAULT_PYTHON_CMD"
+
+ # Determine EVOLUTION_DIR based on specified logic, overriding default if found
+ if [[ -n "$CLAUDE_EVOLVE_WORKING_DIR" ]]; then
+ echo "[DEBUG] EVOLUTION_DIR set by CLAUDE_EVOLVE_WORKING_DIR: $CLAUDE_EVOLVE_WORKING_DIR" >&2
+ EVOLUTION_DIR="$CLAUDE_EVOLVE_WORKING_DIR"
+ elif [[ -f "evolution/evolution.csv" ]]; then
+ echo "[DEBUG] EVOLUTION_DIR set by evolution/evolution.csv: evolution" >&2
+ EVOLUTION_DIR="evolution"
+ elif [[ -f "./evolution.csv" ]]; then
+ echo "[DEBUG] EVOLUTION_DIR set by ./evolution.csv: ." >&2
+ EVOLUTION_DIR="."
+ else
+ echo "[DEBUG] EVOLUTION_DIR defaulting to: $DEFAULT_EVOLUTION_DIR" >&2
+ fi
+ echo "[DEBUG] EVOLUTION_DIR after initial determination: $EVOLUTION_DIR" >&2
+
+ TOTAL_IDEAS="$DEFAULT_TOTAL_IDEAS"
+ NOVEL_EXPLORATION="$DEFAULT_NOVEL_EXPLORATION"
+ HILL_CLIMBING="$DEFAULT_HILL_CLIMBING"
+ STRUCTURAL_MUTATION="$DEFAULT_STRUCTURAL_MUTATION"
+ CROSSOVER_HYBRID="$DEFAULT_CROSSOVER_HYBRID"
+ NUM_ELITES="$DEFAULT_NUM_ELITES"
+ NUM_REVOLUTION="$DEFAULT_NUM_REVOLUTION"
+
+ PARALLEL_ENABLED="$DEFAULT_PARALLEL_ENABLED"
+ MAX_WORKERS="$DEFAULT_MAX_WORKERS"
+ LOCK_TIMEOUT="$DEFAULT_LOCK_TIMEOUT"
+
+ AUTO_IDEATE="$DEFAULT_AUTO_IDEATE"
+ MAX_RETRIES="$DEFAULT_MAX_RETRIES"
+ MEMORY_LIMIT_MB="$DEFAULT_MEMORY_LIMIT_MB"
+
+ LLM_CLI_gpt5high='codex exec --profile gpt5high --dangerously-bypass-approvals-and-sandbox "{{PROMPT}}"'
+ LLM_CLI_o3high='codex exec --profile o3high --dangerously-bypass-approvals-and-sandbox "{{PROMPT}}"'
+ LLM_CLI_codex='codex exec --dangerously-bypass-approvals-and-sandbox "{{PROMPT}}"'
+ LLM_CLI_gemini='gemini -y -p "{{PROMPT}}"'
+ LLM_CLI_gemini_flash='gemini -y -p "{{PROMPT}}" --model gemini-2.5-flash'
+ LLM_CLI_opus='claude --dangerously-skip-permissions --mcp-config "" --model opus -p "{{PROMPT}}"'
+ LLM_CLI_opus_think='claude --dangerously-skip-permissions --mcp-config "" --model opus -p "ultrathink\n\n{{PROMPT}}"'
+ LLM_CLI_sonnet='claude --dangerously-skip-permissions --mcp-config "" --model sonnet -p "{{PROMPT}}"'
+ LLM_CLI_sonnet_think='claude --dangerously-skip-permissions --mcp-config "" --model sonnet -p "ultrathink\n\n{{PROMPT}}"'
+ LLM_CLI_cursor_sonnet='cursor-agent sonnet -p "{{PROMPT}}"'
+ LLM_CLI_cursor_opus='cursor-agent opus -p "{{PROMPT}}"'
+ LLM_CLI_glm='opencode -m openrouter/z-ai/glm-4.6 run "{{PROMPT}}"'
+ LLM_CLI_deepseek='opencode -m openrouter/deepseek/deepseek-v3.1-terminus run "{{PROMPT}}"'
+ LLM_CLI_ollama-cloud-gpt-oss='codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss --model gpt-oss:20b-cloud "{{PROMPT}}"' LLM_CLI_ollama-cloud-kimi-k2='codex exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check --oss --model kimi-k2:1t-cloud "{{PROMPT}}"' LLM_RUN="$DEFAULT_LLM_RUN"
+ LLM_IDEATE="$DEFAULT_LLM_IDEATE"
+
+ # Determine local config file path relative to EVOLUTION_DIR
+ local local_config_file="$EVOLUTION_DIR/config.yaml"
+
+ # Load local config
+ _load_yaml_config "$local_config_file"
+
+ # Load global config (overrides local config)
+ local global_config_file="$HOME/.config/claude-evolve/config.yaml"
+ _load_yaml_config "$global_config_file"
+
+ echo "[DEBUG] EVOLUTION_DIR before FULL_EVOLUTION_DIR calculation: $EVOLUTION_DIR" >&2
+
+ # Create full paths - ALL paths are relative to EVOLUTION_DIR
+ # Make EVOLUTION_DIR absolute if it's relative
+ if [[ "$EVOLUTION_DIR" = /* ]]; then
+ FULL_EVOLUTION_DIR="$EVOLUTION_DIR"
+ else
+ FULL_EVOLUTION_DIR="$(cd "$EVOLUTION_DIR" 2>/dev/null && pwd)" || FULL_EVOLUTION_DIR="$EVOLUTION_DIR"
+ fi
+
+ FULL_ALGORITHM_PATH="$FULL_EVOLUTION_DIR/$ALGORITHM_FILE"
+ FULL_EVALUATOR_PATH="$FULL_EVOLUTION_DIR/$EVALUATOR_FILE"
+ FULL_BRIEF_PATH="$FULL_EVOLUTION_DIR/$BRIEF_FILE"
+ FULL_CSV_PATH="$FULL_EVOLUTION_DIR/$EVOLUTION_CSV"
+
+ if [[ -n $OUTPUT_DIR ]]; then
+ FULL_OUTPUT_DIR="$FULL_EVOLUTION_DIR/$OUTPUT_DIR"
+ else
+ FULL_OUTPUT_DIR="$FULL_EVOLUTION_DIR"
+ fi
+ echo "[DEBUG] FULL_EVOLUTION_DIR at end of load_config: $FULL_EVOLUTION_DIR" >&2
+ }
+
+ # Validate configuration
+ validate_config() {
+ local errors=0
+
+ if [[ ! -d "$FULL_EVOLUTION_DIR" ]]; then
+ echo "[ERROR] Evolution directory not found: $FULL_EVOLUTION_DIR" >&2
+ ((errors++))
+ fi
+
+ if [[ ! -f "$FULL_ALGORITHM_PATH" ]]; then
+ echo "[ERROR] Algorithm file not found: $FULL_ALGORITHM_PATH" >&2
+ ((errors++))
+ fi
+
+ if [[ ! -f "$FULL_EVALUATOR_PATH" ]]; then
+ echo "[ERROR] Evaluator file not found: $FULL_EVALUATOR_PATH" >&2
+ ((errors++))
+ fi
+
+ if [[ ! -f "$FULL_BRIEF_PATH" ]]; then
+ echo "[ERROR] Brief file not found: $FULL_BRIEF_PATH" >&2
+ ((errors++))
+ fi
+
+ if ! command -v "$PYTHON_CMD" >/dev/null 2>&1; then
+ echo "[ERROR] Python command not found: $PYTHON_CMD" >&2
+ echo "[ERROR] Please install Python 3.x or set python_cmd in config.yaml" >&2
+ echo "[ERROR] Examples: python_cmd: \"python\" or python_cmd: \"C:\\Python39\\python.exe\"" >&2
+ ((errors++))
+ else
+ # Verify Python version is 3.x
+ if ! "$PYTHON_CMD" -c "import sys; sys.exit(0 if sys.version_info[0] >= 3 else 1)" 2>/dev/null; then
+ echo "[ERROR] Python 3.x required, but $PYTHON_CMD appears to be Python 2" >&2
+ echo "[ERROR] Please set python_cmd in config.yaml to point to Python 3" >&2
+ ((errors++))
+ fi
+ fi
+
+ return $errors
+ }
+
+ # Show current configuration
+ show_config() {
+ echo "Current claude-evolve configuration:"
+ echo " Evolution directory: $FULL_EVOLUTION_DIR"
+ echo " Algorithm file: $FULL_ALGORITHM_PATH"
+ echo " Evaluator file: $FULL_EVALUATOR_PATH"
+ echo " Brief file: $FULL_BRIEF_PATH"
+ echo " CSV file: $FULL_CSV_PATH"
+ echo " Output directory: $FULL_OUTPUT_DIR"
+ echo " Parent selection: $PARENT_SELECTION"
+ echo " Python command: $PYTHON_CMD"
+ echo " Parallel enabled: $PARALLEL_ENABLED"
+ echo " Max workers: $MAX_WORKERS"
+ echo " Lock timeout: $LOCK_TIMEOUT"
+ echo " Auto ideate: $AUTO_IDEATE"
+ echo " Max retries: $MAX_RETRIES"
+ echo " Memory limit: ${MEMORY_LIMIT_MB}MB"
+ echo " LLM configuration:"
+ # Show LLM configurations using dynamic variable names
+ for model in gpt5high o3high codex gemini opus opus_think sonnet sonnet_think cursor_sonnet cursor_opus glm deepseek; do
+ var_name="LLM_CLI_${model}"
+ var_value=$(eval echo "\$$var_name")
+ if [[ -n "$var_value" ]]; then
+ # Convert underscore back to dash for display
+ display_name=$(echo "$model" | sed 's/_/-/g')
+ echo " $display_name: $var_value"
+ fi
+ done
+ echo " LLM for run: $LLM_RUN"
+ echo " LLM for ideate: $LLM_IDEATE"
+ }
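The listing above exposes load_config, validate_config, and show_config. A minimal usage sketch; the relative path lib/config.sh is assumed from the package layout shown in this diff, not confirmed by it.

#!/bin/bash
# Sketch only: load the configuration, validate it, and print the result.
source lib/config.sh
load_config
if ! validate_config; then
  # validate_config returns the number of errors, so a nonzero count lands here.
  echo "[ERROR] configuration is incomplete; see messages above" >&2
  exit 1
fi
show_config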