claude-evolve 1.9.5 → 1.9.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/__pycache__/ai_cli.cpython-314.pyc +0 -0
- package/lib/__pycache__/evolve_ideate.cpython-314.pyc +0 -0
- package/lib/__pycache__/evolve_run.cpython-314.pyc +0 -0
- package/lib/__pycache__/evolve_worker.cpython-314.pyc +0 -0
- package/lib/ai_cli.py +175 -1
- package/lib/evolve_ideate.py +40 -10
- package/lib/evolve_run.py +3 -14
- package/lib/evolve_worker.py +47 -64
- package/package.json +1 -1
package/lib/__pycache__/ai_cli.cpython-314.pyc
Binary file

package/lib/__pycache__/evolve_ideate.cpython-314.pyc
Binary file

package/lib/__pycache__/evolve_run.cpython-314.pyc
Binary file

package/lib/__pycache__/evolve_worker.cpython-314.pyc
Binary file
package/lib/ai_cli.py
CHANGED
@@ -5,11 +5,13 @@ AIDEV-NOTE: This keeps ai-cli.sh as the source of truth for model configs and ti
 """
 
 import os
+import random
 import subprocess
 import sys
 import tempfile
+import time
 from pathlib import Path
-from typing import Optional, Tuple
+from typing import Optional, Tuple, List
 
 # Path to ai-cli.sh relative to this file
 SCRIPT_DIR = Path(__file__).parent
@@ -155,6 +157,178 @@ def call_ai(
         raise AIError(f"Failed to call AI: {e}")
 
 
+def get_models_for_command(command: str) -> List[str]:
+    """
+    Get the list of available models for a command.
+
+    Args:
+        command: Either "run" or "ideate"
+
+    Returns:
+        List of model names
+    """
+    bash_script = f'''
+source "{SCRIPT_DIR}/config.sh"
+load_config
+case "$1" in
+    run) echo "$LLM_RUN" ;;
+    ideate) echo "$LLM_IDEATE" ;;
+esac
+'''
+
+    result = subprocess.run(
+        ["bash", "-c", bash_script, "bash", command],
+        capture_output=True,
+        text=True
+    )
+
+    if result.returncode != 0:
+        return []
+
+    model_list = result.stdout.strip()
+    if not model_list:
+        return []
+
+    return model_list.split()
+
+
+def call_ai_model(
+    prompt: str,
+    model_name: str,
+    working_dir: Optional[str] = None,
+    env_vars: Optional[dict] = None
+) -> Tuple[str, str]:
+    """
+    Call a specific AI model.
+
+    Args:
+        prompt: The prompt to send to the AI
+        model_name: The specific model to use
+        working_dir: Directory to run the command in
+        env_vars: Additional environment variables
+
+    Returns:
+        Tuple of (output, model_name)
+
+    Raises:
+        TimeoutError, RateLimitError, APIExhaustedError, AIError
+    """
+    bash_script = f'''
+source "{SCRIPT_DIR}/config.sh"
+load_config
+source "{AI_CLI_PATH}"
+call_ai_model_configured "$1" "$2"
+'''
+
+    env = os.environ.copy()
+    if working_dir:
+        env['CLAUDE_EVOLVE_WORKING_DIR'] = working_dir
+    if env_vars:
+        env.update(env_vars)
+
+    try:
+        result = subprocess.run(
+            ["bash", "-c", bash_script, "bash", model_name, prompt],
+            capture_output=True,
+            text=True,
+            cwd=working_dir,
+            env=env
+        )
+
+        output = result.stdout
+        stderr = result.stderr
+        exit_code = result.returncode
+
+        # Print stderr (contains debug info)
+        if stderr:
+            for line in stderr.strip().split('\n'):
+                if line:
+                    print(f" {line}", file=sys.stderr)
+
+        # Handle exit codes
+        if exit_code == 124:
+            raise TimeoutError(f"AI call timed out (model: {model_name})")
+        elif exit_code == 2:
+            raise RateLimitError(f"Rate limit hit (model: {model_name})")
+        elif exit_code == 3:
+            raise APIExhaustedError(f"API quota exhausted (model: {model_name})")
+        elif exit_code != 0:
+            raise AIError(f"AI call failed with exit code {exit_code}: {stderr}")
+
+        return output, model_name
+
+    except subprocess.SubprocessError as e:
+        raise AIError(f"Failed to call AI: {e}")
+
+
+def call_ai_with_backoff(
+    prompt: str,
+    command: str = "ideate",
+    working_dir: Optional[str] = None,
+    env_vars: Optional[dict] = None,
+    max_rounds: int = 10,
+    initial_wait: int = 60,
+    max_wait: int = 600
+) -> Tuple[str, str]:
+    """
+    Call AI with round-based retries and exponential backoff.
+
+    AIDEV-NOTE: This is the robust retry mechanism for handling rate limits.
+    - Tries each model in the pool (shuffled order)
+    - If all models fail in a round, waits with exponential backoff
+    - Keeps going until success or max_rounds exhausted
+
+    Args:
+        prompt: The prompt to send
+        command: "run" or "ideate" - determines model pool
+        working_dir: Directory for file operations
+        env_vars: Additional environment variables
+        max_rounds: Maximum number of full rounds to attempt
+        initial_wait: Initial wait time in seconds after first failed round
+        max_wait: Maximum wait time in seconds between rounds
+
+    Returns:
+        Tuple of (output, model_name)
+
+    Raises:
+        AIError: If all rounds exhausted without success
+    """
+    models = get_models_for_command(command)
+    if not models:
+        raise AIError(f"No models configured for command: {command}")
+
+    wait_time = initial_wait
+    last_errors = {}
+
+    for round_num in range(max_rounds):
+        # Shuffle models each round for fairness
+        shuffled_models = models.copy()
+        random.shuffle(shuffled_models)
+
+        print(f"[AI] Round {round_num + 1}/{max_rounds}: trying {len(shuffled_models)} models", file=sys.stderr)
+
+        for model in shuffled_models:
+            try:
+                output, model_name = call_ai_model(prompt, model, working_dir, env_vars)
+                if round_num > 0:
+                    print(f"[AI] Succeeded on round {round_num + 1} with {model}", file=sys.stderr)
+                return output, model_name
+            except AIError as e:
+                last_errors[model] = str(e)
+                # Continue to next model
+
+        # All models failed in this round
+        if round_num < max_rounds - 1:
+            print(f"[AI] All models failed in round {round_num + 1}, waiting {wait_time}s before retry...", file=sys.stderr)
+            time.sleep(wait_time)
+            # Exponential backoff: 60 -> 120 -> 240 -> 480 (capped at max_wait)
+            wait_time = min(wait_time * 2, max_wait)
+
+    # All rounds exhausted
+    error_summary = "; ".join(f"{m}: {e[:50]}" for m, e in list(last_errors.items())[:3])
+    raise AIError(f"All {max_rounds} rounds exhausted. Last errors: {error_summary}")
+
+
 def call_ai_for_file_edit(
     prompt: str,
     file_path: str,
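
Two implementation notes on the new helpers above. First, the bash bridge `["bash", "-c", bash_script, "bash", command]` works because the argument after the `-c` script becomes `$0` and the next one becomes `$1`, which is what the inlined script's `case "$1" in` dispatches on. Second, `call_ai_with_backoff` only sleeps between failed rounds, doubling the wait and capping it at `max_wait`. A minimal sketch of the resulting schedule under the shipped defaults (`backoff_schedule` is our illustrative name, not a function in the package):

    def backoff_schedule(initial_wait=60, max_wait=600, max_rounds=10):
        """Waits applied after each failed round (no wait after the last)."""
        wait, schedule = initial_wait, []
        for _ in range(max_rounds - 1):
            schedule.append(wait)
            wait = min(wait * 2, max_wait)  # same doubling-with-cap as the diff
        return schedule

    print(backoff_schedule())
    # [60, 120, 240, 480, 600, 600, 600, 600, 600]

Worst case, a fully failing model pool therefore sleeps 3900 seconds (65 minutes) across the 10 rounds, in addition to the time spent on the calls themselves.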
package/lib/evolve_ideate.py
CHANGED
@@ -23,7 +23,7 @@ SCRIPT_DIR = Path(__file__).parent
 sys.path.insert(0, str(SCRIPT_DIR.parent))
 
 from lib.evolution_csv import EvolutionCSV
-from lib.ai_cli import
+from lib.ai_cli import call_ai_with_backoff, get_git_protection_warning, AIError
 from lib.embedding import check_novelty as check_embedding_novelty, get_embedding, set_cache_file, save_cache
 
 
@@ -47,6 +47,13 @@ class IdeationConfig:
     novelty_enabled: bool = True
     novelty_threshold: float = 0.92
 
+    # Retry configuration with exponential backoff
+    # AIDEV-NOTE: This implements round-based retries like the shell version.
+    # Each round tries ALL models. If all fail, wait and retry.
+    max_rounds: int = 10  # Max full rounds of all models
+    initial_wait: int = 60  # Seconds to wait after first failed round
+    max_wait: int = 600  # Max wait between rounds (10 minutes)
+
 
 @dataclass
 class Idea:
@@ -85,8 +92,13 @@ class IdeationStrategy(ABC):
         """Build the AI prompt."""
        pass
 
-    def generate(self, context: IdeationContext, count: int
-
+    def generate(self, context: IdeationContext, count: int,
+                 max_rounds: int = 10, initial_wait: int = 60, max_wait: int = 600) -> List[Idea]:
+        """Generate ideas using this strategy with round-based retry and backoff.
+
+        AIDEV-NOTE: Uses call_ai_with_backoff for robust retry handling.
+        Each round tries ALL models. If all fail, waits with exponential backoff.
+        """
         if count <= 0:
             return []
 
@@ -110,8 +122,15 @@ class IdeationStrategy(ABC):
             # Build prompt
             prompt = self.build_prompt(context, ids, temp_csv.name)
 
-            # Call AI
-            output, model =
+            # Call AI with round-based retry and backoff
+            output, model = call_ai_with_backoff(
+                prompt,
+                command="ideate",
+                working_dir=self.config.evolution_dir,
+                max_rounds=max_rounds,
+                initial_wait=initial_wait,
+                max_wait=max_wait
+            )
 
             # Parse results from modified CSV
             ideas = self._parse_results(temp_csv, ids)
@@ -120,12 +139,15 @@ class IdeationStrategy(ABC):
                 # Record model used
                 for idea in ideas:
                     idea.strategy = f"{self.name} ({model})"
-
-
+                return ideas
+            else:
+                print(f"[IDEATE] AI completed but no ideas parsed from output", file=sys.stderr)
+                return []
 
         except AIError as e:
-            print(f"[IDEATE]
+            print(f"[IDEATE] All retries exhausted in {self.name}: {e}", file=sys.stderr)
             return []
+
         finally:
             temp_csv.unlink(missing_ok=True)
 
@@ -382,7 +404,12 @@ class Ideator:
             if count <= 0:
                 continue
 
-            ideas = strategy.generate(
+            ideas = strategy.generate(
+                context, count,
+                max_rounds=self.config.max_rounds,
+                initial_wait=self.config.initial_wait,
+                max_wait=self.config.max_wait
+            )
 
             if ideas:
                 strategies_succeeded += 1
@@ -472,7 +499,10 @@ def load_config(config_path: Optional[str] = None) -> IdeationConfig:
         crossover_hybrid=ideation.get('crossover_hybrid', 4),
         num_elites=ideation.get('num_elites', 3),
         novelty_enabled=novelty.get('enabled', True),
-        novelty_threshold=novelty.get('threshold', 0.92)
+        novelty_threshold=novelty.get('threshold', 0.92),
+        max_rounds=ideation.get('max_rounds', 10),
+        initial_wait=ideation.get('initial_wait', 60),
+        max_wait=ideation.get('max_wait', 600)
     )
 
 
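
The three new `IdeationConfig` fields are read from the same `ideation:` section of the YAML config that already supplies `crossover_hybrid` and `num_elites`, and (per the evolve_worker.py change below) the worker reads identical keys, so a single config block tunes retries for both commands. A small sketch of the lookup, with a dict literal standing in for the parsed YAML (the values here are illustrative, not the defaults):

    # Stand-in for the parsed YAML; keys mirror the ideation.get(...) calls above.
    data = {"ideation": {"max_rounds": 5, "initial_wait": 30, "max_wait": 300}}

    ideation = data.get("ideation", {})
    print(ideation.get("max_rounds", 10),    # 5
          ideation.get("initial_wait", 60),  # 30
          ideation.get("max_wait", 600))     # 300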
package/lib/evolve_run.py
CHANGED
@@ -63,12 +63,9 @@ class WorkerPool:
         cmd.extend(['--timeout', str(self.timeout)])
 
         try:
-            proc = subprocess.Popen(
-                cmd,
-                stdout=subprocess.PIPE,
-                stderr=subprocess.STDOUT,
-                text=True
-            )
+            # Don't capture output - let it stream directly to terminal
+            # This provides real-time visibility into which models are being used
+            proc = subprocess.Popen(cmd)
             self.workers[proc.pid] = proc
             print(f"[RUN] Spawned worker {proc.pid}", file=sys.stderr)
             return proc.pid
@@ -86,14 +83,6 @@ class WorkerPool:
             if ret is not None:
                 finished_pids.append(pid)
                 exit_codes.append(ret)
-
-                # Log output
-                if proc.stdout:
-                    output = proc.stdout.read()
-                    if output:
-                        for line in output.strip().split('\n'):
-                            print(f"[WORKER-{pid}] {line}", file=sys.stderr)
-
                 print(f"[RUN] Worker {pid} exited with code {ret}", file=sys.stderr)
 
         for pid in finished_pids:
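
Dropping the pipes is what makes worker logs stream live: `subprocess.Popen(cmd)` with no `stdout`/`stderr` arguments lets the child inherit the parent's terminal, and the pool only needs `poll()` to reap exit codes. A reduced sketch of the pattern (ours, not the package's exact code):

    import subprocess
    import sys
    import time

    # No capture: child output goes straight to the parent's terminal.
    procs = [subprocess.Popen([sys.executable, "-c", "print('worker output')"])
             for _ in range(2)]

    while procs:
        for proc in procs[:]:
            ret = proc.poll()  # non-blocking check, as in WorkerPool above
            if ret is not None:
                print(f"[RUN] Worker {proc.pid} exited with code {ret}", file=sys.stderr)
                procs.remove(proc)
        time.sleep(0.1)

As a side effect this removes a classic deadlock risk in the old pattern: reading `proc.stdout` only after the worker exits can hang once a chatty child fills the pipe buffer.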
package/lib/evolve_worker.py
CHANGED
@@ -31,7 +31,7 @@ SCRIPT_DIR = Path(__file__).parent
 sys.path.insert(0, str(SCRIPT_DIR.parent))
 
 from lib.evolution_csv import EvolutionCSV
-from lib.ai_cli import
+from lib.ai_cli import call_ai_with_backoff, get_git_protection_warning, AIError
 
 
 @dataclass
@@ -46,8 +46,11 @@ class Config:
     python_cmd: str = "python3"
     memory_limit_mb: int = 0
     timeout_seconds: int = 600
-    max_ai_retries: int = 3
     max_candidates: int = 5
+    # Retry configuration with exponential backoff
+    max_rounds: int = 10
+    initial_wait: int = 60
+    max_wait: int = 600
 
 
 @dataclass
@@ -139,52 +142,42 @@ This is especially important for models with smaller context windows (like GLM).
 
 CRITICAL: If you do not know how to implement what was asked for, or if the requested change is unclear or not feasible, you MUST refuse to make any changes. DO NOT modify the code if you are uncertain about the implementation. Simply respond that you cannot implement the requested change and explain why. It is better to refuse than to make incorrect or random changes."""
 
-    def
+    def _call_ai_with_backoff(self, prompt: str, target_file: Path) -> Tuple[bool, str]:
         """
-        Call AI with
+        Call AI with round-based retry and exponential backoff.
+
+        AIDEV-NOTE: Uses call_ai_with_backoff which tries all models in the pool,
+        then waits with exponential backoff if all fail, and repeats.
 
         Returns:
             Tuple of (success, model_name)
         """
-
-
+        # Get file hash before AI call
+        hash_before = self._file_hash(target_file) if target_file.exists() else None
 
-
-
-
-
+        try:
+            output, model = call_ai_with_backoff(
+                prompt,
+                command="run",
+                working_dir=self.config.evolution_dir,
+                max_rounds=self.config.max_rounds,
+                initial_wait=self.config.initial_wait,
+                max_wait=self.config.max_wait
+            )
 
-            #
-
+            # Check if file was modified
+            hash_after = self._file_hash(target_file) if target_file.exists() else None
 
-
-
-
-
-
-
-
-
-
-
-                    print(f"[WORKER-{os.getpid()}] AI did not modify file", file=sys.stderr)
-
-            except RateLimitError as e:
-                print(f"[WORKER-{os.getpid()}] Rate limit: {e}", file=sys.stderr)
-                raise  # Propagate to caller
-            except APIExhaustedError as e:
-                print(f"[WORKER-{os.getpid()}] API exhausted: {e}", file=sys.stderr)
-                raise  # Propagate to caller
-            except TimeoutError as e:
-                print(f"[WORKER-{os.getpid()}] Timeout: {e}", file=sys.stderr)
-            except AIError as e:
-                print(f"[WORKER-{os.getpid()}] AI error: {e}", file=sys.stderr)
-
-            if attempt < self.config.max_ai_retries:
-                print(f"[WORKER-{os.getpid()}] Will retry with different model...", file=sys.stderr)
-                time.sleep(2)
-
-        return False, ""
+            if hash_before != hash_after and hash_after is not None:
+                print(f"[WORKER-{os.getpid()}] AI successfully modified file (model: {model})", file=sys.stderr)
+                return True, model
+            else:
+                print(f"[WORKER-{os.getpid()}] AI completed but did not modify file", file=sys.stderr)
+                return False, model
+
+        except AIError as e:
+            print(f"[WORKER-{os.getpid()}] All AI retries exhausted: {e}", file=sys.stderr)
+            return False, ""
 
     def _file_hash(self, path: Path) -> Optional[str]:
         """Get file hash."""
@@ -329,33 +322,19 @@ CRITICAL: If you do not know how to implement what was asked for, or if the requ
         print(f"[WORKER-{os.getpid()}] Copying {source_file} to {target_file}", file=sys.stderr)
         shutil.copy(source_file, target_file)
 
-        # Call AI to modify
+        # Call AI to modify (uses round-based retry with backoff)
         prompt = self._build_prompt(candidate, target_file.name)
+        success, model = self._call_ai_with_backoff(prompt, target_file)
 
-
-
-
-        if not success:
-            print(f"[WORKER-{os.getpid()}] AI failed after all retries", file=sys.stderr)
-            target_file.unlink(missing_ok=True)
-            return 77  # AI generation failed
-
-        # Record model used
-        if model:
-            with EvolutionCSV(self.config.csv_path) as csv:
-                csv.update_candidate_field(candidate.id, 'run-LLM', model)
-
-        except RateLimitError:
+        if not success:
+            print(f"[WORKER-{os.getpid()}] AI failed after all retries", file=sys.stderr)
             target_file.unlink(missing_ok=True)
-
-            csv.update_candidate_status(candidate.id, 'pending')
-            return 2  # Rate limit
+            return 77  # AI generation failed
 
-
-
+        # Record model used
+        if model:
             with EvolutionCSV(self.config.csv_path) as csv:
-                csv.
-                return 3  # API exhausted
+                csv.update_candidate_field(candidate.id, 'run-LLM', model)
 
         # Check syntax
         if not self._check_syntax(target_file):
@@ -475,6 +454,8 @@ def load_config_from_yaml(config_path: Optional[str] = None) -> Config:
             p = base_dir / p
         return str(p.resolve())
 
+    ideation = data.get('ideation', {})
+
     return Config(
         csv_path=resolve(data.get('csv_file', 'evolution.csv')),
         evolution_dir=str(base_dir.resolve()),
@@ -485,8 +466,10 @@
         python_cmd=data.get('python_cmd', 'python3'),
         memory_limit_mb=data.get('memory_limit_mb', 0),
         timeout_seconds=data.get('timeout_seconds', 600),
-
-
+        max_candidates=data.get('worker_max_candidates', 5),
+        max_rounds=ideation.get('max_rounds', 10),
+        initial_wait=ideation.get('initial_wait', 60),
+        max_wait=ideation.get('max_wait', 600)
     )
 
 
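
The success test in the new `_call_ai_with_backoff` is purely content-based: hash the target file before and after the call, and treat "unchanged or missing" as failure. The diff does not show `_file_hash`'s body, so here is a self-contained sketch of the same check assuming a sha256 hash (the package's actual implementation may differ):

    import hashlib
    from pathlib import Path
    from typing import Optional

    def file_hash(path: Path) -> Optional[str]:
        """sha256 of file contents; None if the file is missing (assumed impl)."""
        if not path.exists():
            return None
        return hashlib.sha256(path.read_bytes()).hexdigest()

    target = Path("candidate.py")
    target.write_text("x = 1\n")
    before = file_hash(target)
    target.write_text("x = 2\n")  # stand-in for the AI's edit
    after = file_hash(target)
    print(before != after and after is not None)  # True -> counts as success

Note the `hash_after is not None` guard: a run in which the AI deletes the target file is reported as "did not modify" rather than as success.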