weco 0.2.23__py3-none-any.whl → 0.2.25__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
weco/api.py CHANGED
@@ -4,6 +4,7 @@ import requests
 from rich.console import Console

 from weco import __pkg_version__, __base_url__
+from .constants import DEFAULT_API_TIMEOUT


 def handle_api_error(e: requests.exceptions.HTTPError, console: Console) -> None:
@@ -30,7 +31,7 @@ def start_optimization_run(
     additional_instructions: str = None,
     api_keys: Dict[str, Any] = {},
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = 800,
+    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
 ) -> Dict[str, Any]:
     """Start the optimization run."""
     with console.status("[bold green]Starting Optimization..."):
@@ -53,22 +54,29 @@ def start_optimization_run(
                 timeout=timeout,
             )
             response.raise_for_status()
-            return response.json()
+            result = response.json()
+            # Handle None values for code and plan fields
+            if result.get("plan") is None:
+                result["plan"] = ""
+            if result.get("code") is None:
+                result["code"] = ""
+            return result
         except requests.exceptions.HTTPError as e:
             handle_api_error(e, console)
-            sys.exit(1)
+            raise
         except Exception as e:
             console.print(f"[bold red]Error starting run: {e}[/]")
-            sys.exit(1)
+            raise


 def evaluate_feedback_then_suggest_next_solution(
+    console: Console,
     run_id: str,
     execution_output: str,
     additional_instructions: str = None,
     api_keys: Dict[str, Any] = {},
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = 800,
+    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
 ) -> Dict[str, Any]:
     """Evaluate the feedback and suggest the next solution."""
     try:
@@ -83,10 +91,17 @@ def evaluate_feedback_then_suggest_next_solution(
             timeout=timeout,
         )
         response.raise_for_status()
-        return response.json()
+        result = response.json()
+        # Handle None values for code and plan fields
+        if result.get("plan") is None:
+            result["plan"] = ""
+        if result.get("code") is None:
+            result["code"] = ""
+
+        return result
     except requests.exceptions.HTTPError as e:
         # Allow caller to handle suggest errors, maybe retry or terminate
-        handle_api_error(e, Console())  # Use default console if none passed
+        handle_api_error(e, console)  # Use default console if none passed
         raise  # Re-raise the exception
     except Exception as e:
         print(f"Error: {e}")  # Use print as console might not be available
@@ -94,7 +109,11 @@ def evaluate_feedback_then_suggest_next_solution(


 def get_optimization_run_status(
-    run_id: str, include_history: bool = False, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = 800
+    console: Console,
+    run_id: str,
+    include_history: bool = False,
+    auth_headers: dict = {},
+    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
 ) -> Dict[str, Any]:
     """Get the current status of the optimization run."""
     try:
@@ -102,16 +121,30 @@ def get_optimization_run_status(
             f"{__base_url__}/runs/{run_id}", params={"include_history": include_history}, headers=auth_headers, timeout=timeout
         )
         response.raise_for_status()
-        return response.json()
+        result = response.json()
+        # Handle None values for code and plan fields in best_result and nodes
+        if result.get("best_result"):
+            if result["best_result"].get("code") is None:
+                result["best_result"]["code"] = ""
+            if result["best_result"].get("plan") is None:
+                result["best_result"]["plan"] = ""
+        # Handle None values for code and plan fields in nodes array
+        if result.get("nodes"):
+            for i, node in enumerate(result["nodes"]):
+                if node.get("plan") is None:
+                    result["nodes"][i]["plan"] = ""
+                if node.get("code") is None:
+                    result["nodes"][i]["code"] = ""
+        return result
     except requests.exceptions.HTTPError as e:
-        handle_api_error(e, Console())  # Use default console
+        handle_api_error(e, console)  # Use default console
         raise  # Re-raise
     except Exception as e:
         print(f"Error getting run status: {e}")
         raise  # Re-raise


-def send_heartbeat(run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = 10) -> bool:
+def send_heartbeat(run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = (10, 10)) -> bool:
     """Send a heartbeat signal to the backend."""
     try:
         response = requests.put(f"{__base_url__}/runs/{run_id}/heartbeat", headers=auth_headers, timeout=timeout)
@@ -119,9 +152,9 @@ def send_heartbeat(run_id: str, auth_headers: dict = {}, timeout: Union[int, Tup
         return True
     except requests.exceptions.HTTPError as e:
         if e.response.status_code == 409:
-            print(f"Heartbeat ignored: Run {run_id} is not running.", file=sys.stderr)
+            print(f"Polling ignore: Run {run_id} is not running.", file=sys.stderr)
         else:
-            print(f"Heartbeat failed for run {run_id}: HTTP {e.response.status_code}", file=sys.stderr)
+            print(f"Polling failed for run {run_id}: HTTP {e.response.status_code}", file=sys.stderr)
         return False
     except Exception as e:
         print(f"Error sending heartbeat for run {run_id}: {e}", file=sys.stderr)
@@ -134,7 +167,7 @@ def report_termination(
     reason: str,
     details: Optional[str] = None,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = 30,
+    timeout: Union[int, Tuple[int, int]] = (10, 30),
 ) -> bool:
     """Report the termination reason to the backend."""
     try:
@@ -172,22 +205,22 @@ def _determine_model_and_api_key() -> tuple[str, dict[str, str]]:
         api_key_dict = {"GEMINI_API_KEY": llm_api_keys["GEMINI_API_KEY"]}
     else:
         # This should never happen if determine_default_model works correctly
-        raise ValueError(f"Unknown model returned: {model}")
+        raise ValueError(f"Unknown default model choice: {model}")

     return model, api_key_dict


 def get_optimization_suggestions_from_codebase(
+    console: Console,
     gitingest_summary: str,
     gitingest_tree: str,
     gitingest_content_str: str,
-    console: Console,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = 800,
+    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
 ) -> Optional[List[Dict[str, Any]]]:
     """Analyze codebase and get optimization suggestions using the model-agnostic backend API."""
+    model, api_key_dict = _determine_model_and_api_key()
     try:
-        model, api_key_dict = _determine_model_and_api_key()
         response = requests.post(
             f"{__base_url__}/onboard/analyze-codebase",
             json={
@@ -213,16 +246,16 @@ def get_optimization_suggestions_from_codebase(


 def generate_evaluation_script_and_metrics(
+    console: Console,
     target_file: str,
     description: str,
     gitingest_content_str: str,
-    console: Console,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = 800,
+    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
 ) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
     """Generate evaluation script and determine metrics using the model-agnostic backend API."""
+    model, api_key_dict = _determine_model_and_api_key()
     try:
-        model, api_key_dict = _determine_model_and_api_key()
         response = requests.post(
             f"{__base_url__}/onboard/generate-script",
             json={
@@ -247,18 +280,18 @@ def generate_evaluation_script_and_metrics(


 def analyze_evaluation_environment(
+    console: Console,
     target_file: str,
     description: str,
     gitingest_summary: str,
     gitingest_tree: str,
     gitingest_content_str: str,
-    console: Console,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = 800,
+    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
 ) -> Optional[Dict[str, Any]]:
     """Analyze existing evaluation scripts and environment using the model-agnostic backend API."""
+    model, api_key_dict = _determine_model_and_api_key()
     try:
-        model, api_key_dict = _determine_model_and_api_key()
         response = requests.post(
             f"{__base_url__}/onboard/analyze-environment",
             json={
@@ -285,16 +318,16 @@


 def analyze_script_execution_requirements(
+    console: Console,
     script_content: str,
     script_path: str,
     target_file: str,
-    console: Console,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = 800,
+    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
 ) -> Optional[str]:
     """Analyze script to determine proper execution command using the model-agnostic backend API."""
+    model, api_key_dict = _determine_model_and_api_key()
     try:
-        model, api_key_dict = _determine_model_and_api_key()
         response = requests.post(
             f"{__base_url__}/onboard/analyze-script",
             json={
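The None-normalization added above is repeated verbatim in three endpoints. A minimal sketch of the shared pattern — the helper name `_blank_if_none` is hypothetical and not part of the package:

```python
# Hypothetical helper (illustrative only, not in weco/api.py) capturing the
# None-normalization that the three endpoints above now perform inline.
from typing import Any, Dict, Iterable


def _blank_if_none(payload: Dict[str, Any], keys: Iterable[str] = ("plan", "code")) -> Dict[str, Any]:
    """Replace a None value for each of the given keys with an empty string, in place."""
    for key in keys:
        if payload.get(key) is None:
            payload[key] = ""
    return payload
```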
weco/auth.py CHANGED
@@ -35,7 +35,7 @@ def save_api_key(api_key: str):
         # Set file permissions to read/write for owner only (600)
         os.chmod(CREDENTIALS_FILE, stat.S_IRUSR | stat.S_IWUSR)
     except OSError as e:
-        print(f"Error: Could not write credentials file or set permissions on {CREDENTIALS_FILE}: {e}")
+        print(f"Error: Unable to save credentials file or set permissions on {CREDENTIALS_FILE}: {e}")


 def load_weco_api_key() -> str | None:
@@ -53,7 +53,7 @@ def load_weco_api_key() -> str | None:
             credentials = json.load(f)
             return credentials.get("api_key")
     except (IOError, json.JSONDecodeError, OSError) as e:
-        print(f"Warning: Could not read or parse credentials file at {CREDENTIALS_FILE}: {e}")
+        print(f"Warning: Unable to read credentials file at {CREDENTIALS_FILE}: {e}")
         return None


@@ -64,7 +64,7 @@ def clear_api_key():
            os.remove(CREDENTIALS_FILE)
            print("Logged out successfully.")
        except OSError as e:
-            print(f"Error: Could not remove credentials file at {CREDENTIALS_FILE}: {e}")
+            print(f"Error: Unable to remove credentials file at {CREDENTIALS_FILE}: {e}")
     else:
         print("Already logged out.")

@@ -129,7 +129,9 @@ def perform_login(console: Console):
                     continue  # Continue polling
                 else:
                     # Unexpected 202 response format
-                    console.print(f"\n[bold red]Error:[/] Received unexpected 202 response: {token_data}")
+                    console.print(
+                        f"\n[bold red]Error:[/] Received unexpected response from authentication server: {token_data}"
+                    )
                     return False
             # Check for standard OAuth2 errors (often 400 Bad Request)
             elif token_response.status_code == 400:
@@ -146,7 +148,7 @@ def perform_login(console: Console):
                     console.print("\n[bold red]Error:[/] Authorization denied by user.")
                     return False
                 else:  # invalid_grant, etc.
-                    error_desc = token_data.get("error_description", "Unknown error during polling.")
+                    error_desc = token_data.get("error_description", "Unknown authentication error occurred.")
                     console.print(f"\n[bold red]Error:[/] {error_desc} ({error_code})")
                     return False

weco/chatbot.py CHANGED
@@ -50,7 +50,7 @@ class UserInteractionHelper:

             if attempts >= max_retries:
                 self.console.print(f"[red]Maximum retry attempts ({max_retries}) reached. Exiting.[/]")
-                raise Exception("Maximum retry attempts exceeded")
+                raise Exception("Maximum retry attempts exceeded. Please try again.")

             # Show available options without the full prompt
             if choices:
@@ -66,7 +66,7 @@ class UserInteractionHelper:
             continue

         # This should never be reached due to the exception above, but just in case
-        raise Exception("Unexpected error in choice selection")
+        raise Exception("Unexpected error while selecting a choice")

     def get_choice_numeric(self, prompt: str, max_number: int, default: int = None, max_retries: int = 5) -> int:
         """Get numeric choice with validation and error handling."""
@@ -87,7 +87,7 @@ class UserInteractionHelper:

             if attempts >= max_retries:
                 self.console.print(f"[red]Maximum retry attempts ({max_retries}) reached. Exiting.[/]")
-                raise Exception("Maximum retry attempts exceeded")
+                raise Exception("Maximum retry attempts exceeded. Please try again.")

             # Show valid range
             self.console.print(f"Please enter a number between [bold]1[/] and [bold]{max_number}[/]")
@@ -115,7 +115,7 @@ class UserInteractionHelper:

             if attempts >= max_retries:
                 self.console.print(f"[red]Maximum retry attempts ({max_retries}) reached. Exiting.[/]")
-                raise Exception("Maximum retry attempts exceeded")
+                raise Exception("Maximum retry attempts exceeded. Please try again.")

             self.console.print("Valid options: [bold]y[/] / [bold]n[/]")
             if default:
@@ -123,7 +123,7 @@ class UserInteractionHelper:

             continue

-        raise Exception("Unexpected error in yes/no selection")
+        raise Exception("Unexpected error while selecting an option")

     def display_optimization_options_table(self, options: List[Dict[str, str]]) -> None:
         """Display optimization options in a formatted table."""
@@ -215,7 +215,10 @@ class Chatbot:

         with self.console.status("[bold green]Generating optimization suggestions...[/]"):
             result = get_optimization_suggestions_from_codebase(
-                self.gitingest_summary, self.gitingest_tree, self.gitingest_content_str, self.console
+                console=self.console,
+                gitingest_summary=self.gitingest_summary,
+                gitingest_tree=self.gitingest_tree,
+                gitingest_content_str=self.gitingest_content_str,
             )

         if result and isinstance(result, list):
@@ -224,7 +227,7 @@ class Chatbot:
             options = None

         if not options or not isinstance(options, list):
-            self.console.print("[red]Failed to get valid optimization options.[/]")
+            self.console.print("[red]Unable to retrieve valid optimization options from the backend.[/]")
             return None

         if not options:
@@ -324,17 +327,19 @@ class Chatbot:
         elif action == "g" or action == "r":
             with self.console.status("[bold green]Generating evaluation script and determining metrics...[/]"):
                 result = generate_evaluation_script_and_metrics(
-                    selected_option["target_file"],
-                    selected_option["description"],
-                    self.gitingest_content_str,
-                    self.console,
+                    console=self.console,
+                    target_file=selected_option["target_file"],
+                    description=selected_option["description"],
+                    gitingest_content_str=self.gitingest_content_str,
                 )
             if result and result[0]:
                 eval_script_content, metric_name, goal, reasoning = result
                 if reasoning:
                     self.console.print(f"[dim]Reasoning: {reasoning}[/]")
             else:
-                self.console.print("[red]Failed to generate an evaluation script.[/]")
+                self.console.print(
+                    "[red]Unable to generate an evaluation script. Please try providing a custom script path instead.[/]"
+                )
                 eval_script_content = None
                 metric_name = None
                 goal = None
@@ -371,7 +376,10 @@ class Chatbot:
         # Analyze the script to determine the proper execution command
         with self.console.status("[bold green]Analyzing script execution requirements...[/]"):
             eval_command = analyze_script_execution_requirements(
-                eval_script_content, eval_script_path_str, selected_option["target_file"], self.console
+                console=self.console,
+                script_content=eval_script_content,
+                script_path=eval_script_path_str,
+                target_file=selected_option["target_file"],
             )

         return {
@@ -386,16 +394,16 @@ class Chatbot:
         """Get or create evaluation script configuration using intelligent conversation-guided approach."""
         with self.console.status("[bold green]Analyzing evaluation environment...[/]"):
             analysis = analyze_evaluation_environment(
-                selected_option["target_file"],
-                selected_option["description"],
-                self.gitingest_summary,
-                self.gitingest_tree,
-                self.gitingest_content_str,
-                self.console,
+                console=self.console,
+                target_file=selected_option["target_file"],
+                description=selected_option["description"],
+                gitingest_summary=self.gitingest_summary,
+                gitingest_tree=self.gitingest_tree,
+                gitingest_content_str=self.gitingest_content_str,
             )

         if not analysis:
-            self.console.print("[yellow]Failed to analyze evaluation environment. Falling back to generation.[/]")
+            self.console.print("[yellow]Unable to analyze evaluation environment. Falling back to script generation.[/]")
             return self.handle_script_generation_workflow(selected_option)

         self.evaluation_analysis = analysis
@@ -529,7 +537,10 @@ class Chatbot:
         if not eval_command or eval_command == f"python {script_path}":
             with self.console.status("[bold green]Analyzing script execution requirements...[/]"):
                 eval_command = analyze_script_execution_requirements(
-                    script_content, script_path, selected_option["target_file"], self.console
+                    console=self.console,
+                    script_content=script_content,
+                    script_path=script_path,
+                    target_file=selected_option["target_file"],
                 )

         self.current_step = "confirmation"
@@ -711,7 +722,7 @@ class Chatbot:
         """Setup evaluation environment for the selected optimization."""
         eval_config = self.get_evaluation_configuration(selected_option)
         if not eval_config:
-            self.console.print("[red]Evaluation script setup failed.[/]")
+            self.console.print("[red]Evaluation script setup failed. Please check your script configuration and try again.[/]")
             return None

         eval_config = self.confirm_and_finalize_evaluation_config(eval_config)
@@ -791,7 +802,7 @@ def run_onboarding_chatbot(
         chatbot = Chatbot(project_path, console, run_parser, model)
         chatbot.start()
     except Exception as e:
-        console.print(f"[bold red]An unexpected error occurred in the chatbot: {e}[/]")
+        console.print(f"[bold red]An unexpected error occurred: {e}[/]")
         import traceback

         traceback.print_exc()
weco/cli.py CHANGED
@@ -61,6 +61,12 @@ def configure_run_parser(run_parser: argparse.ArgumentParser) -> None:
         type=str,
         help="Description of additional instruction or path to a file containing additional instructions. Defaults to None.",
     )
+    run_parser.add_argument(
+        "--eval-timeout",
+        type=int,
+        default=None,
+        help="Timeout in seconds for each evaluation. No timeout by default. Example: --eval-timeout 3600",
+    )


 def execute_run_command(args: argparse.Namespace) -> None:
@@ -77,6 +83,7 @@ def execute_run_command(args: argparse.Namespace) -> None:
         log_dir=args.log_dir,
         additional_instructions=args.additional_instructions,
         console=console,
+        eval_timeout=args.eval_timeout,
     )
     exit_code = 0 if success else 1
     sys.exit(exit_code)
@@ -177,7 +184,9 @@ def main() -> None:

     project_path = pathlib.Path(filtered_args[0]) if filtered_args else pathlib.Path.cwd()
     if not project_path.is_dir():
-        console.print(f"[bold red]Error:[/] Path '{project_path}' is not a valid directory.")
+        console.print(
+            f"[bold red]Error:[/] The path '{project_path}' is not a valid directory. Please provide a valid directory path."
+        )
         sys.exit(1)

     # Pass the run_parser and model to the chatbot
weco/constants.py ADDED
@@ -0,0 +1,7 @@
+# weco/constants.py
+"""
+Constants for the Weco CLI package.
+"""
+
+# API timeout configuration (connect_timeout, read_timeout) in seconds
+DEFAULT_API_TIMEOUT = (10, 800)
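A point worth noting about the new constant: `requests` interprets a two-element timeout tuple as separate connect and read timeouts, so the long 800-second read budget no longer applies to connection attempts. A minimal sketch of that behaviour — the URL here is illustrative:

```python
# Sketch of how requests interprets DEFAULT_API_TIMEOUT; the URL is illustrative.
import requests

from weco.constants import DEFAULT_API_TIMEOUT  # (10, 800)

# With a (connect, read) tuple, an unreachable host fails after ~10 s,
# while a connected but slow endpoint may keep responding for up to 800 s.
response = requests.get("https://api.example.com/health", timeout=DEFAULT_API_TIMEOUT)
print(response.status_code)
```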
weco/optimizer.py CHANGED
@@ -20,7 +20,6 @@ from .api import (
 from .auth import handle_authentication
 from .panels import (
     SummaryPanel,
-    PlanPanel,
     Node,
     MetricTreePanel,
     EvaluationOutputPanel,
@@ -37,6 +36,7 @@ from .utils import (
     smooth_update,
     format_number,
 )
+from .constants import DEFAULT_API_TIMEOUT


 # --- Heartbeat Sender Class ---
@@ -63,7 +63,7 @@ class HeartbeatSender(threading.Thread):

         except Exception as e:
             # Catch any unexpected error in the loop to prevent silent thread death
-            print(f"[ERROR HeartbeatSender] Unhandled exception in run loop for run {self.run_id}: {e}", file=sys.stderr)
+            print(f"[ERROR HeartbeatSender] Unexpected error in heartbeat thread for run {self.run_id}: {e}", file=sys.stderr)
             traceback.print_exc(file=sys.stderr)
             # The loop will break due to the exception, and thread will terminate via finally.

@@ -78,6 +78,7 @@ def execute_optimization(
     log_dir: str = ".runs",
     additional_instructions: Optional[str] = None,
     console: Optional[Console] = None,
+    eval_timeout: Optional[int] = None,
 ) -> bool:
     """
     Execute the core optimization logic.
@@ -153,14 +154,13 @@ def execute_optimization(
         "debug_prob": 0.5,
         "max_debug_depth": max(1, math.ceil(0.1 * steps)),
     }
-    timeout = 800
+    api_timeout = DEFAULT_API_TIMEOUT
     processed_additional_instructions = read_additional_instructions(additional_instructions=additional_instructions)
     source_fp = pathlib.Path(source)
     source_code = read_from_path(fp=source_fp, is_json=False)

     # --- Panel Initialization ---
     summary_panel = SummaryPanel(maximize=maximize, metric_name=metric, total_steps=steps, model=model, runs_dir=log_dir)
-    plan_panel = PlanPanel()
     solution_panels = SolutionPanels(metric_name=metric, source_fp=source_fp)
     eval_output_panel = EvaluationOutputPanel()
     tree_panel = MetricTreePanel(maximize=maximize)
@@ -181,9 +181,10 @@ def execute_optimization(
            additional_instructions=processed_additional_instructions,
            api_keys=llm_api_keys,
            auth_headers=auth_headers,
-            timeout=timeout,
+            timeout=api_timeout,
        )
        run_id = run_response["run_id"]
+        run_name = run_response["run_name"]
        current_run_id_for_heartbeat = run_id

        # --- Start Heartbeat Thread ---
@@ -203,12 +204,14 @@ def execute_optimization(
        write_to_path(fp=source_fp, content=run_response["code"])

        # Update the panels with the initial solution
-        summary_panel.set_run_id(run_id=run_id)  # Add run id now that we have it
+        # Add run id and run name now that we have it
+        summary_panel.set_run_id(run_id=run_id)
+        summary_panel.set_run_name(run_name=run_name)
        # Set the step of the progress bar
        summary_panel.set_step(step=0)
        # Update the token counts
        summary_panel.update_token_counts(usage=run_response["usage"])
-        plan_panel.update(plan=run_response["plan"])
+        summary_panel.update_thinking(thinking=run_response["plan"])
        # Build the metric tree
        tree_panel.build_metric_tree(
            nodes=[
@@ -218,7 +221,7 @@ def execute_optimization(
                    "code": run_response["code"],
                    "step": 0,
                    "metric_value": None,
-                    "is_buggy": False,
+                    "is_buggy": None,
                }
            ]
        )
@@ -227,7 +230,7 @@ def execute_optimization(
        # Update the solution panels with the initial solution and get the panel displays
        solution_panels.update(
            current_node=Node(
-                id=run_response["solution_id"], parent_id=None, code=run_response["code"], metric=None, is_buggy=False
+                id=run_response["solution_id"], parent_id=None, code=run_response["code"], metric=None, is_buggy=None
            ),
            best_node=None,
        )
@@ -238,7 +241,6 @@ def execute_optimization(
            layout=layout,
            sections_to_update=[
                ("summary", summary_panel.get_display()),
-                ("plan", plan_panel.get_display()),
                ("tree", tree_panel.get_display(is_done=False)),
                ("current_solution", current_solution_panel),
                ("best_solution", best_solution_panel),
@@ -248,7 +250,7 @@ def execute_optimization(
        )

        # Run evaluation on the initial solution
-        term_out = run_evaluation(eval_command=eval_command)
+        term_out = run_evaluation(eval_command=eval_command, timeout=eval_timeout)
        # Update the evaluation output panel
        eval_output_panel.update(output=term_out)
        smooth_update(
@@ -265,7 +267,7 @@ def execute_optimization(
            if run_id:
                try:
                    current_status_response = get_optimization_run_status(
-                        run_id=run_id, include_history=False, timeout=30, auth_headers=auth_headers
+                        console=console, run_id=run_id, include_history=False, timeout=(10, 30), auth_headers=auth_headers
                    )
                    current_run_status_val = current_status_response.get("status")
                    if current_run_status_val == "stopping":
@@ -273,30 +275,31 @@ def execute_optimization(
                        user_stop_requested_flag = True
                        break
                except requests.exceptions.RequestException as e:
-                    console.print(f"\n[bold red]Warning: Could not check run status: {e}. Continuing optimization...[/]")
+                    console.print(f"\n[bold red]Warning: Unable to check run status: {e}. Continuing optimization...[/]")
                except Exception as e:
                    console.print(f"\n[bold red]Warning: Error checking run status: {e}. Continuing optimization...[/]")

            # Send feedback and get next suggestion
            eval_and_next_solution_response = evaluate_feedback_then_suggest_next_solution(
+                console=console,
                run_id=run_id,
                execution_output=term_out,
                additional_instructions=current_additional_instructions,
                api_keys=llm_api_keys,
                auth_headers=auth_headers,
-                timeout=timeout,
+                timeout=api_timeout,
            )
            # Save next solution (.runs/<run-id>/step_<step>.<extension>)
            write_to_path(fp=runs_dir / f"step_{step}{source_fp.suffix}", content=eval_and_next_solution_response["code"])
            # Write the next solution to the source file
            write_to_path(fp=source_fp, content=eval_and_next_solution_response["code"])
            status_response = get_optimization_run_status(
-                run_id=run_id, include_history=True, timeout=timeout, auth_headers=auth_headers
+                console=console, run_id=run_id, include_history=True, timeout=api_timeout, auth_headers=auth_headers
            )
            # Update the step of the progress bar, token counts, plan and metric tree
            summary_panel.set_step(step=step)
            summary_panel.update_token_counts(usage=eval_and_next_solution_response["usage"])
-            plan_panel.update(plan=eval_and_next_solution_response["plan"])
+            summary_panel.update_thinking(thinking=eval_and_next_solution_response["plan"])

            nodes_list_from_status = status_response.get("nodes")
            tree_panel.build_metric_tree(nodes=nodes_list_from_status if nodes_list_from_status is not None else [])
@@ -327,7 +330,9 @@ def execute_optimization(
                    is_buggy=node_data["is_buggy"],
                )
            if current_solution_node is None:
-                raise ValueError("Current solution node not found in nodes list from status response")
+                raise ValueError(
+                    "Current solution node not found in the optimization status response. This may indicate a synchronization issue with the backend."
+                )

            # Update the solution panels with the current and best solution
            solution_panels.update(current_node=current_solution_node, best_node=best_solution_node)
@@ -339,7 +344,6 @@ def execute_optimization(
                layout=layout,
                sections_to_update=[
                    ("summary", summary_panel.get_display()),
-                    ("plan", plan_panel.get_display()),
                    ("tree", tree_panel.get_display(is_done=False)),
                    ("current_solution", current_solution_panel),
                    ("best_solution", best_solution_panel),
@@ -347,7 +351,7 @@ def execute_optimization(
                ],
                transition_delay=0.08,  # Slightly longer delay for more noticeable transitions
            )
-            term_out = run_evaluation(eval_command=eval_command)
+            term_out = run_evaluation(eval_command=eval_command, timeout=eval_timeout)
            eval_output_panel.update(output=term_out)
            smooth_update(
                live=live,
@@ -361,17 +365,18 @@ def execute_optimization(
        current_additional_instructions = read_additional_instructions(additional_instructions=additional_instructions)
        # Evaluate the final solution thats been generated
        eval_and_next_solution_response = evaluate_feedback_then_suggest_next_solution(
+            console=console,
            run_id=run_id,
            execution_output=term_out,
            additional_instructions=current_additional_instructions,
            api_keys=llm_api_keys,
-            timeout=timeout,
+            timeout=api_timeout,
            auth_headers=auth_headers,
        )
        summary_panel.set_step(step=steps)
        summary_panel.update_token_counts(usage=eval_and_next_solution_response["usage"])
        status_response = get_optimization_run_status(
-            run_id=run_id, include_history=True, timeout=timeout, auth_headers=auth_headers
+            console=console, run_id=run_id, include_history=True, timeout=api_timeout, auth_headers=auth_headers
        )
        # No need to update the plan panel since we have finished the optimization
        # Get the optimization run status for
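The new `eval_timeout` value flows unchanged from the `--eval-timeout` CLI flag through `execute_optimization` into every `run_evaluation` call. A condensed sketch of the last link in that chain — the evaluation command shown is illustrative:

```python
# Sketch of the --eval-timeout wiring; "python evaluate.py" is an illustrative command.
from weco.utils import run_evaluation

# execute_optimization(..., eval_timeout=3600) forwards the value as-is:
term_out = run_evaluation(eval_command="python evaluate.py", timeout=3600)
print(term_out)  # evaluation output, or a timeout notice after 3600 seconds
```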
weco/panels.py CHANGED
@@ -7,30 +7,40 @@ from rich.syntax import Syntax
 from rich import box
 from typing import Dict, List, Optional, Union, Tuple
 from .utils import format_number
-import pathlib
+from pathlib import Path
 from .__init__ import __dashboard_url__


 class SummaryPanel:
     """Holds a summary of the optimization run."""

-    def __init__(self, maximize: bool, metric_name: str, total_steps: int, model: str, runs_dir: str, run_id: str = None):
+    def __init__(
+        self,
+        maximize: bool,
+        metric_name: str,
+        total_steps: int,
+        model: str,
+        runs_dir: str,
+        run_id: str = None,
+        run_name: str = None,
+    ):
         self.maximize = maximize
         self.metric_name = metric_name
-        self.goal = ("Maximizing" if self.maximize else "Minimizing") + f" {self.metric_name}..."
         self.total_input_tokens = 0
         self.total_output_tokens = 0
         self.total_steps = total_steps
         self.model = model
         self.runs_dir = runs_dir
         self.run_id = run_id if run_id is not None else "N/A"
+        self.run_name = run_name if run_name is not None else "N/A"
         self.dashboard_url = "N/A"
+        self.thinking_content = ""
         self.progress = Progress(
             TextColumn("[progress.description]{task.description}"),
             BarColumn(bar_width=20),
             TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
             TextColumn("•"),
-            TextColumn("[bold]{task.completed}/{task.total} Steps"),
+            TextColumn("[bold]{task.completed}/{task.total} Steps "),
             expand=False,
         )
         self.task_id = self.progress.add_task("", total=total_steps)
@@ -40,6 +50,10 @@ class SummaryPanel:
         self.run_id = run_id
         self.set_dashboard_url(run_id=run_id)

+    def set_run_name(self, run_name: str):
+        """Set the run name."""
+        self.run_name = run_name
+
     def set_dashboard_url(self, run_id: str):
         """Set the dashboard URL."""
         self.dashboard_url = f"{__dashboard_url__}/runs/{run_id}"
@@ -51,69 +65,100 @@ class SummaryPanel:
     def update_token_counts(self, usage: Dict[str, int]):
         """Update token counts from usage data."""
         if not isinstance(usage, dict) or "input_tokens" not in usage or "output_tokens" not in usage:
-            raise ValueError("Invalid token usage response from API.")
+            raise ValueError("Invalid token usage data received.")
         self.total_input_tokens += usage["input_tokens"]
         self.total_output_tokens += usage["output_tokens"]

+    def update_thinking(self, thinking: str):
+        """Update the thinking content."""
+        self.thinking_content = thinking
+
+    def clear_thinking(self):
+        """Clear the thinking content."""
+        self.thinking_content = ""
+
     def get_display(self, final_message: Optional[str] = None) -> Panel:
-        """Create a summary panel with the relevant information."""
-        layout = Layout(name="summary")
-        summary_table = Table(show_header=False, box=None, padding=(0, 1))
+        """Return a Rich panel summarising the current run."""
+        # ───────────────────── summary grid ──────────────────────
+        summary_table = Table.grid(expand=True, padding=(0, 1))
+        summary_table.add_column(ratio=1)
+        summary_table.add_column(justify="right")
+        summary_table.add_row("")

+        # Dashboard url
+        summary_table.add_row(f" Dashboard: [underline blue]{self.dashboard_url}[/]")
         summary_table.add_row("")
-        # Goal
+
         if final_message is not None:
-            summary_table.add_row(f"[bold cyan]Result:[/] {final_message}")
-        else:
-            summary_table.add_row(f"[bold cyan]Goal:[/] {self.goal}")
-        summary_table.add_row("")
-        # Model used
-        summary_table.add_row(f"[bold cyan]Model:[/] {self.model}")
-        summary_table.add_row("")
-        # Log directory
-        summary_table.add_row(f"[bold cyan]Logs:[/] [blue underline]{self.runs_dir}/{self.run_id}[/]")
-        summary_table.add_row("")
-        # Dashboard link
-        summary_table.add_row(f"[bold cyan]Dashboard:[/] [blue underline]{self.dashboard_url}[/]")
-        summary_table.add_row("")
-        # Token counts
-        summary_table.add_row(
-            f"[bold cyan]Tokens:[/] ↑[yellow]{format_number(self.total_input_tokens)}[/] ↓[yellow]{format_number(self.total_output_tokens)}[/] = [green]{format_number(self.total_input_tokens + self.total_output_tokens)}[/]"
+            # Add the final message
+            summary_table.add_row(f"[bold cyan] Result:[/] {final_message}", "")
+            summary_table.add_row("")
+
+        # Token info
+        token_info = (
+            f"[bold cyan] {self.model}:[/] "
+            f"↑[yellow]{format_number(self.total_input_tokens)}[/] "
+            f"[yellow]{format_number(self.total_output_tokens)}[/] = "
+            f"[green]{format_number(self.total_input_tokens + self.total_output_tokens)} Tokens[/]"
         )
+        summary_table.add_row(token_info)
         summary_table.add_row("")
+
         # Progress bar
         summary_table.add_row(self.progress)
+        summary_table.add_row("")

-        # Update layout
-        layout.update(summary_table)
-
-        return Panel(layout, title="[bold]📊 Summary", border_style="magenta", expand=True, padding=(0, 1))
-
-
-class PlanPanel:
-    """Displays the optimization plan with truncation for long plans."""
-
-    def __init__(self):
-        self.plan = ""
+        # Logs url
+        logs_url = Path(self.runs_dir) / self.run_id
+        summary_table.add_row(f" Logs: [underline blue]{logs_url}[/]")
+        summary_table.add_row("")

-    def update(self, plan: str):
-        """Update the plan text."""
-        self.plan = plan
+        if final_message is not None:
+            # Don't include the thinking section
+            return Panel(
+                summary_table,
+                title=f"[bold]📊 {'Maximizing' if self.maximize else 'Minimizing'} {self.run_name}",
+                border_style="magenta",
+                expand=True,
+                padding=(0, 1),
+            )

-    def clear(self):
-        """Clear the plan text."""
-        self.plan = ""
+        # Include the thinking section
+        layout = Layout(name="summary")
+        layout.split_column(
+            Layout(summary_table, name="main_summary", ratio=1),
+            Layout(
+                Panel(
+                    self.thinking_content or "[dim]No thinking content yet...[/]",
+                    title="[bold]📝 Thinking...",
+                    border_style="cyan",
+                    expand=True,
+                    padding=(0, 1),
+                ),
+                name="thinking_section",
+                ratio=1,
+            ),
+        )

-    def get_display(self) -> Panel:
-        """Create a panel displaying the plan with truncation if needed."""
-        return Panel(self.plan, title="[bold]📝 Thinking...", border_style="cyan", expand=True, padding=(0, 1))
+        return Panel(
+            layout,
+            title=f"[bold]📊 {'Maximizing' if self.maximize else 'Minimizing'} {self.run_name}",
+            border_style="magenta",
+            expand=True,
+            padding=(0, 1),
+        )


 class Node:
     """Represents a node in the solution tree."""

     def __init__(
-        self, id: str, parent_id: Union[str, None], code: Union[str, None], metric: Union[float, None], is_buggy: bool
+        self,
+        id: str,
+        parent_id: Union[str, None],
+        code: Union[str, None],
+        metric: Union[float, None],
+        is_buggy: Union[bool, None],
     ):
         self.id = id
         self.parent_id = parent_id
@@ -144,12 +189,15 @@ class MetricTree:
         # Add node to node's parent's children
         if node.parent_id is not None:
             if node.parent_id not in self.nodes:
-                raise ValueError("Could not construct tree: parent node not found.")
+                raise ValueError("Cannot construct optimization tree.")
             self.nodes[node.parent_id].children.append(node)

-    def get_draft_nodes(self) -> List[Node]:
-        """Get all draft nodes from the tree."""
-        return [node for node in self.nodes.values() if node.parent_id is None]
+    def get_root_node(self) -> Node:
+        """Get the root node from the tree."""
+        nodes = [node for node in self.nodes.values() if node.parent_id is None]
+        if len(nodes) != 1:
+            raise ValueError("Cannot construct optimization tree.")
+        return nodes[0]

     def get_best_node(self) -> Optional[Node]:
         """Get the best node from the tree."""
@@ -157,7 +205,8 @@ class MetricTree:
            node
            for node in self.nodes.values()
            if node.evaluated  # evaluated
-            and not node.is_buggy  # not buggy
+            and node.is_buggy
+            is False  # not buggy => is_buggy can exist in 3 states: None (solution has not yet been evaluated for bugs), True (solution has bug), False (solution does not have a bug)
            and node.metric is not None  # has metric
        ]
        if len(measured_nodes) == 0:
@@ -247,8 +296,8 @@ class MetricTreePanel:
            append_rec(child, subtree)

        tree = Tree("", hide_root=True)
-        for n in self.metric_tree.get_draft_nodes():
-            append_rec(n, tree)
+        root_node = self.metric_tree.get_root_node()
+        append_rec(node=root_node, tree=tree)

        return tree

@@ -286,7 +335,7 @@ class EvaluationOutputPanel:
 class SolutionPanels:
     """Displays the current and best solutions side by side."""

-    def __init__(self, metric_name: str, source_fp: pathlib.Path):
+    def __init__(self, metric_name: str, source_fp: Path):
         # Current solution
         self.current_node = None
         # Best solution
@@ -296,7 +345,7 @@ class SolutionPanels:
         # Determine the lexer for the source file
         self.lexer = self._determine_lexer(source_fp)

-    def _determine_lexer(self, source_fp: pathlib.Path) -> str:
+    def _determine_lexer(self, source_fp: Path) -> str:
         """Determine the lexer for the source file."""
         return Syntax.from_path(source_fp).lexer

@@ -346,10 +395,7 @@ def create_optimization_layout() -> Layout:
     )

     # Split the top section into left and right
-    layout["top_section"].split_row(Layout(name="left_panels", ratio=1), Layout(name="tree", ratio=1))
-
-    # Split the left panels into summary and thinking
-    layout["left_panels"].split_column(Layout(name="summary", ratio=2), Layout(name="plan", ratio=1))
+    layout["top_section"].split_row(Layout(name="summary", ratio=1), Layout(name="tree", ratio=1))

     # Split the middle section into left and right
     layout["middle_section"].split_row(Layout(name="current_solution", ratio=1), Layout(name="best_solution", ratio=1))
weco/utils.py CHANGED
@@ -45,7 +45,7 @@ def determine_default_model(llm_api_keys: Dict[str, Any]) -> str:
         return "gemini-2.5-pro"
     else:
         raise ValueError(
-            "No LLM API keys found in environment. Please set one of the following: OPENAI_API_KEY, ANTHROPIC_API_KEY, GEMINI_API_KEY."
+            "No LLM API keys found in environment variables. Please set one of the following: OPENAI_API_KEY, ANTHROPIC_API_KEY, or GEMINI_API_KEY based on your model of choice."
         )


@@ -84,7 +84,7 @@ def write_to_path(fp: pathlib.Path, content: Union[str, Dict[str, Any]], is_json
     elif isinstance(content, str):
         f.write(content)
     else:
-        raise TypeError("Content must be str or Dict[str, Any]")
+        raise TypeError("Error writing to file. Please verify the file path and try again.")


 # Visualization helper functions
@@ -124,19 +124,54 @@ def smooth_update(


 # Other helper functions
-def run_evaluation(eval_command: str) -> str:
+DEFAULT_MAX_LINES = 50
+DEFAULT_MAX_CHARS = 5000
+
+
+def truncate_output(output: str, max_lines: int = DEFAULT_MAX_LINES, max_chars: int = DEFAULT_MAX_CHARS) -> str:
+    """Truncate the output to a reasonable size."""
+    lines = output.splitlines()
+
+    # Determine what truncations are needed based on original output
+    lines_truncated = len(lines) > max_lines
+    chars_truncated = len(output) > max_chars
+
+    # Apply truncations to the original output
+    if lines_truncated:
+        output = "\n".join(lines[-max_lines:])
+
+    if chars_truncated:
+        output = output[-max_chars:]
+
+    # Add prefixes for truncations that were applied
+    prefixes = []
+    if lines_truncated:
+        prefixes.append(f"truncated to last {max_lines} lines")
+    if chars_truncated:
+        prefixes.append(f"truncated to last {max_chars} characters")
+
+    if prefixes:
+        prefix_text = ", ".join(prefixes)
+        output = f"... ({prefix_text})\n{output}"
+
+    return output
+
+
+def run_evaluation(eval_command: str, timeout: int | None = None) -> str:
     """Run the evaluation command on the code and return the output."""

     # Run the eval command as is
-    result = subprocess.run(eval_command, shell=True, capture_output=True, text=True, check=False)
-
-    # Combine stdout and stderr for complete output
-    output = result.stderr if result.stderr else ""
-    if result.stdout:
-        if len(output) > 0:
-            output += "\n"
-        output += result.stdout
-    return output
+    try:
+        result = subprocess.run(eval_command, shell=True, capture_output=True, text=True, check=False, timeout=timeout)
+        # Combine stdout and stderr for complete output
+        output = result.stderr if result.stderr else ""
+        if result.stdout:
+            if len(output) > 0:
+                output += "\n"
+            output += result.stdout
+        return truncate_output(output)
+    except subprocess.TimeoutExpired:
+        return f"Evaluation timed out after {'an unspecified duration' if timeout is None else f'{timeout} seconds'}."


 # Update Check Function
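A quick usage check of the new truncation helper, assuming the package is importable as `weco`:

```python
# Usage sketch for the truncate_output helper added above.
from weco.utils import truncate_output

long_output = "\n".join(f"line {i}" for i in range(200))
truncated = truncate_output(long_output)

print(truncated.splitlines()[0])    # "... (truncated to last 50 lines)"
print(len(truncated.splitlines()))  # 51: the notice line plus the last 50 lines
```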
weco-0.2.25.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: weco
-Version: 0.2.23
+Version: 0.2.25
 Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
 Author-email: Weco AI Team <contact@weco.ai>
 License: MIT
@@ -16,6 +16,8 @@ Requires-Dist: requests
 Requires-Dist: rich
 Requires-Dist: packaging
 Requires-Dist: gitingest
+Requires-Dist: fastapi
+Requires-Dist: slowapi
 Provides-Extra: dev
 Requires-Dist: ruff; extra == "dev"
 Requires-Dist: build; extra == "dev"
@@ -158,6 +160,7 @@ For more advanced examples, including [Triton](/examples/triton/README.md), [CUD
 | `-M, --model` | Model identifier for the LLM to use (e.g., `o4-mini`, `claude-sonnet-4-0`). | `o4-mini` when `OPENAI_API_KEY` is set; `claude-sonnet-4-0` when `ANTHROPIC_API_KEY` is set; `gemini-2.5-pro` when `GEMINI_API_KEY` is set. | `-M o4-mini` |
 | `-i, --additional-instructions`| Natural language description of specific instructions **or** path to a file containing detailed instructions to guide the LLM. | `None` | `-i instructions.md` or `-i "Optimize the model for faster inference"`|
 | `-l, --log-dir` | Path to the directory to log intermediate steps and final optimization result. | `.runs/` | `-l ./logs/` |
+| `--eval-timeout` | Timeout in seconds for each step in evaluation. | No timeout (unlimited) | `--eval-timeout 3600` |

 ---

@@ -248,28 +251,38 @@ Final speedup value = 1.5

 Weco will parse this output to extract the numerical value (1.5 in this case) associated with the metric name ('speedup').

-## Contributing
+## Supported Models

-We welcome your contributions! To get started:
+Weco supports the following LLM models:

-1. **Fork & Clone the Repository:**
-   ```bash
-   git clone https://github.com/WecoAI/weco-cli.git
-   cd weco-cli
-   ```
+### OpenAI Models
+- `o3`
+- `o3-mini`
+- `o4-mini`
+- `o1-pro`
+- `o1`
+- `gpt-4.1`
+- `gpt-4.1-mini`
+- `gpt-4.1-nano`
+- `gpt-4o`
+- `gpt-4o-mini`

-2. **Install Dependencies:**
-   ```bash
-   pip install -e ".[dev]"
-   ```
+### Anthropic Models
+- `claude-opus-4-0`
+- `claude-sonnet-4-0`
+- `claude-3-7-sonnet-latest`

-3. **Create a Feature Branch:**
-   ```bash
-   git checkout -b feature/your-feature-name
-   ```
+### Gemini Models
+- `gemini-2.5-pro`
+- `gemini-2.5-flash`
+- `gemini-2.5-flash-lite`
+
+You can specify any of these models using the `-M` or `--model` flag. Ensure you have the corresponding API key set as an environment variable for the model provider you wish to use.

-4. **Make Changes:** Ensure your code adheres to our style guidelines and includes relevant tests.
+---
+
+## Contributing

-5. **Commit, Push & Open a PR**: Commit your changes, and open a pull request with a clear description of your enhancements.
+We welcome contributions! Please see [contributing.md](contributing.md) for detailed guidelines on how to contribute to this project.

 ---
weco-0.2.25.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
+weco/__init__.py,sha256=ClO0uT6GKOA0iSptvP0xbtdycf0VpoPTq37jHtvlhtw,303
+weco/api.py,sha256=saGYG-DEgexw-ykpc86h6_b-FvC0vES4QwnJaUt3dwc,13205
+weco/auth.py,sha256=KMSAsN1V5wx7KUsYL1cEOOiG29Pqf4Exb3EPW4mAWC0,10003
+weco/chatbot.py,sha256=MSG3yybZ0fHTpwW3If9TpOWOAE4EqrcUWpe3q16AjZE,37673
+weco/cli.py,sha256=75JdYpUf0qdJW5pjycZoHUKA2n2MYj33qYM9WQwLT2s,8184
+weco/constants.py,sha256=vfQGDf9_kzlN9BzEFvMsd0EeXOsRyzpvSWyxOJgRauE,168
+weco/optimizer.py,sha256=E5Jii0rTdI9pxkstaM7ipjF4viX5XnSy5w71gdps4Ws,23662
+weco/panels.py,sha256=jwAV_uoa0ZI9vjyey-hSY3rx4pfNNkZvPzqt-iz-RXo,16808
+weco/utils.py,sha256=HecbOqD5rBuVhUkLixVrTWBMJ-ZMAhK-889N-lCk3dQ,7335
+weco-0.2.25.dist-info/licenses/LICENSE,sha256=p_GQqJBvuZgkLNboYKyH-5dhpTDlKs2wq2TVM55WrWE,1065
+weco-0.2.25.dist-info/METADATA,sha256=Sf4up2AoxvFs0ByA00vKacOxEHm8peZaSdtGdKLgHn8,16069
+weco-0.2.25.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+weco-0.2.25.dist-info/entry_points.txt,sha256=ixJ2uClALbCpBvnIR6BXMNck8SHAab8eVkM9pIUowcs,39
+weco-0.2.25.dist-info/top_level.txt,sha256=F0N7v6e2zBSlsorFv-arAq2yDxQbzX3KVO8GxYhPUeE,5
+weco-0.2.25.dist-info/RECORD,,

weco-0.2.23.dist-info/RECORD REMOVED
@@ -1,14 +0,0 @@
-weco/__init__.py,sha256=ClO0uT6GKOA0iSptvP0xbtdycf0VpoPTq37jHtvlhtw,303
-weco/api.py,sha256=hqAyMHGQhJr8BZTUo0jznfO0ossGU3ehEZZehPu6Mu8,11838
-weco/auth.py,sha256=6bDQv07sx7uxA9CrN3HqUdCHV6nqXO41PGCicquvB00,9919
-weco/chatbot.py,sha256=H6d5yK9MB3pqpE7XVh_HAi1YAmxKy0v_xdozVSlKPCc,36959
-weco/cli.py,sha256=Jy7kQEsNKdV7Wds9Z0DIWBeLpEVyssIiOBiQ4zCl3Lw,7862
-weco/optimizer.py,sha256=z86-js_rvLMv3J8zCqvtc1xJC0EA0WqrN9_BlmX2RK4,23259
-weco/panels.py,sha256=Cnro4Q65n7GGh0FBXuB_OGSxRVobd4k5lOuBViTQaaM,15591
-weco/utils.py,sha256=5Pbhv_5wbTRv93Ws7aJfIOtcxeeqNrDRT3bV6YFOdgM,6032
-weco-0.2.23.dist-info/licenses/LICENSE,sha256=p_GQqJBvuZgkLNboYKyH-5dhpTDlKs2wq2TVM55WrWE,1065
-weco-0.2.23.dist-info/METADATA,sha256=E_u4w-WaRzBVYrlEzraepi0y7E1Cc1zHOhBu4Cp01TI,15445
-weco-0.2.23.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-weco-0.2.23.dist-info/entry_points.txt,sha256=ixJ2uClALbCpBvnIR6BXMNck8SHAab8eVkM9pIUowcs,39
-weco-0.2.23.dist-info/top_level.txt,sha256=F0N7v6e2zBSlsorFv-arAq2yDxQbzX3KVO8GxYhPUeE,5
-weco-0.2.23.dist-info/RECORD,,