weco 0.2.27__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
weco/api.py CHANGED
@@ -4,22 +4,54 @@ import requests
 from rich.console import Console
 
 from weco import __pkg_version__, __base_url__
-from .constants import DEFAULT_API_TIMEOUT
-from .utils import truncate_output
+from .constants import CODEGEN_API_TIMEOUT, STATUS_API_TIMEOUT
+from .utils import truncate_output, determine_model_for_onboarding
 
 
 def handle_api_error(e: requests.exceptions.HTTPError, console: Console) -> None:
     """Extract and display error messages from API responses in a structured format."""
+    status = getattr(e.response, "status_code", None)
     try:
-        detail = e.response.json()["detail"]
-    except (ValueError, KeyError):  # Handle cases where response is not JSON or detail key is missing
-        detail = f"HTTP {e.response.status_code} Error: {e.response.text}"
-    console.print(f"[bold red]{detail}[/]")
+        payload = e.response.json()
+        detail = payload.get("detail", payload)
+    except (ValueError, AttributeError):
+        detail = getattr(e.response, "text", "") or f"HTTP {status} Error"
+
+    def _render(detail_obj: Any) -> None:
+        if isinstance(detail_obj, str):
+            console.print(f"[bold red]{detail_obj}[/]")
+        elif isinstance(detail_obj, dict):
+            # Try common message keys in order of preference
+            message_keys = ("message", "error", "msg", "detail")
+            message = next((detail_obj.get(key) for key in message_keys if detail_obj.get(key)), None)
+            suggestion = detail_obj.get("suggestion")
+            if message:
+                console.print(f"[bold red]{message}[/]")
+            else:
+                console.print(f"[bold red]HTTP {status} Error[/]")
+            if suggestion:
+                console.print(f"[yellow]{suggestion}[/]")
+            extras = {
+                k: v
+                for k, v in detail_obj.items()
+                if k not in {"message", "error", "msg", "detail", "suggestion"} and v not in (None, "")
+            }
+            for key, value in extras.items():
+                console.print(f"[dim]{key}: {value}[/]")
+        elif isinstance(detail_obj, list) and detail_obj:
+            _render(detail_obj[0])
+            for extra in detail_obj[1:]:
+                console.print(f"[yellow]{extra}[/]")
+        else:
+            console.print(f"[bold red]{detail_obj or f'HTTP {status} Error'}[/]")
+
+    _render(detail)
 
 
 def start_optimization_run(
     console: Console,
     source_code: str,
+    source_path: str,
     evaluation_command: str,
     metric_name: str,
     maximize: bool,
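
The rewritten handler walks structured error payloads (string, dict, or list) instead of assuming `detail` is a plain string. Below is a minimal sketch of the new behavior using a stubbed response; the 402 payload and its messages are illustrative, not actual Weco server responses.

from unittest.mock import Mock

import requests
from rich.console import Console

from weco.api import handle_api_error  # the function shown above

mock_response = Mock()
mock_response.status_code = 402
mock_response.json.return_value = {
    "detail": {
        "message": "Insufficient credits",  # matched first by _render's message_keys
        "suggestion": "Top up with 'weco credits topup'.",
        "balance": 0.25,  # leftover key, printed dimmed by the extras loop
    }
}

error = requests.exceptions.HTTPError("402 Client Error", response=mock_response)
handle_api_error(error, Console())
# Prints "Insufficient credits" in bold red, the suggestion in yellow,
# and "balance: 0.25" in dim text.
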
@@ -28,9 +60,11 @@ def start_optimization_run(
     evaluator_config: Dict[str, Any],
     search_policy_config: Dict[str, Any],
     additional_instructions: str = None,
-    api_keys: Dict[str, Any] = {},
+    eval_timeout: Optional[int] = None,
+    save_logs: bool = False,
+    log_dir: str = ".runs",
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
+    timeout: Union[int, Tuple[int, int]] = CODEGEN_API_TIMEOUT,
 ) -> Optional[Dict[str, Any]]:
     """Start the optimization run."""
     with console.status("[bold green]Starting Optimization..."):
@@ -39,6 +73,7 @@ def start_optimization_run(
             f"{__base_url__}/runs/",
             json={
                 "source_code": source_code,
+                "source_path": source_path,
                 "additional_instructions": additional_instructions,
                 "objective": {"evaluation_command": evaluation_command, "metric_name": metric_name, "maximize": maximize},
                 "optimizer": {
@@ -47,7 +82,10 @@ def start_optimization_run(
                     "evaluator": evaluator_config,
                     "search_policy": search_policy_config,
                 },
-                "metadata": {"client_name": "cli", "client_version": __pkg_version__, **api_keys},
+                "eval_timeout": eval_timeout,
+                "save_logs": save_logs,
+                "log_dir": log_dir,
+                "metadata": {"client_name": "cli", "client_version": __pkg_version__},
             },
             headers=auth_headers,
             timeout=timeout,
@@ -68,14 +106,37 @@ def start_optimization_run(
             return None
 
 
+def resume_optimization_run(
+    console: Console, run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = STATUS_API_TIMEOUT
+) -> Optional[Dict[str, Any]]:
+    """Request the backend to resume an interrupted run."""
+    with console.status("[bold green]Resuming run..."):
+        try:
+            response = requests.post(
+                f"{__base_url__}/runs/{run_id}/resume",
+                json={"metadata": {"client_name": "cli", "client_version": __pkg_version__}},
+                headers=auth_headers,
+                timeout=timeout,
+            )
+            response.raise_for_status()
+            result = response.json()
+            return result
+        except requests.exceptions.HTTPError as e:
+            handle_api_error(e, console)
+            return None
+        except Exception as e:
+            console.print(f"[bold red]Error resuming run: {e}[/]")
+            return None
+
+
 def evaluate_feedback_then_suggest_next_solution(
     console: Console,
     run_id: str,
+    step: int,
     execution_output: str,
     additional_instructions: str = None,
-    api_keys: Dict[str, Any] = {},
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
+    timeout: Union[int, Tuple[int, int]] = CODEGEN_API_TIMEOUT,
 ) -> Dict[str, Any]:
     """Evaluate the feedback and suggest the next solution."""
     try:
@@ -84,11 +145,7 @@ def evaluate_feedback_then_suggest_next_solution(
 
         response = requests.post(
             f"{__base_url__}/runs/{run_id}/suggest",
-            json={
-                "execution_output": truncated_output,
-                "additional_instructions": additional_instructions,
-                "metadata": {**api_keys},
-            },
+            json={"execution_output": truncated_output, "additional_instructions": additional_instructions, "metadata": {}},
             headers=auth_headers,
             timeout=timeout,
         )
@@ -99,8 +156,44 @@ def evaluate_feedback_then_suggest_next_solution(
             result["plan"] = ""
         if result.get("code") is None:
             result["code"] = ""
-
         return result
+    except requests.exceptions.ReadTimeout as e:
+        # NOTE: This can occur when:
+        # 1. The server is busy and the request times out
+        # 2. When intermediaries drop the connection so even after the server responds,
+        #    the client doesn't receive the response and times out
+
+        # Here we ONLY try to recover in the latter case i.e., server completed request but
+        # client didn't receive the response and timed out
+        run_status_recovery_response = get_optimization_run_status(
+            console=console, run_id=run_id, include_history=True, auth_headers=auth_headers
+        )
+        current_step = run_status_recovery_response.get("current_step")
+        current_status = run_status_recovery_response.get("status")
+        # The run should be "running" and the current step should correspond to the solution step we are attempting to generate
+        is_valid_run_state = current_status is not None and current_status == "running"
+        is_valid_step = current_step is not None and current_step == step
+        if is_valid_run_state and is_valid_step:
+            nodes = run_status_recovery_response.get("nodes") or []
+            # We need at least 2 nodes to reconstruct the expected response i.e., the last two nodes
+            if len(nodes) >= 2:
+                nodes_sorted_ascending = sorted(nodes, key=lambda n: n["step"])
+                latest_node = nodes_sorted_ascending[-1]
+                penultimate_node = nodes_sorted_ascending[-2]
+                # If the server finished generating the next candidate, it should be exactly this step
+                if latest_node and latest_node["step"] == step:
+                    # Try to reconstruct the expected response from the /suggest endpoint using the run status info
+                    reconstructed_expected_response = {
+                        "run_id": run_id,
+                        "previous_solution_metric_value": penultimate_node.get("metric_value"),
+                        "solution_id": latest_node.get("solution_id"),
+                        "code": latest_node.get("code"),
+                        "plan": latest_node.get("plan"),
+                        "is_done": False,
+                    }
+                    return reconstructed_expected_response
+        # If we couldn't recover, raise the timeout error so the run can be resumed by the user
+        raise requests.exceptions.ReadTimeout(e)
     except requests.exceptions.HTTPError as e:
         # Allow caller to handle suggest errors, maybe retry or terminate
         handle_api_error(e, console)
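
The recovery path above only trusts a reconstructed answer when the run is still "running" and the newest node's step is exactly the one whose `/suggest` response was lost. A standalone sketch of that check, with a hypothetical status payload (the field names mirror those the code reads; the values are made up):

# Hypothetical run-status payload shape, per the fields read above.
status_payload = {
    "status": "running",
    "current_step": 7,
    "nodes": [
        {"step": 6, "solution_id": "s-6", "metric_value": 0.81, "code": "...", "plan": "..."},
        {"step": 7, "solution_id": "s-7", "metric_value": None, "code": "...", "plan": "..."},
    ],
}

step = 7  # the step whose /suggest response never reached the client
nodes = sorted(status_payload.get("nodes") or [], key=lambda n: n["step"])
can_recover = (
    status_payload.get("status") == "running"
    and status_payload.get("current_step") == step
    and len(nodes) >= 2
    and nodes[-1]["step"] == step
)
if can_recover:
    latest, penultimate = nodes[-1], nodes[-2]
    recovered = {
        "run_id": "0002e071-1b67-411f-a514-36947f0c4b31",
        "previous_solution_metric_value": penultimate.get("metric_value"),
        "solution_id": latest.get("solution_id"),
        "code": latest.get("code"),
        "plan": latest.get("plan"),
        "is_done": False,
    }
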
@@ -115,7 +208,7 @@ def get_optimization_run_status(
     run_id: str,
     include_history: bool = False,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
+    timeout: Union[int, Tuple[int, int]] = STATUS_API_TIMEOUT,
 ) -> Dict[str, Any]:
     """Get the current status of the optimization run."""
     try:
@@ -186,43 +279,17 @@ def report_termination(
         return False
 
 
-# --- Chatbot API Functions ---
-def _determine_model_and_api_key() -> tuple[str, dict[str, str]]:
-    """Determine the model and API key to use based on available environment variables.
-
-    Uses the shared model selection logic to maintain consistency.
-    Returns (model_name, api_key_dict)
-    """
-    from .utils import read_api_keys_from_env, determine_default_model
-
-    llm_api_keys = read_api_keys_from_env()
-    model = determine_default_model(llm_api_keys)
-
-    # Create API key dictionary with only the key for the selected model
-    if model == "o4-mini":
-        api_key_dict = {"OPENAI_API_KEY": llm_api_keys["OPENAI_API_KEY"]}
-    elif model == "claude-sonnet-4-0":
-        api_key_dict = {"ANTHROPIC_API_KEY": llm_api_keys["ANTHROPIC_API_KEY"]}
-    elif model == "gemini-2.5-pro":
-        api_key_dict = {"GEMINI_API_KEY": llm_api_keys["GEMINI_API_KEY"]}
-    else:
-        # This should never happen if determine_default_model works correctly
-        raise ValueError(f"Unknown default model choice: {model}")
-
-    return model, api_key_dict
-
-
 def get_optimization_suggestions_from_codebase(
     console: Console,
     gitingest_summary: str,
     gitingest_tree: str,
     gitingest_content_str: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
+    timeout: Union[int, Tuple[int, int]] = CODEGEN_API_TIMEOUT,
 ) -> Optional[List[Dict[str, Any]]]:
     """Analyze codebase and get optimization suggestions using the model-agnostic backend API."""
     try:
-        model, api_key_dict = _determine_model_and_api_key()
+        model = determine_model_for_onboarding()
         response = requests.post(
             f"{__base_url__}/onboard/analyze-codebase",
             json={
@@ -230,7 +297,7 @@
                 "gitingest_tree": gitingest_tree,
                 "gitingest_content": gitingest_content_str,
                 "model": model,
-                "metadata": api_key_dict,
+                "metadata": {},
             },
             headers=auth_headers,
             timeout=timeout,
@@ -253,11 +320,11 @@ def generate_evaluation_script_and_metrics(
     description: str,
     gitingest_content_str: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
+    timeout: Union[int, Tuple[int, int]] = CODEGEN_API_TIMEOUT,
 ) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
     """Generate evaluation script and determine metrics using the model-agnostic backend API."""
     try:
-        model, api_key_dict = _determine_model_and_api_key()
+        model = determine_model_for_onboarding()
         response = requests.post(
             f"{__base_url__}/onboard/generate-script",
             json={
@@ -265,7 +332,7 @@
                 "description": description,
                 "gitingest_content": gitingest_content_str,
                 "model": model,
-                "metadata": api_key_dict,
+                "metadata": {},
             },
             headers=auth_headers,
             timeout=timeout,
@@ -289,11 +356,11 @@ def analyze_evaluation_environment(
     gitingest_tree: str,
     gitingest_content_str: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
+    timeout: Union[int, Tuple[int, int]] = CODEGEN_API_TIMEOUT,
 ) -> Optional[Dict[str, Any]]:
     """Analyze existing evaluation scripts and environment using the model-agnostic backend API."""
     try:
-        model, api_key_dict = _determine_model_and_api_key()
+        model = determine_model_for_onboarding()
         response = requests.post(
             f"{__base_url__}/onboard/analyze-environment",
             json={
@@ -303,7 +370,7 @@
                 "gitingest_tree": gitingest_tree,
                 "gitingest_content": gitingest_content_str,
                 "model": model,
-                "metadata": api_key_dict,
+                "metadata": {},
             },
             headers=auth_headers,
             timeout=timeout,
@@ -325,11 +392,11 @@ def analyze_script_execution_requirements(
     script_path: str,
     target_file: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
+    timeout: Union[int, Tuple[int, int]] = CODEGEN_API_TIMEOUT,
 ) -> Optional[str]:
     """Analyze script to determine proper execution command using the model-agnostic backend API."""
     try:
-        model, api_key_dict = _determine_model_and_api_key()
+        model = determine_model_for_onboarding()
        response = requests.post(
             f"{__base_url__}/onboard/analyze-script",
             json={
@@ -337,7 +404,7 @@
                 "script_path": script_path,
                 "target_file": target_file,
                 "model": model,
-                "metadata": api_key_dict,
+                "metadata": {},
             },
             headers=auth_headers,
             timeout=timeout,
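
A recurring change across the onboarding hunks: every endpoint now sends `"metadata": {}` instead of forwarding per-provider API keys, and resolves the model via the new `determine_model_for_onboarding()` helper, consistent with server-managed keys under credit billing. The helper's body is not part of this diff; a hypothetical sketch of its shape, given the new `DEFAULT_MODEL` constant in weco/constants.py (the `WECO_MODEL` env-var override is an assumption):

import os

DEFAULT_MODEL = "o4-mini"  # mirrors the new constant shown at the end of this diff


def determine_model_for_onboarding() -> str:
    # Assumption: an environment override plus the package default; the real
    # selection logic lives in weco/utils.py and may differ.
    return os.environ.get("WECO_MODEL", DEFAULT_MODEL)
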
weco/auth.py CHANGED
@@ -184,9 +184,9 @@ def perform_login(console: Console):
         return False
 
 
-def handle_authentication(console: Console, llm_api_keys: dict) -> tuple[str | None, dict]:
+def handle_authentication(console: Console) -> tuple[str | None, dict]:
     """
-    Handle the complete authentication flow.
+    Handle the complete authentication flow. Authentication is now mandatory.
 
     Returns:
         tuple: (weco_api_key, auth_headers)
@@ -194,13 +194,16 @@ def handle_authentication(console: Console) -> tuple[str | None, dict]:
     weco_api_key = load_weco_api_key()
 
     if not weco_api_key:
+        console.print("[bold yellow]Authentication Required[/]")
+        console.print("With our new credit-based billing system, authentication is required to use Weco.")
+        console.print("You'll receive free credits to get started!")
+        console.print("")
+
         login_choice = Prompt.ask(
-            "Log in to Weco to save run history or use anonymously? ([bold]L[/]ogin / [bold]S[/]kip)",
-            choices=["l", "s"],
-            default="s",
+            "Would you like to log in now? ([bold]Y[/]es / [bold]N[/]o)", choices=["y", "n"], default="y"
         ).lower()
 
-        if login_choice == "l":
+        if login_choice == "y":
             console.print("[cyan]Starting login process...[/]")
             if not perform_login(console):
                 console.print("[bold red]Login process failed or was cancelled.[/]")
@@ -210,14 +213,9 @@ def handle_authentication(console: Console) -> tuple[str | None, dict]:
             if not weco_api_key:
                 console.print("[bold red]Error: Login completed but failed to retrieve API key.[/]")
                 return None, {}
-
-        elif login_choice == "s":
-            console.print("[yellow]Proceeding anonymously. LLM API keys must be provided via environment variables.[/]")
-            if not llm_api_keys:
-                console.print(
-                    "[bold red]Error:[/] No LLM API keys found in environment (e.g., OPENAI_API_KEY). Cannot proceed anonymously."
-                )
-                return None, {}
+        else:
+            console.print("[yellow]Authentication is required to use Weco. Please run 'weco' again when ready to log in.[/]")
+            return None, {}
 
     # Build auth headers
     auth_headers = {}
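
Anonymous use is gone: callers now treat a `(None, {})` return as a hard stop. A caller-side sketch of the contract; the Bearer header shape is an assumption, since the hunk above ends before `auth_headers` is populated:

from rich.console import Console

from weco.auth import handle_authentication  # the function shown above

console = Console()
weco_api_key, auth_headers = handle_authentication(console)
if weco_api_key is None:
    raise SystemExit(1)  # authentication is mandatory as of 0.3.0

# Assumption: the headers carry the key as a bearer token, e.g.
# auth_headers == {"Authorization": f"Bearer {weco_api_key}"}; the actual
# construction happens just past the end of the hunk above.
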
weco/chatbot.py CHANGED
@@ -220,6 +220,7 @@ class Chatbot:
             gitingest_summary=self.gitingest_summary,
             gitingest_tree=self.gitingest_tree,
             gitingest_content_str=self.gitingest_content_str,
+            auth_headers=getattr(self, "auth_headers", {}),
         )
 
         if result and isinstance(result, list):
@@ -332,6 +333,7 @@
             target_file=selected_option["target_file"],
             description=selected_option["description"],
             gitingest_content_str=self.gitingest_content_str,
+            auth_headers=getattr(self, "auth_headers", {}),
         )
         if result and result[0]:
             eval_script_content, metric_name, goal, reasoning = result
@@ -381,6 +383,7 @@
             script_content=eval_script_content,
             script_path=eval_script_path_str,
             target_file=selected_option["target_file"],
+            auth_headers=getattr(self, "auth_headers", {}),
         )
 
         return {
@@ -401,6 +404,7 @@
             gitingest_summary=self.gitingest_summary,
             gitingest_tree=self.gitingest_tree,
             gitingest_content_str=self.gitingest_content_str,
+            auth_headers=getattr(self, "auth_headers", {}),
         )
 
         if not analysis:
@@ -542,6 +546,7 @@
             script_content=script_content,
             script_path=script_path,
             target_file=selected_option["target_file"],
+            auth_headers=getattr(self, "auth_headers", {}),
         )
 
         self.current_step = "confirmation"
@@ -749,10 +754,9 @@
             self.resolved_model = self.user_specified_model
         else:
             # Use same default model selection as weco run
-            from .utils import determine_default_model, read_api_keys_from_env
+            from .utils import determine_model_for_onboarding
 
-            llm_api_keys = read_api_keys_from_env()
-            self.resolved_model = determine_default_model(llm_api_keys)
+            self.resolved_model = determine_model_for_onboarding()
 
         target_file = selected_option["target_file"]
         additional_instructions = selected_option["description"]
@@ -766,6 +770,17 @@
         self.console.print("[bold cyan]Welcome to Weco![/]")
         self.console.print(f"Let's optimize your codebase in: [cyan]{self.project_path}[/]\n")
 
+        # Mandatory authentication as per PLAN.md
+        from .auth import handle_authentication
+
+        weco_api_key, auth_headers = handle_authentication(self.console)
+        if not weco_api_key:
+            self.console.print("[yellow]Authentication is required to use Weco. Exiting...[/]")
+            return
+
+        # Store auth headers for API calls
+        self.auth_headers = auth_headers
+
         options = self.analyze_codebase_and_get_optimization_options()
         if not options:
             return
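
Note the recurring `getattr(self, "auth_headers", {})` rather than `self.auth_headers`: any code path reached before `start()` has stored the headers falls back to empty headers instead of raising `AttributeError`. A minimal illustration of that fallback:

class _Bot:
    """Stand-in for Chatbot; auth_headers is only set after login succeeds."""


bot = _Bot()
assert getattr(bot, "auth_headers", {}) == {}  # before authentication: safe default
bot.auth_headers = {"Authorization": "Bearer demo-key"}  # hypothetical header shape
assert getattr(bot, "auth_headers", {}) == {"Authorization": "Bearer demo-key"}
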
weco/cli.py CHANGED
@@ -74,6 +74,52 @@ def configure_run_parser(run_parser: argparse.ArgumentParser) -> None:
     )
 
 
+def configure_credits_parser(credits_parser: argparse.ArgumentParser) -> None:
+    """Configure the credits command parser and all its subcommands."""
+    credits_subparsers = credits_parser.add_subparsers(dest="credits_command", help="Credit management commands")
+
+    # Credits balance command
+    _ = credits_subparsers.add_parser("balance", help="Check your current credit balance")
+
+    # Coerce CLI input into a float with two decimal precision for the API payload.
+    def _parse_credit_amount(value: str) -> float:
+        try:
+            amount = float(value)
+        except ValueError as exc:
+            raise argparse.ArgumentTypeError("Amount must be a number.") from exc
+
+        return round(amount, 2)
+
+    # Credits topup command
+    topup_parser = credits_subparsers.add_parser("topup", help="Purchase additional credits")
+    topup_parser.add_argument(
+        "amount",
+        nargs="?",
+        type=_parse_credit_amount,
+        default=_parse_credit_amount("10"),
+        metavar="CREDITS",
+        help="Amount of credits to purchase (minimum 2, defaults to 10)",
+    )
+
+    # Credits autotopup command
+    autotopup_parser = credits_subparsers.add_parser("autotopup", help="Configure automatic top-up")
+    autotopup_parser.add_argument("--enable", action="store_true", help="Enable automatic top-up")
+    autotopup_parser.add_argument("--disable", action="store_true", help="Disable automatic top-up")
+    autotopup_parser.add_argument(
+        "--threshold", type=float, default=4.0, help="Balance threshold to trigger auto top-up (default: 4.0 credits)"
+    )
+    autotopup_parser.add_argument(
+        "--amount", type=float, default=50.0, help="Amount to top up when threshold is reached (default: 50.0 credits)"
+    )
+
+
+def configure_resume_parser(resume_parser: argparse.ArgumentParser) -> None:
+    """Configure arguments for the resume command."""
+    resume_parser.add_argument(
+        "run_id", type=str, help="The UUID of the run to resume (e.g., '0002e071-1b67-411f-a514-36947f0c4b31')"
+    )
+
+
 def execute_run_command(args: argparse.Namespace) -> None:
     """Execute the 'weco run' command with all its logic."""
     from .optimizer import execute_optimization
@@ -95,6 +141,14 @@ def execute_run_command(args: argparse.Namespace) -> None:
     sys.exit(exit_code)
 
 
+def execute_resume_command(args: argparse.Namespace) -> None:
+    """Execute the 'weco resume' command with all its logic."""
+    from .optimizer import resume_optimization
+
+    success = resume_optimization(run_id=args.run_id, console=console)
+    sys.exit(0 if success else 1)
+
+
 def main() -> None:
     """Main function for the Weco CLI."""
     check_for_cli_updates()
@@ -126,6 +180,19 @@
     # --- Logout Command Parser Setup ---
     _ = subparsers.add_parser("logout", help="Log out from Weco and clear saved API key.")
 
+    # --- Credits Command Parser Setup ---
+    credits_parser = subparsers.add_parser("credits", help="Manage your Weco credits")
+    configure_credits_parser(credits_parser)  # Use the helper to add subcommands and arguments
+
+    # --- Resume Command Parser Setup ---
+    resume_parser = subparsers.add_parser(
+        "resume",
+        help="Resume an interrupted optimization run",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        allow_abbrev=False,
+    )
+    configure_resume_parser(resume_parser)
+
     # Check if we should run the chatbot
     # This logic needs to be robust. If 'run' or 'logout' is present, or -h/--help, don't run chatbot.
     # Otherwise, if it's just 'weco' or 'weco <path>' (with optional --model), run chatbot.
@@ -157,7 +224,7 @@
         return None
 
     first_non_option = get_first_non_option_arg()
-    is_known_command = first_non_option in ["run", "logout"]
+    is_known_command = first_non_option in ["run", "logout", "credits"]
     is_help_command = len(sys.argv) > 1 and sys.argv[1] in ["-h", "--help"]  # Check for global help
 
     should_run_chatbot_result = should_run_chatbot(sys.argv[1:])
@@ -208,6 +275,13 @@
         sys.exit(0)
     elif args.command == "run":
         execute_run_command(args)
+    elif args.command == "credits":
+        from .credits import handle_credits_command
+
+        handle_credits_command(args, console)
+        sys.exit(0)
+    elif args.command == "resume":
+        execute_resume_command(args)
     else:
         # This case should be hit if 'weco' is run alone and chatbot logic didn't catch it,
         # or if an invalid command is provided.
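
A self-contained sketch of how the new subcommands parse, mirroring the `configure_credits_parser` and `configure_resume_parser` wiring above (help strings and the two-decimal amount coercion omitted for brevity):

import argparse

parser = argparse.ArgumentParser(prog="weco")
subparsers = parser.add_subparsers(dest="command")

credits_parser = subparsers.add_parser("credits")
credits_sub = credits_parser.add_subparsers(dest="credits_command")
credits_sub.add_parser("balance")
topup = credits_sub.add_parser("topup")
topup.add_argument("amount", nargs="?", type=float, default=10.0, metavar="CREDITS")

resume = subparsers.add_parser("resume")
resume.add_argument("run_id", type=str)

args = parser.parse_args(["credits", "topup", "25"])
print(args.command, args.credits_command, args.amount)  # credits topup 25.0

args = parser.parse_args(["resume", "0002e071-1b67-411f-a514-36947f0c4b31"])
print(args.run_id)  # 0002e071-1b67-411f-a514-36947f0c4b31
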
weco/constants.py CHANGED
@@ -4,8 +4,15 @@ Constants for the Weco CLI package.
 """
 
 # API timeout configuration (connect_timeout, read_timeout) in seconds
-DEFAULT_API_TIMEOUT = (10, 800)
+CODEGEN_API_TIMEOUT = (30, 3650)
+STATUS_API_TIMEOUT = (10, 180)
 
 # Output truncation configuration
 TRUNCATION_THRESHOLD = 51000  # Maximum length before truncation
 TRUNCATION_KEEP_LENGTH = 25000  # Characters to keep from beginning and end
+
+# Default model configuration
+DEFAULT_MODEL = "o4-mini"
+
+# Supported file extensions for additional instructions
+SUPPORTED_FILE_EXTENSIONS = [".md", ".txt", ".rst"]
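
Both timeout constants are `(connect, read)` tuples: `requests` applies the first to establishing the TCP connection and the second to each wait for response data. That split is why code-generation calls get a 3650-second read window while status polls fail fast. A sketch of the distinction (the URL is a placeholder):

import requests

CODEGEN_API_TIMEOUT = (30, 3650)  # generous read window for long codegen calls
STATUS_API_TIMEOUT = (10, 180)    # tight window for status polling

try:
    requests.get("https://api.example.com/health", timeout=STATUS_API_TIMEOUT)
except requests.exceptions.ConnectTimeout:
    pass  # no TCP connection established within 10 s
except requests.exceptions.ReadTimeout:
    pass  # server went silent for 180 s mid-response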