weco 0.2.28__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
weco/panels.py CHANGED
@@ -6,7 +6,6 @@ from rich.panel import Panel
 from rich.syntax import Syntax
 from rich import box
 from typing import Dict, List, Optional, Union, Tuple
-from .utils import format_number
 from pathlib import Path
 from .__init__ import __dashboard_url__
 
@@ -26,8 +25,6 @@ class SummaryPanel:
     ):
         self.maximize = maximize
         self.metric_name = metric_name
-        self.total_input_tokens = 0
-        self.total_output_tokens = 0
         self.total_steps = total_steps
         self.model = model
         self.runs_dir = runs_dir
@@ -62,13 +59,6 @@ class SummaryPanel:
         """Set the current step."""
         self.progress.update(self.task_id, completed=step)
 
-    def update_token_counts(self, usage: Dict[str, int]):
-        """Update token counts from usage data."""
-        if not isinstance(usage, dict) or "input_tokens" not in usage or "output_tokens" not in usage:
-            raise ValueError("Invalid token usage data received.")
-        self.total_input_tokens += usage["input_tokens"]
-        self.total_output_tokens += usage["output_tokens"]
-
     def update_thinking(self, thinking: str):
         """Update the thinking content."""
         self.thinking_content = thinking
@@ -85,6 +75,10 @@ class SummaryPanel:
         summary_table.add_column(justify="right")
         summary_table.add_row("")
 
+        # Run id
+        summary_table.add_row(f" Run ID: [bold cyan]{self.run_id}[/]")
+        summary_table.add_row("")
+
         # Dashboard url
         summary_table.add_row(f" Dashboard: [underline blue]{self.dashboard_url}[/]")
         summary_table.add_row("")
@@ -94,14 +88,8 @@ class SummaryPanel:
         summary_table.add_row(f"[bold cyan] Result:[/] {final_message}", "")
         summary_table.add_row("")
 
-        # Token info
-        token_info = (
-            f"[bold cyan] {self.model}:[/] "
-            f"↑[yellow]{format_number(self.total_input_tokens)}[/] "
-            f"↓[yellow]{format_number(self.total_output_tokens)}[/] = "
-            f"[green]{format_number(self.total_input_tokens + self.total_output_tokens)} Tokens[/]"
-        )
-        summary_table.add_row(token_info)
+        # Model info
+        summary_table.add_row(f" Model: [bold cyan]{self.model}[/]")
         summary_table.add_row("")
 
         # Progress bar
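With the token tallies gone, the summary panel reduces to a few static rows: run ID, dashboard link, and model name. A minimal standalone sketch of how such rows render with rich's markup (values are illustrative; the real `SummaryPanel` builds a larger table with a progress bar and live state):

```python
# Minimal sketch: how the new summary rows render with rich markup.
# The run ID, URL, and model name are illustrative placeholders.
from rich import box
from rich.console import Console
from rich.table import Table

summary_table = Table(show_header=False, box=box.SIMPLE)
summary_table.add_row(" Run ID: [bold cyan]0002e071-1b67-411f-a514-36947f0c4b31[/]")
summary_table.add_row(" Dashboard: [underline blue]https://dashboard.weco.ai[/]")
summary_table.add_row(" Model: [bold cyan]o4-mini[/]")
Console().print(summary_table)
```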
weco/utils.py CHANGED
@@ -1,6 +1,5 @@
 from typing import Any, Dict, List, Tuple, Union
 import json
-import os
 import time
 import subprocess
 from rich.layout import Layout
@@ -10,45 +9,13 @@ import pathlib
 import requests
 from packaging.version import parse as parse_version
 
-from .constants import TRUNCATION_THRESHOLD, TRUNCATION_KEEP_LENGTH
+from .constants import TRUNCATION_THRESHOLD, TRUNCATION_KEEP_LENGTH, DEFAULT_MODEL, SUPPORTED_FILE_EXTENSIONS
 
 
 # Env/arg helper functions
-def read_api_keys_from_env() -> Dict[str, Any]:
-    """Read API keys from environment variables."""
-    keys = {}
-    keys_to_check = ["OPENAI_API_KEY", "ANTHROPIC_API_KEY", "GEMINI_API_KEY"]
-    for key in keys_to_check:
-        value = os.getenv(key)
-        if value is not None and len(value) > 0:
-            keys[key] = value
-    return keys
-
-
-def determine_default_model(llm_api_keys: Dict[str, Any]) -> str:
-    """Determine the default model based on available API keys.
-
-    Uses priority: OpenAI > Anthropic > Gemini
-
-    Args:
-        llm_api_keys: Dictionary of available LLM API keys
-
-    Returns:
-        str: The default model name to use
-
-    Raises:
-        ValueError: If no LLM API keys are found
-    """
-    if "OPENAI_API_KEY" in llm_api_keys:
-        return "o4-mini"
-    elif "ANTHROPIC_API_KEY" in llm_api_keys:
-        return "claude-sonnet-4-0"
-    elif "GEMINI_API_KEY" in llm_api_keys:
-        return "gemini-2.5-pro"
-    else:
-        raise ValueError(
-            "No LLM API keys found in environment variables. Please set one of the following: OPENAI_API_KEY, ANTHROPIC_API_KEY, or GEMINI_API_KEY based on your model of choice."
-        )
+def determine_model_for_onboarding() -> str:
+    """Determine which model to use for onboarding chatbot. Defaults to o4-mini."""
+    return DEFAULT_MODEL
 
 
 def read_additional_instructions(additional_instructions: str | None) -> str | None:
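The new helper leans on two constants from `weco/constants.py` that this diff does not show. A plausible sketch of their shape, inferred from the docstring's "Defaults to o4-mini" and the instruction-file formats the README (below) lists; the exact values and container type are assumptions:

```python
# Presumed contents of weco/constants.py (not part of this diff).
DEFAULT_MODEL = "o4-mini"
SUPPORTED_FILE_EXTENSIONS = {".txt", ".md", ".rst"}
```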
@@ -60,6 +27,11 @@ def read_additional_instructions(additional_instructions: str | None) -> str | N
     potential_path = pathlib.Path(additional_instructions)
     try:
         if potential_path.exists() and potential_path.is_file():
+            # If it's a valid file path, check if we support the file extension
+            if potential_path.suffix.lower() not in SUPPORTED_FILE_EXTENSIONS:
+                raise ValueError(
+                    f"Unsupported file extension: {potential_path.suffix.lower()}. Supported extensions are: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
+                )
             return read_from_path(potential_path, is_json=False)  # type: ignore # read_from_path returns str when is_json=False
         else:
            # If it's not a valid file path, return the string itself
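Illustrative calls into the new validation path (hypothetical inputs; assumes `SUPPORTED_FILE_EXTENSIONS` covers `.txt`, `.md`, and `.rst`, as the README below documents):

```python
from weco.utils import read_additional_instructions

# Existing .md file: contents are read from disk and returned.
read_additional_instructions("instructions.md")
# Not a path to an existing file: the string itself is the instruction.
read_additional_instructions("Optimize the model for faster inference")
# Existing file with an unsupported extension: raises ValueError.
read_additional_instructions("notes.pdf")
```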
@@ -90,23 +62,6 @@ def write_to_path(fp: pathlib.Path, content: Union[str, Dict[str, Any]], is_json
 
 
 # Visualization helper functions
-def format_number(n: Union[int, float]) -> str:
-    """Format large numbers with K, M, B, T suffixes for better readability."""
-    if n >= 1e12:
-        return f"{n / 1e12:.1f}T"
-    elif n >= 1e9:
-        return f"{n / 1e9:.1f}B"
-    elif n >= 1e6:
-        return f"{n / 1e6:.1f}M"
-    elif n >= 1e3:
-        return f"{n / 1e3:.1f}K"
-    # Handle potential floats that don't need suffix but might need formatting
-    if isinstance(n, float):
-        # Format floats nicely, avoid excessive precision unless needed
-        return f"{n:.4g}"  # Use general format, up to 4 significant digits
-    return str(n)
-
-
 def smooth_update(
     live: Live, layout: Layout, sections_to_update: List[Tuple[str, Panel]], transition_delay: float = 0.05
 ) -> None:
{weco-0.2.28.dist-info → weco-0.3.1.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: weco
-Version: 0.2.28
+Version: 0.3.1
 Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
 Author-email: Weco AI Team <contact@weco.ai>
 License:
@@ -270,16 +270,17 @@ The `weco` CLI leverages a tree search approach guided by LLMs to iteratively ex
 1. **Install the Package:**
 
    ```bash
-   pip install weco>=0.2.18
+   pip install weco
    ```
 
-2. **Set Up LLM API Keys (Required):**
+2. **Authenticate (Required):**
 
-   `weco` requires API keys for the LLMs it uses internally. You **must** provide these keys via environment variables:
+   `weco` now uses a **credit-based billing system** with centralized LLM access. You need to authenticate to use the service:
 
-   - **OpenAI:** `export OPENAI_API_KEY="your_key_here"` (Create your OpenAI API key [here](https://platform.openai.com/api-keys))
-   - **Anthropic:** `export ANTHROPIC_API_KEY="your_key_here"` (Create your Anthropic API key [here](https://console.anthropic.com/settings/keys))
-   - **Google:** `export GEMINI_API_KEY="your_key_here"` (Google AI Studio has a free API usage quota. Create your Gemini API key [here](https://aistudio.google.com/apikey) to use `weco` for free.)
+   - **Run the CLI**: `weco` will prompt you to authenticate via your web browser
+   - **Free Credits**: New users receive **free credits** upon signup
+   - **Centralized Keys**: All LLM provider API keys are managed by Weco (no BYOK required)
+   - **Credit Top-ups**: Purchase additional credits through the dashboard at [dashboard.weco.ai](https://dashboard.weco.ai)
 
 ---
 
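Taken together, the new first-run flow reduces to two commands; a sketch of the documented behavior (no extra flags assumed):

```bash
pip install weco   # install the CLI
weco               # first run opens the browser-based authentication flow
```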
@@ -338,6 +339,8 @@ weco run --source optimize.py \
 
 For more advanced examples, including [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md), [ML model optimization](/examples/spaceship-titanic/README.md), and [prompt engineering for math problems](examples/prompt/README.md), please see the `README.md` files within the corresponding subdirectories under the [`examples/`](examples/) folder.
 
+> Note: We recommend removing any backticks from your code if any are present. We currently don't support backticks but will add support in the future.
+
 ---
 
 ### Arguments for `weco run`
@@ -358,8 +361,8 @@ For more advanced examples, including [Triton](/examples/triton/README.md), [CUD
 | Argument | Description | Default | Example |
 | :----------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------ | :------------------ |
 | `-n, --steps` | Number of optimization steps (LLM iterations) to run. | 100 | `-n 50` |
-| `-M, --model` | Model identifier for the LLM to use (e.g., `o4-mini`, `claude-sonnet-4-0`). | `o4-mini` when `OPENAI_API_KEY` is set; `claude-sonnet-4-0` when `ANTHROPIC_API_KEY` is set; `gemini-2.5-pro` when `GEMINI_API_KEY` is set. | `-M o4-mini` |
-| `-i, --additional-instructions`| Natural language description of specific instructions **or** path to a file containing detailed instructions to guide the LLM. | `None` | `-i instructions.md` or `-i "Optimize the model for faster inference"`|
+| `-M, --model` | Model identifier for the LLM to use (e.g., `o4-mini`, `claude-sonnet-4-0`). | `o4-mini` | `-M o4-mini` |
+| `-i, --additional-instructions`| Natural language description of specific instructions **or** path to a file containing detailed instructions to guide the LLM. Supported file formats include `.txt`, `.md`, and `.rst`. | `None` | `-i instructions.md` or `-i "Optimize the model for faster inference"`|
 | `-l, --log-dir` | Path to the directory to log intermediate steps and final optimization result. | `.runs/` | `-l ./logs/` |
 | `--eval-timeout` | Timeout in seconds for each step in evaluation. | No timeout (unlimited) | `--eval-timeout 3600` |
 | `--save-logs` | Save execution output from each optimization step to disk. Creates timestamped directories with raw output files and a JSONL index for tracking execution history. | `False` | `--save-logs` |
@@ -368,24 +371,24 @@ For more advanced examples, including [Triton](/examples/triton/README.md), [CUD
 
 ### Authentication & Dashboard
 
-Weco offers both **anonymous** and **authenticated** usage:
-
-#### Anonymous Usage
-You can use Weco without creating an account by providing LLM API keys via environment variables. This is perfect for trying out Weco or for users who prefer not to create accounts.
+The CLI requires a Weco account for authentication and billing.
 
-#### Authenticated Usage (Recommended)
-To save your optimization runs and view them on the Weco dashboard, you can log in using Weco's secure device authentication flow:
+#### Credit-Based Authentication (Required)
+Weco now requires authentication for all operations. This enables our credit-based billing system and provides access to powerful optimizations:
 
-1. **During onboarding**: When you run `weco` for the first time, you'll be prompted to log in or skip
+1. **During onboarding**: When you run `weco` for the first time, you'll be prompted to log in
 2. **Manual login**: Use `weco logout` to clear credentials, then run `weco` again to re-authenticate
 3. **Device flow**: Weco will open your browser automatically and guide you through a secure OAuth-style authentication
 
 ![image (16)](https://github.com/user-attachments/assets/8a0a285b-4894-46fa-b6a2-4990017ca0c6)
 
-**Benefits of authenticated usage:**
-- **Run history**: View all your optimization runs on the Weco dashboard
-- **Progress tracking**: Monitor long-running optimizations remotely
-- **Enhanced support**: Get better assistance with your optimization challenges
+**Benefits:**
+- **No API Key Management**: All LLM provider keys are managed centrally
+- **Cost Transparency**: See exactly how many credits each optimization consumes
+- **Free Trial**: Free credits to get started with optimization projects
+- **Run History**: View all your optimization runs on the Weco dashboard
+- **Progress Tracking**: Monitor long-running optimizations remotely
+- **Budget Control**: Set spending limits and auto top-up preferences
 
 ---
 
@@ -398,6 +401,7 @@ To save your optimization runs and view them on the Weco dashboard, you can log
 | `weco` | Launch interactive onboarding | **Recommended for beginners** - Analyzes your codebase and guides you through setup |
 | `weco /path/to/project` | Launch onboarding for specific project | When working with a project in a different directory |
 | `weco run [options]` | Direct optimization execution | **For advanced users** - When you know exactly what to optimize and how |
+| `weco resume <run-id>` | Resume an interrupted run | Continue from the last completed step |
 | `weco logout` | Clear authentication credentials | To switch accounts or troubleshoot authentication issues |
 
 ### Model Selection
@@ -413,14 +417,37 @@ weco run --model claude-3.5-sonnet --source optimize.py [other options...]
 ```
 
 **Available models:**
-- `gpt-4o`, `o4-mini` (requires `OPENAI_API_KEY`)
-- `claude-3.5-sonnet`, `claude-sonnet-4-20250514` (requires `ANTHROPIC_API_KEY`)
-- `gemini-2.5-pro` (requires `GEMINI_API_KEY`)
+- `o4-mini`, `o3-mini`, `gpt-4o` (OpenAI models)
+- `claude-sonnet-4-0`, `claude-opus-4-0` (Anthropic models)
+- `gemini-2.5-pro`, `gemini-2.5-flash` (Google models)
 
-If no model is specified, Weco automatically selects the best available model based on your API keys.
+All models are available through Weco's centralized system. If no model is specified, Weco automatically selects the best model for your optimization task.
 
 ---
 
+### Resuming Interrupted Runs
+
+If your optimization run is interrupted (network issues, restart, etc.), resume from the most recent node:
+
+```bash
+# Resume an interrupted run
+weco resume 0002e071-1b67-411f-a514-36947f0c4b31
+```
+
+Arguments for `weco resume`:
+
+| Argument | Description | Example |
+|----------|-------------|---------|
+| `run-id` | The UUID of the run to resume (shown at the start of each run) | `0002e071-1b67-411f-a514-36947f0c4b31` |
+
+Notes:
+- Works only for interrupted runs (status: `error`, `terminated`, etc.).
+- You’ll be prompted to confirm that your evaluation environment (source file + evaluation command) hasn’t changed.
+- The source file is restored to the most recent solution before continuing.
+- All progress and metrics from the original run are preserved.
+- Log directory, save-logs behavior, and evaluation timeout are reused from the original run.
+
 ### Performance & Expectations
 
 Weco, powered by the AIDE algorithm, optimizes code iteratively based on your evaluation results. Achieving significant improvements, especially on complex research-level tasks, often requires substantial exploration time.
@@ -493,37 +520,7 @@ Weco will parse this output to extract the numerical value (1.5 in this case) as
 
 ## Supported Models
 
-Weco supports the following LLM models:
-
-### OpenAI Models
-- `gpt-5` (recommended)
-- `gpt-5-mini`
-- `gpt-5-nano`
-- `o3-pro` (recommended)
-- `o3` (recommended)
-- `o4-mini` (recommended)
-- `o3-mini`
-- `o1-pro`
-- `o1`
-- `gpt-4.1`
-- `gpt-4.1-mini`
-- `gpt-4.1-nano`
-- `gpt-4o`
-- `gpt-4o-mini`
-- `codex-mini-latest`
-
-### Anthropic Models
-- `claude-opus-4-1`
-- `claude-opus-4-0`
-- `claude-sonnet-4-0`
-- `claude-3-7-sonnet-latest`
-
-### Gemini Models
-- `gemini-2.5-pro`
-- `gemini-2.5-flash`
-- `gemini-2.5-flash-lite`
-
-You can specify any of these models using the `-M` or `--model` flag. Ensure you have the corresponding API key set as an environment variable for the model provider you wish to use.
+A list of models we support can be found in our documentation [here](https://docs.weco.ai/cli/supported-models).
 
 ---
 
weco-0.3.1.dist-info/RECORD ADDED
@@ -0,0 +1,16 @@
+weco/__init__.py,sha256=ClO0uT6GKOA0iSptvP0xbtdycf0VpoPTq37jHtvlhtw,303
+weco/api.py,sha256=dUjzuOKKvayzZ_1B4j40eK9Ofk264jsc6vOR1afsszY,18523
+weco/auth.py,sha256=O31Hoj-Loi8DWJJG2LfeWgUMuNqAUeGDpd2ZGjA9Ah0,9997
+weco/chatbot.py,sha256=EIK2WaOul9gn_yHLThjsZV7RnE8t3XQPwgRkO5tybSU,38415
+weco/cli.py,sha256=579f6jf-ZWuFAmNXDisRY7zWr7vw2YZQuC_QX8-qxx0,11460
+weco/constants.py,sha256=V6yFugTznKm5EC2_jr4I_whd7sqI80HiPggRn0az580,406
+weco/credits.py,sha256=C08x-TRcLg3ccfKqMGNRY7zBn7t3r7LZ119bxgfztaI,7629
+weco/optimizer.py,sha256=mJU8_0bo_6dS2PEj1E3dQHvNH9V4e8NSLNE55tmvspw,42291
+weco/panels.py,sha256=fnGPtmvxpx21AuBCtCFu1f_BpSxybNr2lhjIIKIutrY,16133
+weco/utils.py,sha256=TT57S0YGMuMWPFNsn0tcexNHZd-kBEjDeiOLWxANiQU,6117
+weco-0.3.1.dist-info/licenses/LICENSE,sha256=9LUfoGHjLPtak2zps2kL2tm65HAZIICx_FbLaRuS4KU,11337
+weco-0.3.1.dist-info/METADATA,sha256=e5xozCmFPB7ih2ntFNYQAMXAU_O8Kw3NDSiRhaNEu4c,31856
+weco-0.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+weco-0.3.1.dist-info/entry_points.txt,sha256=ixJ2uClALbCpBvnIR6BXMNck8SHAab8eVkM9pIUowcs,39
+weco-0.3.1.dist-info/top_level.txt,sha256=F0N7v6e2zBSlsorFv-arAq2yDxQbzX3KVO8GxYhPUeE,5
+weco-0.3.1.dist-info/RECORD,,
weco-0.2.28.dist-info/RECORD DELETED
@@ -1,15 +0,0 @@
-weco/__init__.py,sha256=ClO0uT6GKOA0iSptvP0xbtdycf0VpoPTq37jHtvlhtw,303
-weco/api.py,sha256=cdZEf-Zt0CxMOj_gka6rGHEK1MVwPAjG1YH16jgDEsg,13177
-weco/auth.py,sha256=KMSAsN1V5wx7KUsYL1cEOOiG29Pqf4Exb3EPW4mAWC0,10003
-weco/chatbot.py,sha256=EkzKd5Q_IlcobBbY3gsbgN0jxbJMfP5eYtzxQaNQ3fg,37747
-weco/cli.py,sha256=8hrlmHmaZiYQ7kotdpr4Ve-xAJZocDV6kcizPCmep0k,8380
-weco/constants.py,sha256=hVEIpNejUxZ6-1GrL7Qv97EqBoo5vqDdPdMfAjq-_24,345
-weco/optimizer.py,sha256=bXhNoa2qyC-CeqLHacy3xz2UKHuO_DVpC3z572NjFSU,26063
-weco/panels.py,sha256=jwAV_uoa0ZI9vjyey-hSY3rx4pfNNkZvPzqt-iz-RXo,16808
-weco/utils.py,sha256=P6efzBXg7m_Nnq6UUor9onCGxjE0CkTI2xYsymmCwZ4,7355
-weco-0.2.28.dist-info/licenses/LICENSE,sha256=9LUfoGHjLPtak2zps2kL2tm65HAZIICx_FbLaRuS4KU,11337
-weco-0.2.28.dist-info/METADATA,sha256=qnSbrHYphl5HLaqzAWghErM1I3gB-26czvbXfGZ_QzQ,31432
-weco-0.2.28.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-weco-0.2.28.dist-info/entry_points.txt,sha256=ixJ2uClALbCpBvnIR6BXMNck8SHAab8eVkM9pIUowcs,39
-weco-0.2.28.dist-info/top_level.txt,sha256=F0N7v6e2zBSlsorFv-arAq2yDxQbzX3KVO8GxYhPUeE,5
-weco-0.2.28.dist-info/RECORD,,