weco-0.2.20.tar.gz → weco-0.2.23.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. weco-0.2.23/.github/workflows/lint.yml +51 -0
  2. {weco-0.2.20 → weco-0.2.23}/PKG-INFO +111 -38
  3. {weco-0.2.20 → weco-0.2.23}/README.md +109 -37
  4. weco-0.2.23/assets/weco.svg +91 -0
  5. weco-0.2.23/examples/cuda/README.md +54 -0
  6. {weco-0.2.20 → weco-0.2.23}/examples/cuda/evaluate.py +3 -0
  7. {weco-0.2.20 → weco-0.2.23}/examples/cuda/guide.md +0 -24
  8. {weco-0.2.20 → weco-0.2.23}/examples/hello-kernel-world/evaluate.py +3 -0
  9. weco-0.2.23/examples/prompt/README.md +71 -0
  10. weco-0.2.23/examples/spaceship-titanic/README.md +50 -0
  11. weco-0.2.23/examples/spaceship-titanic/evaluate.py +75 -0
  12. weco-0.2.23/examples/spaceship-titanic/train.py +90 -0
  13. weco-0.2.23/examples/triton/README.md +53 -0
  14. {weco-0.2.20 → weco-0.2.23}/examples/triton/evaluate.py +3 -0
  15. {weco-0.2.20 → weco-0.2.23}/pyproject.toml +7 -2
  16. weco-0.2.23/weco/__init__.py +9 -0
  17. weco-0.2.23/weco/api.py +319 -0
  18. weco-0.2.23/weco/auth.py +225 -0
  19. weco-0.2.23/weco/chatbot.py +797 -0
  20. weco-0.2.23/weco/cli.py +200 -0
  21. weco-0.2.23/weco/optimizer.py +479 -0
  22. {weco-0.2.20 → weco-0.2.23}/weco/panels.py +46 -0
  23. {weco-0.2.20 → weco-0.2.23}/weco/utils.py +31 -3
  24. {weco-0.2.20 → weco-0.2.23}/weco.egg-info/PKG-INFO +111 -38
  25. {weco-0.2.20 → weco-0.2.23}/weco.egg-info/SOURCES.txt +4 -1
  26. {weco-0.2.20 → weco-0.2.23}/weco.egg-info/requires.txt +1 -0
  27. weco-0.2.20/.github/workflows/lint.yml +0 -50
  28. weco-0.2.20/examples/cuda/README.md +0 -40
  29. weco-0.2.20/examples/prompt/README.md +0 -51
  30. weco-0.2.20/examples/spaceship-titanic/README.md +0 -42
  31. weco-0.2.20/examples/spaceship-titanic/evaluate.py +0 -43
  32. weco-0.2.20/examples/spaceship-titanic/requirements-test.txt +0 -7
  33. weco-0.2.20/examples/triton/README.md +0 -38
  34. weco-0.2.20/weco/__init__.py +0 -15
  35. weco-0.2.20/weco/api.py +0 -146
  36. weco-0.2.20/weco/auth.py +0 -64
  37. weco-0.2.20/weco/cli.py +0 -714
  38. {weco-0.2.20 → weco-0.2.23}/.github/workflows/release.yml +0 -0
  39. {weco-0.2.20 → weco-0.2.23}/.gitignore +0 -0
  40. {weco-0.2.20 → weco-0.2.23}/.repomixignore +0 -0
  41. {weco-0.2.20 → weco-0.2.23}/LICENSE +0 -0
  42. {weco-0.2.20 → weco-0.2.23}/assets/example-optimization.gif +0 -0
  43. {weco-0.2.20 → weco-0.2.23}/examples/cuda/optimize.py +0 -0
  44. {weco-0.2.20 → weco-0.2.23}/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb +0 -0
  45. {weco-0.2.20 → weco-0.2.23}/examples/hello-kernel-world/optimize.py +0 -0
  46. {weco-0.2.20 → weco-0.2.23}/examples/prompt/eval.py +0 -0
  47. {weco-0.2.20 → weco-0.2.23}/examples/prompt/optimize.py +0 -0
  48. {weco-0.2.20 → weco-0.2.23}/examples/prompt/prompt_guide.md +0 -0
  49. {weco-0.2.20 → weco-0.2.23}/examples/spaceship-titanic/competition_description.md +0 -0
  50. {weco-0.2.20 → weco-0.2.23}/examples/spaceship-titanic/data/sample_submission.csv +0 -0
  51. {weco-0.2.20 → weco-0.2.23}/examples/spaceship-titanic/data/test.csv +0 -0
  52. {weco-0.2.20 → weco-0.2.23}/examples/spaceship-titanic/data/train.csv +0 -0
  53. {weco-0.2.20 → weco-0.2.23}/examples/triton/optimize.py +0 -0
  54. {weco-0.2.20 → weco-0.2.23}/setup.cfg +0 -0
  55. {weco-0.2.20 → weco-0.2.23}/weco.egg-info/dependency_links.txt +0 -0
  56. {weco-0.2.20 → weco-0.2.23}/weco.egg-info/entry_points.txt +0 -0
  57. {weco-0.2.20 → weco-0.2.23}/weco.egg-info/top_level.txt +0 -0
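Before the hunks themselves: for anyone tracking this release, upgrading an existing install is the standard pip flow (the explicit version pin below is only for clarity; plain `pip install --upgrade weco` also works):

```bash
# Move an existing install to the version introduced in this diff
pip install --upgrade weco==0.2.23
```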
weco-0.2.23/.github/workflows/lint.yml

@@ -0,0 +1,51 @@
+ name: Lint and Format Code
+
+ on:
+   push:
+     branches:
+       - dev
+       - main
+   pull_request: # Run on any pull request
+
+ jobs:
+   lint:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Checkout code
+         uses: actions/checkout@v4
+         with:
+           token: ${{ secrets.GITHUB_TOKEN }}
+
+       - name: Set up Python
+         uses: actions/setup-python@v3
+         with:
+           python-version: "3.12.0"
+
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install ruff
+
+       - name: Run Linter & Formatter
+         run: |
+           # Check if this is an external fork PR
+           if [[ "${{ github.event.pull_request.head.repo.full_name }}" != "${{ github.repository }}" ]] && [[ "${{ github.event_name }}" == "pull_request" ]]; then
+             echo "External fork PR detected. Running format check only."
+             ruff check .
+             ruff format --check .
+           else
+             echo "Internal PR or push event. Running format and commit."
+             ruff check . --fix
+             ruff format .
+
+             git config --local user.email "action@github.com"
+             git config --local user.name "GitHub Action"
+             git add -A
+             if git diff --exit-code --staged; then
+               echo "No changes to commit"
+             else
+               git commit -m "[GitHub Action] Lint and format code with Ruff"
+               git push https://${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}
+             fi
+           fi
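The new workflow boils down to a handful of Ruff commands. As a reference sketch, the same checks can be reproduced locally before pushing (the fix/format pair mirrors what CI auto-commits on internal branches):

```bash
# Reproduce the CI checks locally, using the same Ruff commands as the workflow
pip install ruff

# What CI runs for external fork PRs: check-only, no files modified
ruff check .
ruff format --check .

# What CI runs for internal branches before auto-committing
ruff check . --fix
ruff format .
```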
{weco-0.2.20 → weco-0.2.23}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: weco
- Version: 0.2.20
+ Version: 0.2.23
  Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
  Author-email: Weco AI Team <contact@weco.ai>
  License: MIT
@@ -15,6 +15,7 @@ License-File: LICENSE
  Requires-Dist: requests
  Requires-Dist: rich
  Requires-Dist: packaging
+ Requires-Dist: gitingest
  Provides-Extra: dev
  Requires-Dist: ruff; extra == "dev"
  Requires-Dist: build; extra == "dev"
@@ -23,13 +24,17 @@ Dynamic: license-file

  <div align="center">

- # Weco: The Platform for Self-Improving Code
+ <div align="center">
+   <img src="assets/weco.svg" alt="Weco Logo" width="120" height="120" style="margin-bottom: 20px;">
+   <h1>Weco: The Platform for Self-Improving Code</h1>
+ </div>

  [![Python](https://img.shields.io/badge/Python-3.8.0+-blue)](https://www.python.org)
+ [![PyPI version](https://img.shields.io/pypi/v/weco?label=PyPI%20version&color=f05138&labelColor=555555)](https://badge.fury.io/py/weco)
  [![docs](https://img.shields.io/website?url=https://docs.weco.ai/&label=docs)](https://docs.weco.ai/)
- [![PyPI version](https://badge.fury.io/py/weco.svg)](https://badge.fury.io/py/weco)
- [![AIDE](https://img.shields.io/badge/AI--Driven_Exploration-arXiv-orange?style=flat-square&logo=arxiv)](https://arxiv.org/abs/2502.13138)
- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/WecoAI/weco-cli/blob/main/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb)
+ [![PyPI Downloads](https://static.pepy.tech/badge/weco?color=4c1)](https://pepy.tech/projects/weco)
+ [![arXiv on AIDE](https://img.shields.io/badge/arXiv-AIDE-b31b1b?logo=arxiv&logoColor=white)](https://arxiv.org/abs/2502.13138)
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg?labelColor=ffffff&color=F17E01)](https://colab.research.google.com/github/WecoAI/weco-cli/blob/main/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb)

  `pip install weco`

@@ -51,7 +56,7 @@ Example applications include:

  ## Overview

- The `weco` CLI leverages a tree search approach guided by Large Language Models (LLMs) to iteratively explore and refine your code. It automatically applies changes, runs your evaluation script, parses the results, and proposes further improvements based on the specified goal.
+ The `weco` CLI leverages a tree search approach guided by LLMs to iteratively explore and refine your code. It automatically applies changes, runs your evaluation script, parses the results, and proposes further improvements based on the specified goal.

  ![image](https://github.com/user-attachments/assets/a6ed63fa-9c40-498e-aa98-a873e5786509)

@@ -67,28 +72,48 @@ The `weco` CLI leverages a tree search approach guided by Large Language Models

  2. **Set Up LLM API Keys (Required):**

- `weco` requires API keys for the Large Language Models (LLMs) it uses internally. You **must** provide these keys via environment variables:
+ `weco` requires API keys for the LLMs it uses internally. You **must** provide these keys via environment variables:

- - **OpenAI:** `export OPENAI_API_KEY="your_key_here"`
- - **Anthropic:** `export ANTHROPIC_API_KEY="your_key_here"`
- - **Google DeepMind:** `export GEMINI_API_KEY="your_key_here"` (Google AI Studio has a free API usage quota. Create a key [here](https://aistudio.google.com/apikey) to use `weco` for free.)
+ - **OpenAI:** `export OPENAI_API_KEY="your_key_here"` (Create your OpenAI API key [here](https://platform.openai.com/api-keys))
+ - **Anthropic:** `export ANTHROPIC_API_KEY="your_key_here"` (Create your Anthropic API key [here](https://console.anthropic.com/settings/keys))
+ - **Google:** `export GEMINI_API_KEY="your_key_here"` (Google AI Studio has a free API usage quota. Create your Gemini API key [here](https://aistudio.google.com/apikey) to use `weco` for free.)

  ---

  ## Get Started

+ ### Quick Start (Recommended for New Users)
+
+ The easiest way to get started with Weco is to use the **interactive copilot**. Simply navigate to your project directory and run:
+
+ ```bash
+ weco
+ ```
+
+ Or specify a project path:
+
+ ```bash
+ weco /path/to/your/project
+ ```
+
+ This launches Weco's interactive copilot that will:
+
+ 1. **Analyze your codebase** using AI to understand your project structure and identify optimization opportunities
+ 2. **Suggest specific optimizations** tailored to your code (e.g., GPU kernel optimization, model improvements, prompt engineering)
+ 3. **Generate evaluation scripts** automatically or help you configure existing ones
+ 4. **Set up the complete optimization pipeline** with appropriate metrics and commands
+ 5. **Run the optimization** or provide you with the exact command to execute
+
  <div style="background-color: #fff3cd; border: 1px solid #ffeeba; padding: 15px; border-radius: 4px; margin-bottom: 15px;">
  <strong>⚠️ Warning: Code Modification</strong><br>
  <code>weco</code> directly modifies the file specified by <code>--source</code> during the optimization process. It is <strong>strongly recommended</strong> to use version control (like Git) to track changes and revert if needed. Alternatively, ensure you have a backup of your original file before running the command. Upon completion, the file will contain the best-performing version of the code found during the run.
  </div>

- ---
-
- **Example: Optimizing Simple PyTorch Operations**
+ ### Manual Setup

- This basic example shows how to optimize a simple PyTorch function for speedup.
+ **Configure optimization parameters yourself** - If you need precise control over the optimization parameters, you can use the direct `weco run` command:

- For more advanced examples, including [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md), [ML model optimization](/examples/spaceship-titanic/README.md), and [prompt engineering for math problems](https://github.com/WecoAI/weco-cli/tree/main/examples/prompt), please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.
+ **Example: Optimizing Simple PyTorch Operations**

  ```bash
  # Navigate to the example directory
@@ -97,7 +122,7 @@ cd examples/hello-kernel-world
  # Install dependencies
  pip install torch

- # Run Weco
+ # Run Weco with manual configuration
  weco run --source optimize.py \
      --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
      --metric speedup \
@@ -108,36 +133,87 @@ weco run --source optimize.py \

  **Note:** If you have an NVIDIA GPU, change the device in the `--eval-command` to `cuda`. If you are running this on Apple Silicon, set it to `mps`.

+ For more advanced examples, including [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md), [ML model optimization](/examples/spaceship-titanic/README.md), and [prompt engineering for math problems](examples/prompt/README.md), please see the `README.md` files within the corresponding subdirectories under the [`examples/`](examples/) folder.
+
  ---

  ### Arguments for `weco run`

  **Required:**

- | Argument | Description |
- | :--- | :--- |
- | `-s, --source` | Path to the source code file that will be optimized (e.g., `optimize.py`). |
- | `-c, --eval-command`| Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. |
- | `-m, --metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. |
- | `-g, --goal` | `maximize`/`max` to maximize the `--metric` or `minimize`/`min` to minimize it. |
+ | Argument | Description | Example |
+ | :--- | :--- | :--- |
+ | `-s, --source` | Path to the source code file that will be optimized. | `-s model.py` |
+ | `-c, --eval-command`| Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | `-c "python eval.py"` |
+ | `-m, --metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name does not need to match what's printed by your `--eval-command` exactly (e.g., it's okay to use "speedup" instead of "Speedup:"). | `-m speedup` |
+ | `-g, --goal` | `maximize`/`max` to maximize the `--metric` or `minimize`/`min` to minimize it. | `-g maximize` |

  <br>

  **Optional:**

- | Argument | Description | Default |
- | :--- | :--- | :--- |
- | `-n, --steps` | Number of optimization steps (LLM iterations) to run. | 100 |
- | `-M, --model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). | `o4-mini` when `OPENAI_API_KEY` is set; `claude-3-7-sonnet-20250219` when `ANTHROPIC_API_KEY` is set; `gemini-2.5-pro-exp-03-25` when `GEMINI_API_KEY` is set (priority: `OPENAI_API_KEY` > `ANTHROPIC_API_KEY` > `GEMINI_API_KEY`). |
- | `-i, --additional-instructions`| Natural language description of specific instructions **or** path to a file containing detailed instructions to guide the LLM. | `None` |
- | `-l, --log-dir` | Path to the directory to log intermediate steps and final optimization result. | `.runs/` |
+ | Argument | Description | Default | Example |
+ | :--- | :--- | :--- | :--- |
+ | `-n, --steps` | Number of optimization steps (LLM iterations) to run. | 100 | `-n 50` |
+ | `-M, --model` | Model identifier for the LLM to use (e.g., `o4-mini`, `claude-sonnet-4-0`). | `o4-mini` when `OPENAI_API_KEY` is set; `claude-sonnet-4-0` when `ANTHROPIC_API_KEY` is set; `gemini-2.5-pro` when `GEMINI_API_KEY` is set. | `-M o4-mini` |
+ | `-i, --additional-instructions`| Natural language description of specific instructions **or** path to a file containing detailed instructions to guide the LLM. | `None` | `-i instructions.md` or `-i "Optimize the model for faster inference"` |
+ | `-l, --log-dir` | Path to the directory to log intermediate steps and final optimization result. | `.runs/` | `-l ./logs/` |

  ---

- ### Weco Dashboard
- To associate your optimization runs with your Weco account and view them on the Weco dashboard, you can log in. `weco` uses a device authentication flow
+ ### Authentication & Dashboard
+
+ Weco offers both **anonymous** and **authenticated** usage:
+
+ #### Anonymous Usage
+ You can use Weco without creating an account by providing LLM API keys via environment variables. This is perfect for trying out Weco or for users who prefer not to create accounts.
+
+ #### Authenticated Usage (Recommended)
+ To save your optimization runs and view them on the Weco dashboard, you can log in using Weco's secure device authentication flow:
+
+ 1. **During onboarding**: When you run `weco` for the first time, you'll be prompted to log in or skip
+ 2. **Manual login**: Use `weco logout` to clear credentials, then run `weco` again to re-authenticate
+ 3. **Device flow**: Weco will open your browser automatically and guide you through a secure OAuth-style authentication
+
  ![image (16)](https://github.com/user-attachments/assets/8a0a285b-4894-46fa-b6a2-4990017ca0c6)

+ **Benefits of authenticated usage:**
+ - **Run history**: View all your optimization runs on the Weco dashboard
+ - **Progress tracking**: Monitor long-running optimizations remotely
+ - **Enhanced support**: Get better assistance with your optimization challenges
+
+ ---
+
+ ## Command Reference
+
+ ### Basic Usage Patterns
+
+ | Command | Description | When to Use |
+ |---------|-------------|-------------|
+ | `weco` | Launch interactive onboarding | **Recommended for beginners** - Analyzes your codebase and guides you through setup |
+ | `weco /path/to/project` | Launch onboarding for specific project | When working with a project in a different directory |
+ | `weco run [options]` | Direct optimization execution | **For advanced users** - When you know exactly what to optimize and how |
+ | `weco logout` | Clear authentication credentials | To switch accounts or troubleshoot authentication issues |
+
+ ### Model Selection
+
+ You can specify which LLM model to use with the `-M` or `--model` flag:
+
+ ```bash
+ # Use with onboarding
+ weco --model gpt-4o
+
+ # Use with direct execution
+ weco run --model claude-3.5-sonnet --source optimize.py [other options...]
+ ```
+
+ **Available models:**
+ - `gpt-4o`, `o4-mini` (requires `OPENAI_API_KEY`)
+ - `claude-3.5-sonnet`, `claude-sonnet-4-20250514` (requires `ANTHROPIC_API_KEY`)
+ - `gemini-2.5-pro` (requires `GEMINI_API_KEY`)
+
+ If no model is specified, Weco automatically selects the best available model based on your API keys.
+
  ---

  ### Performance & Expectations
@@ -174,29 +250,26 @@ Weco will parse this output to extract the numerical value (1.5 in this case) as

  ## Contributing

- We welcome contributions! To get started:
-
- 1. **Fork and Clone the Repository:**
+ We welcome your contributions! To get started:

+ 1. **Fork & Clone the Repository:**
  ```bash
  git clone https://github.com/WecoAI/weco-cli.git
  cd weco-cli
  ```

- 2. **Install Development Dependencies:**
-
+ 2. **Install Dependencies:**
  ```bash
  pip install -e ".[dev]"
  ```

  3. **Create a Feature Branch:**
-
  ```bash
  git checkout -b feature/your-feature-name
  ```

- 4. **Make Your Changes:** Ensure your code adheres to our style guidelines and includes relevant tests.
+ 4. **Make Changes:** Ensure your code adheres to our style guidelines and includes relevant tests.

- 5. **Commit and Push** your changes, then open a pull request with a clear description of your enhancements.
+ 5. **Commit, Push & Open a PR**: Commit and push your changes, then open a pull request with a clear description of your enhancements.

  ---
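The README content added above documents both the `weco run` flags and the key-based model fallback. As a minimal end-to-end sketch combining them — the eval command and paths are taken from the hello-kernel-world example shown earlier, and the explicit `--model` is optional:

```bash
# Sketch: run the hello-kernel-world example against Gemini's free tier.
# If --model is omitted, Weco picks a default from the available API keys.
export GEMINI_API_KEY="your_key_here"   # free quota via Google AI Studio

weco run --source optimize.py \
  --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
  --metric speedup \
  --goal maximize \
  --steps 50 \
  --model gemini-2.5-pro
```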
{weco-0.2.20 → weco-0.2.23}/README.md

@@ -1,12 +1,16 @@
  <div align="center">

- # Weco: The Platform for Self-Improving Code
+ <div align="center">
+   <img src="assets/weco.svg" alt="Weco Logo" width="120" height="120" style="margin-bottom: 20px;">
+   <h1>Weco: The Platform for Self-Improving Code</h1>
+ </div>

  [![Python](https://img.shields.io/badge/Python-3.8.0+-blue)](https://www.python.org)
+ [![PyPI version](https://img.shields.io/pypi/v/weco?label=PyPI%20version&color=f05138&labelColor=555555)](https://badge.fury.io/py/weco)
  [![docs](https://img.shields.io/website?url=https://docs.weco.ai/&label=docs)](https://docs.weco.ai/)
- [![PyPI version](https://badge.fury.io/py/weco.svg)](https://badge.fury.io/py/weco)
- [![AIDE](https://img.shields.io/badge/AI--Driven_Exploration-arXiv-orange?style=flat-square&logo=arxiv)](https://arxiv.org/abs/2502.13138)
- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/WecoAI/weco-cli/blob/main/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb)
+ [![PyPI Downloads](https://static.pepy.tech/badge/weco?color=4c1)](https://pepy.tech/projects/weco)
+ [![arXiv on AIDE](https://img.shields.io/badge/arXiv-AIDE-b31b1b?logo=arxiv&logoColor=white)](https://arxiv.org/abs/2502.13138)
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg?labelColor=ffffff&color=F17E01)](https://colab.research.google.com/github/WecoAI/weco-cli/blob/main/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb)

  `pip install weco`

@@ -28,7 +32,7 @@ Example applications include:

  ## Overview

- The `weco` CLI leverages a tree search approach guided by Large Language Models (LLMs) to iteratively explore and refine your code. It automatically applies changes, runs your evaluation script, parses the results, and proposes further improvements based on the specified goal.
+ The `weco` CLI leverages a tree search approach guided by LLMs to iteratively explore and refine your code. It automatically applies changes, runs your evaluation script, parses the results, and proposes further improvements based on the specified goal.

  ![image](https://github.com/user-attachments/assets/a6ed63fa-9c40-498e-aa98-a873e5786509)

@@ -44,28 +48,48 @@ The `weco` CLI leverages a tree search approach guided by Large Language Models

  2. **Set Up LLM API Keys (Required):**

- `weco` requires API keys for the Large Language Models (LLMs) it uses internally. You **must** provide these keys via environment variables:
+ `weco` requires API keys for the LLMs it uses internally. You **must** provide these keys via environment variables:

- - **OpenAI:** `export OPENAI_API_KEY="your_key_here"`
- - **Anthropic:** `export ANTHROPIC_API_KEY="your_key_here"`
- - **Google DeepMind:** `export GEMINI_API_KEY="your_key_here"` (Google AI Studio has a free API usage quota. Create a key [here](https://aistudio.google.com/apikey) to use `weco` for free.)
+ - **OpenAI:** `export OPENAI_API_KEY="your_key_here"` (Create your OpenAI API key [here](https://platform.openai.com/api-keys))
+ - **Anthropic:** `export ANTHROPIC_API_KEY="your_key_here"` (Create your Anthropic API key [here](https://console.anthropic.com/settings/keys))
+ - **Google:** `export GEMINI_API_KEY="your_key_here"` (Google AI Studio has a free API usage quota. Create your Gemini API key [here](https://aistudio.google.com/apikey) to use `weco` for free.)

  ---

  ## Get Started

+ ### Quick Start (Recommended for New Users)
+
+ The easiest way to get started with Weco is to use the **interactive copilot**. Simply navigate to your project directory and run:
+
+ ```bash
+ weco
+ ```
+
+ Or specify a project path:
+
+ ```bash
+ weco /path/to/your/project
+ ```
+
+ This launches Weco's interactive copilot that will:
+
+ 1. **Analyze your codebase** using AI to understand your project structure and identify optimization opportunities
+ 2. **Suggest specific optimizations** tailored to your code (e.g., GPU kernel optimization, model improvements, prompt engineering)
+ 3. **Generate evaluation scripts** automatically or help you configure existing ones
+ 4. **Set up the complete optimization pipeline** with appropriate metrics and commands
+ 5. **Run the optimization** or provide you with the exact command to execute
+
  <div style="background-color: #fff3cd; border: 1px solid #ffeeba; padding: 15px; border-radius: 4px; margin-bottom: 15px;">
  <strong>⚠️ Warning: Code Modification</strong><br>
  <code>weco</code> directly modifies the file specified by <code>--source</code> during the optimization process. It is <strong>strongly recommended</strong> to use version control (like Git) to track changes and revert if needed. Alternatively, ensure you have a backup of your original file before running the command. Upon completion, the file will contain the best-performing version of the code found during the run.
  </div>

- ---
-
- **Example: Optimizing Simple PyTorch Operations**
+ ### Manual Setup

- This basic example shows how to optimize a simple PyTorch function for speedup.
+ **Configure optimization parameters yourself** - If you need precise control over the optimization parameters, you can use the direct `weco run` command:

- For more advanced examples, including [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md), [ML model optimization](/examples/spaceship-titanic/README.md), and [prompt engineering for math problems](https://github.com/WecoAI/weco-cli/tree/main/examples/prompt), please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.
+ **Example: Optimizing Simple PyTorch Operations**

  ```bash
  # Navigate to the example directory
@@ -74,7 +98,7 @@ cd examples/hello-kernel-world
  # Install dependencies
  pip install torch

- # Run Weco
+ # Run Weco with manual configuration
  weco run --source optimize.py \
      --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
      --metric speedup \
@@ -85,36 +109,87 @@ weco run --source optimize.py \

  **Note:** If you have an NVIDIA GPU, change the device in the `--eval-command` to `cuda`. If you are running this on Apple Silicon, set it to `mps`.

+ For more advanced examples, including [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md), [ML model optimization](/examples/spaceship-titanic/README.md), and [prompt engineering for math problems](examples/prompt/README.md), please see the `README.md` files within the corresponding subdirectories under the [`examples/`](examples/) folder.
+
  ---

  ### Arguments for `weco run`

  **Required:**

- | Argument | Description |
- | :--- | :--- |
- | `-s, --source` | Path to the source code file that will be optimized (e.g., `optimize.py`). |
- | `-c, --eval-command`| Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. |
- | `-m, --metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. |
- | `-g, --goal` | `maximize`/`max` to maximize the `--metric` or `minimize`/`min` to minimize it. |
+ | Argument | Description | Example |
+ | :--- | :--- | :--- |
+ | `-s, --source` | Path to the source code file that will be optimized. | `-s model.py` |
+ | `-c, --eval-command`| Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | `-c "python eval.py"` |
+ | `-m, --metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name does not need to match what's printed by your `--eval-command` exactly (e.g., it's okay to use "speedup" instead of "Speedup:"). | `-m speedup` |
+ | `-g, --goal` | `maximize`/`max` to maximize the `--metric` or `minimize`/`min` to minimize it. | `-g maximize` |

  <br>

  **Optional:**

- | Argument | Description | Default |
- | :--- | :--- | :--- |
- | `-n, --steps` | Number of optimization steps (LLM iterations) to run. | 100 |
- | `-M, --model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). | `o4-mini` when `OPENAI_API_KEY` is set; `claude-3-7-sonnet-20250219` when `ANTHROPIC_API_KEY` is set; `gemini-2.5-pro-exp-03-25` when `GEMINI_API_KEY` is set (priority: `OPENAI_API_KEY` > `ANTHROPIC_API_KEY` > `GEMINI_API_KEY`). |
- | `-i, --additional-instructions`| Natural language description of specific instructions **or** path to a file containing detailed instructions to guide the LLM. | `None` |
- | `-l, --log-dir` | Path to the directory to log intermediate steps and final optimization result. | `.runs/` |
+ | Argument | Description | Default | Example |
+ | :--- | :--- | :--- | :--- |
+ | `-n, --steps` | Number of optimization steps (LLM iterations) to run. | 100 | `-n 50` |
+ | `-M, --model` | Model identifier for the LLM to use (e.g., `o4-mini`, `claude-sonnet-4-0`). | `o4-mini` when `OPENAI_API_KEY` is set; `claude-sonnet-4-0` when `ANTHROPIC_API_KEY` is set; `gemini-2.5-pro` when `GEMINI_API_KEY` is set. | `-M o4-mini` |
+ | `-i, --additional-instructions`| Natural language description of specific instructions **or** path to a file containing detailed instructions to guide the LLM. | `None` | `-i instructions.md` or `-i "Optimize the model for faster inference"` |
+ | `-l, --log-dir` | Path to the directory to log intermediate steps and final optimization result. | `.runs/` | `-l ./logs/` |

  ---

- ### Weco Dashboard
- To associate your optimization runs with your Weco account and view them on the Weco dashboard, you can log in. `weco` uses a device authentication flow
+ ### Authentication & Dashboard
+
+ Weco offers both **anonymous** and **authenticated** usage:
+
+ #### Anonymous Usage
+ You can use Weco without creating an account by providing LLM API keys via environment variables. This is perfect for trying out Weco or for users who prefer not to create accounts.
+
+ #### Authenticated Usage (Recommended)
+ To save your optimization runs and view them on the Weco dashboard, you can log in using Weco's secure device authentication flow:
+
+ 1. **During onboarding**: When you run `weco` for the first time, you'll be prompted to log in or skip
+ 2. **Manual login**: Use `weco logout` to clear credentials, then run `weco` again to re-authenticate
+ 3. **Device flow**: Weco will open your browser automatically and guide you through a secure OAuth-style authentication
+
  ![image (16)](https://github.com/user-attachments/assets/8a0a285b-4894-46fa-b6a2-4990017ca0c6)

+ **Benefits of authenticated usage:**
+ - **Run history**: View all your optimization runs on the Weco dashboard
+ - **Progress tracking**: Monitor long-running optimizations remotely
+ - **Enhanced support**: Get better assistance with your optimization challenges
+
+ ---
+
+ ## Command Reference
+
+ ### Basic Usage Patterns
+
+ | Command | Description | When to Use |
+ |---------|-------------|-------------|
+ | `weco` | Launch interactive onboarding | **Recommended for beginners** - Analyzes your codebase and guides you through setup |
+ | `weco /path/to/project` | Launch onboarding for specific project | When working with a project in a different directory |
+ | `weco run [options]` | Direct optimization execution | **For advanced users** - When you know exactly what to optimize and how |
+ | `weco logout` | Clear authentication credentials | To switch accounts or troubleshoot authentication issues |
+
+ ### Model Selection
+
+ You can specify which LLM model to use with the `-M` or `--model` flag:
+
+ ```bash
+ # Use with onboarding
+ weco --model gpt-4o
+
+ # Use with direct execution
+ weco run --model claude-3.5-sonnet --source optimize.py [other options...]
+ ```
+
+ **Available models:**
+ - `gpt-4o`, `o4-mini` (requires `OPENAI_API_KEY`)
+ - `claude-3.5-sonnet`, `claude-sonnet-4-20250514` (requires `ANTHROPIC_API_KEY`)
+ - `gemini-2.5-pro` (requires `GEMINI_API_KEY`)
+
+ If no model is specified, Weco automatically selects the best available model based on your API keys.
+
  ---

  ### Performance & Expectations
@@ -151,29 +226,26 @@ Weco will parse this output to extract the numerical value (1.5 in this case) as

  ## Contributing

- We welcome contributions! To get started:
-
- 1. **Fork and Clone the Repository:**
+ We welcome your contributions! To get started:

+ 1. **Fork & Clone the Repository:**
  ```bash
  git clone https://github.com/WecoAI/weco-cli.git
  cd weco-cli
  ```

- 2. **Install Development Dependencies:**
-
+ 2. **Install Dependencies:**
  ```bash
  pip install -e ".[dev]"
  ```

  3. **Create a Feature Branch:**
-
  ```bash
  git checkout -b feature/your-feature-name
  ```

- 4. **Make Your Changes:** Ensure your code adheres to our style guidelines and includes relevant tests.
+ 4. **Make Changes:** Ensure your code adheres to our style guidelines and includes relevant tests.

- 5. **Commit and Push** your changes, then open a pull request with a clear description of your enhancements.
+ 5. **Commit, Push & Open a PR**: Commit and push your changes, then open a pull request with a clear description of your enhancements.

  ---
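Taken together with the lint workflow at the top of this diff, the revised contributing steps condense to a short shell session. The branch name and commit message below are placeholders; the Ruff commands match what CI enforces:

```bash
# Contributor quick path, mirroring steps 1-5 above
git clone https://github.com/WecoAI/weco-cli.git
cd weco-cli
pip install -e ".[dev]"
git checkout -b feature/your-feature-name

# ...make your changes, then match CI's Ruff checks before committing
ruff check . --fix && ruff format .

git commit -am "Describe your enhancement"
git push -u origin feature/your-feature-name
```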