weco 0.2.4__tar.gz → 0.2.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {weco-0.2.4 → weco-0.2.6}/.github/workflows/release.yml +2 -2
- weco-0.2.6/PKG-INFO +226 -0
- weco-0.2.6/README.md +204 -0
- weco-0.2.6/examples/cuda/evaluate.py +157 -0
- weco-0.2.6/examples/cuda/guide.md +113 -0
- weco-0.2.6/examples/cuda/optimize.py +44 -0
- {weco-0.2.4/examples/simple-torch → weco-0.2.6/examples/hello-kernel-world}/evaluate.py +32 -17
- {weco-0.2.4/examples/simple-mlx → weco-0.2.6/examples/metal}/evaluate.py +28 -20
- weco-0.2.4/examples/simple-mlx/metal-examples.rst → weco-0.2.6/examples/metal/examples.rst +2 -1
- weco-0.2.6/examples/metal/optimize.py +28 -0
- weco-0.2.6/examples/triton/evaluate.py +153 -0
- weco-0.2.6/examples/triton/optimize.py +44 -0
- {weco-0.2.4 → weco-0.2.6}/pyproject.toml +2 -2
- {weco-0.2.4 → weco-0.2.6}/weco/__init__.py +1 -1
- {weco-0.2.4 → weco-0.2.6}/weco/api.py +8 -2
- {weco-0.2.4 → weco-0.2.6}/weco/cli.py +17 -3
- {weco-0.2.4 → weco-0.2.6}/weco/panels.py +12 -6
- weco-0.2.6/weco.egg-info/PKG-INFO +226 -0
- weco-0.2.6/weco.egg-info/SOURCES.txt +27 -0
- weco-0.2.4/PKG-INFO +0 -141
- weco-0.2.4/README.md +0 -119
- weco-0.2.4/examples/simple-mlx/optimize.py +0 -26
- weco-0.2.4/weco.egg-info/PKG-INFO +0 -141
- weco-0.2.4/weco.egg-info/SOURCES.txt +0 -22
- {weco-0.2.4 → weco-0.2.6}/.github/workflows/lint.yml +0 -0
- {weco-0.2.4 → weco-0.2.6}/.gitignore +0 -0
- {weco-0.2.4 → weco-0.2.6}/LICENSE +0 -0
- {weco-0.2.4/examples/simple-torch → weco-0.2.6/examples/hello-kernel-world}/optimize.py +0 -0
- {weco-0.2.4 → weco-0.2.6}/setup.cfg +0 -0
- {weco-0.2.4 → weco-0.2.6}/weco/utils.py +0 -0
- {weco-0.2.4 → weco-0.2.6}/weco.egg-info/dependency_links.txt +0 -0
- {weco-0.2.4 → weco-0.2.6}/weco.egg-info/entry_points.txt +0 -0
- {weco-0.2.4 → weco-0.2.6}/weco.egg-info/requires.txt +0 -0
- {weco-0.2.4 → weco-0.2.6}/weco.egg-info/top_level.txt +0 -0
{weco-0.2.4 → weco-0.2.6}/.github/workflows/release.yml

```diff
@@ -90,7 +90,7 @@ jobs:
           GITHUB_TOKEN: ${{ github.token }}
         run: >-
           gh release create
-          'v0.2.4'
+          'v0.2.6'
           --repo '${{ github.repository }}'
           --notes ""
@@ -102,5 +102,5 @@ jobs:
         # sigstore-produced signatures and certificates.
         run: >-
           gh release upload
-          'v0.2.4' dist/**
+          'v0.2.6' dist/**
           --repo '${{ github.repository }}'
```
weco-0.2.6/PKG-INFO (ADDED, +226 lines)

```
Metadata-Version: 2.4
Name: weco
Version: 0.2.6
Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
Author-email: Weco AI Team <contact@weco.ai>
License: MIT
Project-URL: Homepage, https://github.com/WecoAI/weco-cli
Keywords: AI,Code Optimization,Code Generation
Classifier: Programming Language :: Python :: 3
Classifier: Operating System :: OS Independent
Classifier: License :: OSI Approved :: MIT License
Requires-Python: >=3.12
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: requests
Requires-Dist: rich
Provides-Extra: dev
Requires-Dist: ruff; extra == "dev"
Requires-Dist: build; extra == "dev"
Requires-Dist: setuptools_scm; extra == "dev"
Dynamic: license-file
```

# Weco CLI – Code Optimizer for Machine Learning Engineers

[](https://www.python.org)
[](LICENSE)
[](https://badge.fury.io/py/weco)

`weco` is a command-line interface for interacting with Weco AI's code optimizer, powered by [AI-Driven Exploration](https://arxiv.org/abs/2502.13138). It helps you automate the improvement of your code for tasks like GPU kernel optimization, feature engineering, model development, and prompt engineering.

https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81

---

## Overview

The `weco` CLI leverages a tree search approach guided by Large Language Models (LLMs) to iteratively explore and refine your code. It automatically applies changes, runs your evaluation script, parses the results, and proposes further improvements based on the specified goal.

---

## Example Use Cases

Here's how `weco` can be applied to common ML engineering tasks:

* **GPU Kernel Optimization:**
    * **Goal:** Improve the speed or efficiency of low-level GPU code.
    * **How:** `weco` iteratively refines CUDA, Triton, Metal, or other kernel code specified in your `--source` file.
    * **`--eval-command`:** Typically runs a script that compiles the kernel, executes it, and benchmarks performance (e.g., latency, throughput).
    * **`--metric`:** Examples include `latency`, `throughput`, `TFLOPS`, `memory_bandwidth`. Optimize to `minimize` latency or `maximize` throughput.

* **Feature Engineering:**
    * **Goal:** Discover better data transformations or feature combinations for your machine learning models.
    * **How:** `weco` explores different processing steps or parameters within your feature transformation code (`--source`).
    * **`--eval-command`:** Executes a script that applies the features, trains/validates a model using those features, and prints a performance score.
    * **`--metric`:** Examples include `accuracy`, `AUC`, `F1-score`, `validation_loss`. Usually optimized to `maximize` accuracy/AUC/F1 or `minimize` loss.

* **Model Development:**
    * **Goal:** Tune hyperparameters or experiment with small architectural changes directly within your model's code.
    * **How:** `weco` modifies hyperparameter values (like learning rate, layer sizes if defined in the code) or structural elements in your model definition (`--source`).
    * **`--eval-command`:** Runs your model training and evaluation script, printing the key performance indicator.
    * **`--metric`:** Examples include `validation_accuracy`, `test_loss`, `inference_time`, `perplexity`. Optimize according to the metric's nature (e.g., `maximize` accuracy, `minimize` loss).

* **Prompt Engineering:**
    * **Goal:** Refine prompts used within larger systems (e.g., for LLM interactions) to achieve better or more consistent outputs.
    * **How:** `weco` modifies prompt templates, examples, or instructions stored in the `--source` file.
    * **`--eval-command`:** Executes a script that uses the prompt, generates an output, evaluates that output against desired criteria (e.g., using another LLM, checking for keywords, format validation), and prints a score.
    * **`--metric`:** Examples include `quality_score`, `relevance`, `task_success_rate`, `format_adherence`. Usually optimized to `maximize`.
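Editor's note (not part of the packaged README): to make the `--eval-command` contract above concrete, here is a minimal sketch of a feature-engineering evaluation script. The `features` module, its `transform` function, and the synthetic dataset are assumptions made for illustration; the only requirement `weco` imposes is that the script prints the chosen metric name with a numerical value.

```python
# evaluate_features.py -- hypothetical evaluation script for the feature-engineering use case.
# Trains a model on the (possibly weco-modified) features and prints "accuracy: <value>".
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

import features  # hypothetical module that weco would optimize via --source features.py

# Synthetic stand-in for the real dataset
X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
X = features.transform(X)  # the transformation weco is asked to improve

X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.25, random_state=0)
model = LogisticRegression(max_iter=1000).fit(X_train, y_train)

# weco matches this line against --metric accuracy
print(f"accuracy: {accuracy_score(y_val, model.predict(X_val)):.4f}")
```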

---


## Setup

1. **Install the Package:**

   ```bash
   pip install weco
   ```

2. **Configure API Keys:**

   Set the appropriate environment variables for your desired language model provider:

   - **OpenAI:** `export OPENAI_API_KEY="your_key_here"`
   - **Anthropic:** `export ANTHROPIC_API_KEY="your_key_here"`
   - **Google DeepMind:** `export GEMINI_API_KEY="your_key_here"` (Google AI Studio has a free API usage quota. Create a key [here](https://aistudio.google.com/apikey) to use weco for free.)

---

## Usage
<div style="background-color: #fff3cd; border: 1px solid #ffeeba; padding: 15px; border-radius: 4px; margin-bottom: 15px;">
<strong>⚠️ Warning: Code Modification</strong><br>
<code>weco</code> directly modifies the file specified by <code>--source</code> during the optimization process. It is <strong>strongly recommended</strong> to use version control (like Git) to track changes and revert if needed. Alternatively, ensure you have a backup of your original file before running the command. Upon completion, the file will contain the best-performing version of the code found during the run.
</div>
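Editor's note (not part of the packaged README): as a concrete safety net, you can commit the source file before a run and then inspect or revert whatever `weco` changed with standard Git commands:

```bash
git add optimize.py && git commit -m "checkpoint before weco run"
weco --source optimize.py ...        # run the optimization with your chosen flags
git diff HEAD -- optimize.py         # review what weco changed
# git checkout HEAD -- optimize.py   # revert if the result is not wanted
```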

---

### Examples

**Example 1: Optimizing PyTorch simple operations**

```bash
cd examples/hello-kernel-world
pip install torch
weco --source optimize.py \
     --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
     --metric speedup \
     --maximize true \
     --steps 15 \
     --model claude-3-7-sonnet-20250219 \
     --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
```

Note that if you have an NVIDIA GPU, change the device to `cuda`. If you are running this on Apple Silicon, set it to `mps`.

**Example 2: Optimizing MLX operations with instructions from a file**

Let's optimize a 2D convolution operation in [`mlx`](https://github.com/ml-explore/mlx) using [Metal](https://developer.apple.com/documentation/metal/). Sometimes, additional context or instructions are too complex for a single command-line string. You can provide a path to a file containing these instructions.

```bash
cd examples/metal
pip install mlx
weco --source optimize.py \
     --eval-command "python evaluate.py --solution-path optimize.py" \
     --metric speedup \
     --maximize true \
     --steps 30 \
     --model o3-mini \
     --additional-instructions examples.rst
```

**Example 3: Level-Agnostic Optimization: Causal Self-Attention with Triton & CUDA**

Given how useful causal multi-head self-attention is to transformers, it has seen wide adoption across ML engineering and AI research. It's great to keep things at a high level (in PyTorch) when doing research, but when moving to production you often need to write highly customized low-level kernels to make things run as fast as possible. The `weco` CLI can optimize kernels across a variety of abstraction levels and frameworks. Example 2 uses Metal, but let's explore two more frameworks:

1. [Triton](https://github.com/triton-lang/triton)
   ```bash
   cd examples/triton
   pip install torch triton
   weco --source optimize.py \
        --eval-command "python evaluate.py --solution-path optimize.py" \
        --metric speedup \
        --maximize true \
        --steps 30 \
        --model gemini-2.5-pro-preview-03-25 \
        --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
   ```

2. [CUDA](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html)
   ```bash
   cd examples/cuda
   pip install torch
   weco --source optimize.py \
        --eval-command "python evaluate.py --solution-path optimize.py" \
        --metric speedup \
        --maximize true \
        --steps 30 \
        --model gemini-2.5-pro-preview-03-25 \
        --additional-instructions guide.md
   ```

---

### Command Line Arguments

| Argument | Description | Required |
| :--- | :--- | :--- |
| `--source` | Path to the source code file that will be optimized (e.g., `optimize.py`). | Yes |
| `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
| `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
| `--maximize` | Whether to maximize (`true`) or minimize (`false`) the metric. | Yes |
| `--steps` | Number of optimization steps (LLM iterations) to run. | Yes |
| `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
| `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |

---

### Important Note on Evaluation

The command specified by `--eval-command` is crucial. It's responsible for executing the potentially modified code from `--source` and assessing its performance. **This command MUST print the metric you specified with `--metric` along with its numerical value to the terminal (standard output or standard error).** Weco reads this output to understand how well each code version performs and guide the optimization process.

For example, if you set `--metric speedup`, your evaluation script (`evaluate.py` in the examples) should output a line like:

```
speedup: 1.5
```

or

```
Final speedup value = 1.5
```

Weco will parse this output to extract the numerical value (1.5 in this case) associated with the metric name ('speedup').
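Editor's note (not part of the packaged README): for a timing-based metric like `speedup`, the evaluation script only needs to benchmark a baseline against the current candidate and print that single line. A minimal sketch, with placeholder workloads standing in for the reference and weco-modified implementations (the packaged `evaluate.py` files shown later in this diff are more thorough):

```python
# minimal_eval.py -- hypothetical evaluation script that prints the metric line weco looks for.
import time


def bench(fn, reps: int = 100) -> float:
    """Average wall-clock time of fn() over `reps` runs."""
    start = time.perf_counter()
    for _ in range(reps):
        fn()
    return (time.perf_counter() - start) / reps


baseline = lambda: sum(i * i for i in range(10_000))   # stand-in for the reference implementation
candidate = lambda: sum(i * i for i in range(10_000))  # stand-in for the weco-modified code

# weco matches this line against --metric speedup
print(f"speedup: {bench(baseline) / bench(candidate):.2f}")
```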


## Contributing

We welcome contributions! To get started:

1. **Fork and Clone the Repository:**
   ```bash
   git clone https://github.com/WecoAI/weco-cli.git
   cd weco-cli
   ```

2. **Install Development Dependencies:**
   ```bash
   pip install -e ".[dev]"
   ```

3. **Create a Feature Branch:**
   ```bash
   git checkout -b feature/your-feature-name
   ```

4. **Make Your Changes:** Ensure your code adheres to our style guidelines and includes relevant tests.

5. **Commit and Push** your changes, then open a pull request with a clear description of your enhancements.

---
weco-0.2.6/README.md (ADDED, +204 lines)

The added README.md is identical to the README body embedded in PKG-INFO above (everything after the metadata header), so its content is not repeated here.
weco-0.2.6/examples/cuda/evaluate.py (ADDED, +157 lines)

```python
import time
import sys
import os
import pathlib
import importlib
import traceback
import torch
import torch.nn as nn
import torch.nn.functional as F
import math


########################################################
# Baseline
########################################################
class Model(nn.Module):
    """
    A vanilla multi-head masked self-attention layer with a projection at the end.
    """

    def __init__(self, n_embd, n_head, attn_pdrop, resid_pdrop, max_seqlen):
        super().__init__()
        assert n_embd % n_head == 0
        # key, query, value projections for all heads, but in a batch
        self.c_attn = nn.Linear(n_embd, 3 * n_embd)
        # output projection
        self.c_proj = nn.Linear(n_embd, n_embd)
        # regularization
        self.attn_dropout = nn.Dropout(attn_pdrop)
        self.resid_dropout = nn.Dropout(resid_pdrop)
        # causal mask to ensure that attention is only applied to the left in the input sequence
        self.register_buffer("bias", torch.tril(torch.ones(max_seqlen, max_seqlen)).view(1, 1, max_seqlen, max_seqlen))
        self.n_head = n_head
        self.n_embd = n_embd

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dimensionality (n_embd)
        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)

        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float("-inf"))
        att = F.softmax(att, dim=-1)
        att = self.attn_dropout(att)
        y = att @ v  # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C)  # re-assemble all head outputs side by side
        # output projection
        y = self.resid_dropout(self.c_proj(y))
        return y


########################################################
# Weco Solution
########################################################
def load_module_from_path(module_path: str, add_to_sys_modules: bool = False):
    # Clean out all old compiled extensions to prevent namespace collisions during build
    module_path = pathlib.Path(module_path)
    name = module_path.stem
    spec = importlib.util.spec_from_file_location(name, module_path)
    mod = importlib.util.module_from_spec(spec)  # type: ignore
    if add_to_sys_modules:
        sys.modules[name] = mod
    spec.loader.exec_module(mod)  # type: ignore
    return mod


########################################################
# Benchmark
########################################################
os.environ["MAX_JOBS"] = "1"  # number of workers for building with ninja


def get_inputs(batch_size, seq_len, n_embd, device):
    return torch.randn(batch_size, seq_len, n_embd, device=device, dtype=torch.float32)


def bench(f, inputs, n_warmup, n_rep):
    with torch.no_grad():
        # warmup
        for _ in range(n_warmup):
            f(inputs)  # noqa

        # benchmark
        t_avg = 0.0
        for _ in range(n_rep):
            torch.cuda.empty_cache()  # Clear cache before timing
            start_time = time.time()
            f(inputs)
            torch.cuda.synchronize()  # Wait for all computations to complete
            t_avg += time.time() - start_time
        t_avg /= n_rep * 1e-3
        return t_avg


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--solution-path", type=str, required=True)
    args = parser.parse_args()

    # benchmarking parameters
    n_correctness_trials = 10
    n_warmup = 1000
    n_rep = 5000

    # init parameters
    max_seqlen = 512
    seq_len = 256
    n_embd = 768
    n_head = 8
    # turn off dropout to measure correctness well
    attn_pdrop = 0.0
    resid_pdrop = 0.0

    # input parameters
    batch_size = 32

    # load solution module
    try:
        torch.manual_seed(0)
        solution_module = load_module_from_path(args.solution_path, add_to_sys_modules=False)
        solution_model = solution_module.Model(
            n_embd=n_embd, n_head=n_head, attn_pdrop=attn_pdrop, resid_pdrop=resid_pdrop, max_seqlen=max_seqlen
        ).to("cuda")
        assert isinstance(solution_model, nn.Module)
    except Exception:
        print(f"Candidate module initialization failed: {traceback.format_exc()}")
        exit(1)

    torch.manual_seed(0)
    baseline_model = Model(
        n_embd=n_embd, n_head=n_head, attn_pdrop=attn_pdrop, resid_pdrop=resid_pdrop, max_seqlen=max_seqlen
    ).to("cuda")

    # measure correctness
    max_diff_avg = 0
    for _ in range(n_correctness_trials):
        inputs = get_inputs(batch_size=batch_size, seq_len=seq_len, n_embd=n_embd, device="cuda")
        with torch.no_grad():
            baseline_output = baseline_model(inputs)
            optimized_output = solution_model(inputs)
            max_diff_avg += torch.max(torch.abs(optimized_output - baseline_output))
    max_diff_avg /= n_correctness_trials
    print(f"max float diff between values of baseline and optimized model: {max_diff_avg}")

    # measure performance
    inputs = get_inputs(batch_size=batch_size, seq_len=seq_len, n_embd=n_embd, device="cuda")
    t_avg_baseline = bench(baseline_model, inputs, n_warmup, n_rep)
    print(f"baseline time: {t_avg_baseline:.2f}ms")
    t_avg_optimized = bench(solution_model, inputs, n_warmup, n_rep)
    print(f"optimized time: {t_avg_optimized:.2f}ms")
    print(f"speedup: {t_avg_baseline / t_avg_optimized:.2f}x")
```