weco 0.2.6__tar.gz → 0.2.8__tar.gz
This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that public registry.
- {weco-0.2.6 → weco-0.2.8}/.github/workflows/lint.yml +10 -7
- {weco-0.2.6 → weco-0.2.8}/.github/workflows/release.yml +2 -2
- {weco-0.2.6 → weco-0.2.8}/.gitignore +2 -0
- {weco-0.2.6 → weco-0.2.8}/PKG-INFO +39 -92
- {weco-0.2.6 → weco-0.2.8}/README.md +37 -90
- weco-0.2.8/examples/cuda/README.md +40 -0
- weco-0.2.8/examples/metal/README.md +0 -0
- weco-0.2.8/examples/spaceship-titanic/README.md +62 -0
- weco-0.2.8/examples/spaceship-titanic/baseline.py +27 -0
- weco-0.2.8/examples/spaceship-titanic/evaluate.py +71 -0
- weco-0.2.8/examples/spaceship-titanic/optimize.py +27 -0
- weco-0.2.8/examples/spaceship-titanic/requirements-test.txt +8 -0
- weco-0.2.8/examples/spaceship-titanic/utils.py +56 -0
- weco-0.2.8/examples/triton/README.md +0 -0
- {weco-0.2.6 → weco-0.2.8}/pyproject.toml +2 -2
- {weco-0.2.6 → weco-0.2.8}/weco/__init__.py +1 -1
- {weco-0.2.6 → weco-0.2.8}/weco/api.py +3 -8
- {weco-0.2.6 → weco-0.2.8}/weco/cli.py +13 -10
- {weco-0.2.6 → weco-0.2.8}/weco/panels.py +16 -7
- {weco-0.2.6 → weco-0.2.8}/weco.egg-info/PKG-INFO +39 -92
- {weco-0.2.6 → weco-0.2.8}/weco.egg-info/SOURCES.txt +9 -0
- {weco-0.2.6 → weco-0.2.8}/LICENSE +0 -0
- {weco-0.2.6 → weco-0.2.8}/examples/cuda/evaluate.py +0 -0
- {weco-0.2.6 → weco-0.2.8}/examples/cuda/guide.md +0 -0
- {weco-0.2.6 → weco-0.2.8}/examples/cuda/optimize.py +0 -0
- {weco-0.2.6 → weco-0.2.8}/examples/hello-kernel-world/evaluate.py +0 -0
- {weco-0.2.6 → weco-0.2.8}/examples/hello-kernel-world/optimize.py +0 -0
- {weco-0.2.6 → weco-0.2.8}/examples/metal/evaluate.py +0 -0
- {weco-0.2.6 → weco-0.2.8}/examples/metal/examples.rst +0 -0
- {weco-0.2.6 → weco-0.2.8}/examples/metal/optimize.py +0 -0
- {weco-0.2.6 → weco-0.2.8}/examples/triton/evaluate.py +0 -0
- {weco-0.2.6 → weco-0.2.8}/examples/triton/optimize.py +0 -0
- {weco-0.2.6 → weco-0.2.8}/setup.cfg +0 -0
- {weco-0.2.6 → weco-0.2.8}/weco/utils.py +0 -0
- {weco-0.2.6 → weco-0.2.8}/weco.egg-info/dependency_links.txt +0 -0
- {weco-0.2.6 → weco-0.2.8}/weco.egg-info/entry_points.txt +0 -0
- {weco-0.2.6 → weco-0.2.8}/weco.egg-info/requires.txt +0 -0
- {weco-0.2.6 → weco-0.2.8}/weco.egg-info/top_level.txt +0 -0
{weco-0.2.6 → weco-0.2.8}/.github/workflows/lint.yml

@@ -5,6 +5,7 @@ on:
     branches:
       - main
       - dev
+  pull_request: # Run on any pull request
 
 jobs:
   lint:
@@ -12,9 +13,7 @@ jobs:
 
     steps:
     - name: Checkout code
-      uses: actions/checkout@
-      with:
-        ref: ${{ github.head_ref }}
+      uses: actions/checkout@v4
 
     - name: Set up Python
       uses: actions/setup-python@v3
@@ -26,15 +25,19 @@ jobs:
         python -m pip install --upgrade pip
         pip install ruff
 
-    - name: Run
+    - name: Run Linter (PR Check)
+      if: github.event_name == 'pull_request'
      run: |
-        ruff check .
-
-    - name: Run
+        ruff check .
+
+    - name: Run Linter & Formatter (Push)
+      if: github.event_name == 'push'
      run: |
+        ruff check . --fix
        ruff format .
 
    - name: Commit changes
+      if: github.event_name == 'push'
      run: |
        git config --local user.email "action@github.com"
        git config --local user.name "GitHub Action"
{weco-0.2.6 → weco-0.2.8}/.github/workflows/release.yml

@@ -90,7 +90,7 @@ jobs:
         GITHUB_TOKEN: ${{ github.token }}
       run: >-
         gh release create
-        'v0.2.
+        'v0.2.8'
         --repo '${{ github.repository }}'
         --notes ""
@@ -102,5 +102,5 @@ jobs:
         # sigstore-produced signatures and certificates.
       run: >-
         gh release upload
-        'v0.2.
+        'v0.2.8' dist/**
         --repo '${{ github.repository }}'
{weco-0.2.6 → weco-0.2.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: weco
-Version: 0.2.
+Version: 0.2.8
 Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
 Author-email: Weco AI Team <contact@weco.ai>
 License: MIT
@@ -9,7 +9,7 @@ Keywords: AI,Code Optimization,Code Generation
 Classifier: Programming Language :: Python :: 3
 Classifier: Operating System :: OS Independent
 Classifier: License :: OSI Approved :: MIT License
-Requires-Python: >=3.
+Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: requests
@@ -20,13 +20,19 @@ Requires-Dist: build; extra == "dev"
 Requires-Dist: setuptools_scm; extra == "dev"
 Dynamic: license-file
 
-# Weco
+# Weco: The Evaluation-Driven AI Code Optimizer
 
 [](https://www.python.org)
-[](LICENSE)
 [](https://badge.fury.io/py/weco)
+[](https://arxiv.org/abs/2502.13138)
 
-
+Weco systematically optimizes your code, guided directly by your evaluation metrics.
+
+Example applications include:
+
+- **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA, Triton or Metal, optimizing for `latency`, `throughput`, or `memory_bandwidth`.
+- **Model Development**: Tune feature transformations or architectures, optimizing for `validation_accuracy`, `AUC`, or `Sharpe Ratio`.
+- **Prompt Engineering**: Refine prompts for LLMs, optimizing for `win_rate`, `relevance`, or `format_adherence`
 
 https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81
 
@@ -40,37 +46,6 @@ The `weco` CLI leverages a tree search approach guided by Large Language Models
 
 ---
 
-## Example Use Cases
-
-Here's how `weco` can be applied to common ML engineering tasks:
-
-* **GPU Kernel Optimization:**
-    * **Goal:** Improve the speed or efficiency of low-level GPU code.
-    * **How:** `weco` iteratively refines CUDA, Triton, Metal, or other kernel code specified in your `--source` file.
-    * **`--eval-command`:** Typically runs a script that compiles the kernel, executes it, and benchmarks performance (e.g., latency, throughput).
-    * **`--metric`:** Examples include `latency`, `throughput`, `TFLOPS`, `memory_bandwidth`. Optimize to `minimize` latency or `maximize` throughput.
-
-* **Feature Engineering:**
-    * **Goal:** Discover better data transformations or feature combinations for your machine learning models.
-    * **How:** `weco` explores different processing steps or parameters within your feature transformation code (`--source`).
-    * **`--eval-command`:** Executes a script that applies the features, trains/validates a model using those features, and prints a performance score.
-    * **`--metric`:** Examples include `accuracy`, `AUC`, `F1-score`, `validation_loss`. Usually optimized to `maximize` accuracy/AUC/F1 or `minimize` loss.
-
-* **Model Development:**
-    * **Goal:** Tune hyperparameters or experiment with small architectural changes directly within your model's code.
-    * **How:** `weco` modifies hyperparameter values (like learning rate, layer sizes if defined in the code) or structural elements in your model definition (`--source`).
-    * **`--eval-command`:** Runs your model training and evaluation script, printing the key performance indicator.
-    * **`--metric`:** Examples include `validation_accuracy`, `test_loss`, `inference_time`, `perplexity`. Optimize according to the metric's nature (e.g., `maximize` accuracy, `minimize` loss).
-
-* **Prompt Engineering:**
-    * **Goal:** Refine prompts used within larger systems (e.g., for LLM interactions) to achieve better or more consistent outputs.
-    * **How:** `weco` modifies prompt templates, examples, or instructions stored in the `--source` file.
-    * **`--eval-command`:** Executes a script that uses the prompt, generates an output, evaluates that output against desired criteria (e.g., using another LLM, checking for keywords, format validation), and prints a score.
-    * **`--metric`:** Examples include `quality_score`, `relevance`, `task_success_rate`, `format_adherence`. Usually optimized to `maximize`.
-
----
-
-
 ## Setup
 
 1. **Install the Package:**
@@ -97,70 +72,30 @@ Here's how `weco` can be applied to common ML engineering tasks:
 
 ---
 
-###
+### Example: Optimizing Simple PyTorch Operations
+
+This basic example shows how to optimize a simple PyTorch function for speedup.
 
-**
+For more advanced examples, including **[Metal/MLX](/examples/metal/README.md), [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md)**, and **[ML model optimization](/examples/spaceship-titanic/README.md)t**, please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.
 
 ```bash
+# Navigate to the example directory
 cd examples/hello-kernel-world
-
+
+# Install dependencies
+pip install torch
+
+# Run Weco
 weco --source optimize.py \
     --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
     --metric speedup \
     --maximize true \
     --steps 15 \
-    --model
+    --model gemini-2.5-pro-exp-03-25 \
     --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
 ```
 
-Note
-
-**Example 2: Optimizing MLX operations with instructions from a file**
-
-Lets optimize a 2D convolution operation in [`mlx`](https://github.com/ml-explore/mlx) using [Metal](https://developer.apple.com/documentation/metal/). Sometimes, additional context or instructions are too complex for a single command-line string. You can provide a path to a file containing these instructions.
-
-```bash
-cd examples/metal
-pip install mlx
-weco --source optimize.py \
-    --eval-command "python evaluate.py --solution-path optimize.py" \
-    --metric speedup \
-    --maximize true \
-    --steps 30 \
-    --model o3-mini \
-    --additional-instructions examples.rst
-```
-
-**Example 3: Level Agnostic Optimization: Causal Self Attention with Triton & CUDA**
-
-Given how useful causal multihead self attention is to transformers, we've seen its wide adoption across ML engineering and AI research. Its great to keep things at a high-level (in PyTorch) when doing research, but when moving to production you often need to write highly customized low-level kernels to make things run as fast as they can. The `weco` CLI can optimize kernels across a variety of different abstraction levels and frameworks. Example 2 uses Metal but lets explore two more frameworks:
-
-1. [Triton](https://github.com/triton-lang/triton)
-```bash
-cd examples/triton
-pip install torch triton
-weco --source optimize.py \
-    --eval-command "python evaluate.py --solution-path optimize.py" \
-    --metric speedup \
-    --maximize true \
-    --steps 30 \
-    --model gemini-2.5-pro-preview-03-25 \
-    --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
-```
-
-2. [CUDA](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html)
-```bash
-cd examples/cuda
-pip install torch
-weco --source optimize.py \
-    --eval-command "python evaluate.py --solution-path optimize.py" \
-    --metric speedup \
-    --maximize true \
-    --steps 30 \
-    --model gemini-2.5-pro-preview-03-25 \
-    --additional-instructions guide.md
-```
-
+**Note:** If you have an NVIDIA GPU, change the device in the `--eval-command` to `cuda`. If you are running this on Apple Silicon, set it to `mps`.
 
 ---
 
@@ -169,16 +104,28 @@ Given how useful causal multihead self attention is to transformers, we've seen
 | Argument | Description | Required |
 | :-------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- |
 | `--source` | Path to the source code file that will be optimized (e.g., `optimize.py`). | Yes |
-| `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below.
-| `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`.
+| `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
+| `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
 | `--maximize` | Whether to maximize (`true`) or minimize (`false`) the metric. | Yes |
 | `--steps` | Number of optimization steps (LLM iterations) to run. | Yes |
-| `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25
-| `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM.
+| `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`.| Yes |
+| `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+| `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
 
 ---
 
+### Performance & Expectations
+
+Weco, powered by the AIDE algorithm, optimizes code iteratively based on your evaluation results. Achieving significant improvements, especially on complex research-level tasks, often requires substantial exploration time.
 
+The following plot from the independent [Research Engineering Benchmark (RE-Bench)](https://metr.org/AI_R_D_Evaluation_Report.pdf) report shows the performance of AIDE (the algorithm behind Weco) on challenging ML research engineering tasks over different time budgets.
+<p align="center">
+  <img src="https://github.com/user-attachments/assets/ff0e471d-2f50-4e2d-b718-874862f533df" alt="RE-Bench Performance Across Time" width="60%"/>
+</p>
+
+As shown, AIDE demonstrates strong performance gains over time, surpassing lower human expert percentiles within hours and continuing to improve. This highlights the potential of evaluation-driven optimization but also indicates that reaching high levels of performance comparable to human experts on difficult benchmarks can take considerable time (tens of hours in this specific benchmark, corresponding to many `--steps` in the Weco CLI). Factor this into your planning when setting the number of `--steps` for your optimization runs.
+
+---
+
 ### Important Note on Evaluation
 
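As the argument table and the evaluation note above stress, the `--eval-command` must print the chosen `--metric` name and its value to stdout/stderr so that Weco can parse it. A minimal sketch of such an evaluation script follows; the module names, the `run()` entry point, and the timing loop are assumptions for illustration, not part of the weco package.

```python
# evaluate.py (sketch): benchmark a reference module against the weco-edited module
# and print the metric line weco parses, e.g. "speedup: 1.2345".
import importlib
import time


def mean_runtime(fn, iters: int = 100) -> float:
    # Average wall-clock time per call.
    start = time.perf_counter()
    for _ in range(iters):
        fn()
    return (time.perf_counter() - start) / iters


baseline = importlib.import_module("baseline")   # assumed reference implementation
candidate = importlib.import_module("optimize")  # the file passed via --source

speedup = mean_runtime(baseline.run) / mean_runtime(candidate.run)
print(f"speedup: {speedup:.4f}")  # metric name matches --metric speedup
```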
{weco-0.2.6 → weco-0.2.8}/README.md

@@ -1,10 +1,16 @@
-# Weco
+# Weco: The Evaluation-Driven AI Code Optimizer
 
 [](https://www.python.org)
-[](LICENSE)
 [](https://badge.fury.io/py/weco)
+[](https://arxiv.org/abs/2502.13138)
 
-
+Weco systematically optimizes your code, guided directly by your evaluation metrics.
+
+Example applications include:
+
+- **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA, Triton or Metal, optimizing for `latency`, `throughput`, or `memory_bandwidth`.
+- **Model Development**: Tune feature transformations or architectures, optimizing for `validation_accuracy`, `AUC`, or `Sharpe Ratio`.
+- **Prompt Engineering**: Refine prompts for LLMs, optimizing for `win_rate`, `relevance`, or `format_adherence`
 
 https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81
 
@@ -18,37 +24,6 @@ The `weco` CLI leverages a tree search approach guided by Large Language Models
 
 ---
 
-## Example Use Cases
-
-Here's how `weco` can be applied to common ML engineering tasks:
-
-* **GPU Kernel Optimization:**
-    * **Goal:** Improve the speed or efficiency of low-level GPU code.
-    * **How:** `weco` iteratively refines CUDA, Triton, Metal, or other kernel code specified in your `--source` file.
-    * **`--eval-command`:** Typically runs a script that compiles the kernel, executes it, and benchmarks performance (e.g., latency, throughput).
-    * **`--metric`:** Examples include `latency`, `throughput`, `TFLOPS`, `memory_bandwidth`. Optimize to `minimize` latency or `maximize` throughput.
-
-* **Feature Engineering:**
-    * **Goal:** Discover better data transformations or feature combinations for your machine learning models.
-    * **How:** `weco` explores different processing steps or parameters within your feature transformation code (`--source`).
-    * **`--eval-command`:** Executes a script that applies the features, trains/validates a model using those features, and prints a performance score.
-    * **`--metric`:** Examples include `accuracy`, `AUC`, `F1-score`, `validation_loss`. Usually optimized to `maximize` accuracy/AUC/F1 or `minimize` loss.
-
-* **Model Development:**
-    * **Goal:** Tune hyperparameters or experiment with small architectural changes directly within your model's code.
-    * **How:** `weco` modifies hyperparameter values (like learning rate, layer sizes if defined in the code) or structural elements in your model definition (`--source`).
-    * **`--eval-command`:** Runs your model training and evaluation script, printing the key performance indicator.
-    * **`--metric`:** Examples include `validation_accuracy`, `test_loss`, `inference_time`, `perplexity`. Optimize according to the metric's nature (e.g., `maximize` accuracy, `minimize` loss).
-
-* **Prompt Engineering:**
-    * **Goal:** Refine prompts used within larger systems (e.g., for LLM interactions) to achieve better or more consistent outputs.
-    * **How:** `weco` modifies prompt templates, examples, or instructions stored in the `--source` file.
-    * **`--eval-command`:** Executes a script that uses the prompt, generates an output, evaluates that output against desired criteria (e.g., using another LLM, checking for keywords, format validation), and prints a score.
-    * **`--metric`:** Examples include `quality_score`, `relevance`, `task_success_rate`, `format_adherence`. Usually optimized to `maximize`.
-
----
-
-
 ## Setup
 
 1. **Install the Package:**
@@ -75,70 +50,30 @@ Here's how `weco` can be applied to common ML engineering tasks:
 
 ---
 
-###
+### Example: Optimizing Simple PyTorch Operations
+
+This basic example shows how to optimize a simple PyTorch function for speedup.
 
-**
+For more advanced examples, including **[Metal/MLX](/examples/metal/README.md), [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md)**, and **[ML model optimization](/examples/spaceship-titanic/README.md)t**, please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.
 
 ```bash
+# Navigate to the example directory
 cd examples/hello-kernel-world
-
+
+# Install dependencies
+pip install torch
+
+# Run Weco
 weco --source optimize.py \
     --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
     --metric speedup \
     --maximize true \
     --steps 15 \
-    --model
+    --model gemini-2.5-pro-exp-03-25 \
     --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
 ```
 
-Note
-
-**Example 2: Optimizing MLX operations with instructions from a file**
-
-Lets optimize a 2D convolution operation in [`mlx`](https://github.com/ml-explore/mlx) using [Metal](https://developer.apple.com/documentation/metal/). Sometimes, additional context or instructions are too complex for a single command-line string. You can provide a path to a file containing these instructions.
-
-```bash
-cd examples/metal
-pip install mlx
-weco --source optimize.py \
-    --eval-command "python evaluate.py --solution-path optimize.py" \
-    --metric speedup \
-    --maximize true \
-    --steps 30 \
-    --model o3-mini \
-    --additional-instructions examples.rst
-```
-
-**Example 3: Level Agnostic Optimization: Causal Self Attention with Triton & CUDA**
-
-Given how useful causal multihead self attention is to transformers, we've seen its wide adoption across ML engineering and AI research. Its great to keep things at a high-level (in PyTorch) when doing research, but when moving to production you often need to write highly customized low-level kernels to make things run as fast as they can. The `weco` CLI can optimize kernels across a variety of different abstraction levels and frameworks. Example 2 uses Metal but lets explore two more frameworks:
-
-1. [Triton](https://github.com/triton-lang/triton)
-```bash
-cd examples/triton
-pip install torch triton
-weco --source optimize.py \
-    --eval-command "python evaluate.py --solution-path optimize.py" \
-    --metric speedup \
-    --maximize true \
-    --steps 30 \
-    --model gemini-2.5-pro-preview-03-25 \
-    --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
-```
-
-2. [CUDA](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html)
-```bash
-cd examples/cuda
-pip install torch
-weco --source optimize.py \
-    --eval-command "python evaluate.py --solution-path optimize.py" \
-    --metric speedup \
-    --maximize true \
-    --steps 30 \
-    --model gemini-2.5-pro-preview-03-25 \
-    --additional-instructions guide.md
-```
-
+**Note:** If you have an NVIDIA GPU, change the device in the `--eval-command` to `cuda`. If you are running this on Apple Silicon, set it to `mps`.
 
 ---
 
@@ -147,16 +82,28 @@ Given how useful causal multihead self attention is to transformers, we've seen
 | Argument | Description | Required |
 | :-------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- |
 | `--source` | Path to the source code file that will be optimized (e.g., `optimize.py`). | Yes |
-| `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below.
-| `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`.
+| `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
+| `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
 | `--maximize` | Whether to maximize (`true`) or minimize (`false`) the metric. | Yes |
 | `--steps` | Number of optimization steps (LLM iterations) to run. | Yes |
-| `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25
-| `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM.
+| `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`.| Yes |
+| `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+| `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
 
 ---
 
+### Performance & Expectations
+
+Weco, powered by the AIDE algorithm, optimizes code iteratively based on your evaluation results. Achieving significant improvements, especially on complex research-level tasks, often requires substantial exploration time.
 
+The following plot from the independent [Research Engineering Benchmark (RE-Bench)](https://metr.org/AI_R_D_Evaluation_Report.pdf) report shows the performance of AIDE (the algorithm behind Weco) on challenging ML research engineering tasks over different time budgets.
+<p align="center">
+  <img src="https://github.com/user-attachments/assets/ff0e471d-2f50-4e2d-b718-874862f533df" alt="RE-Bench Performance Across Time" width="60%"/>
+</p>
+
+As shown, AIDE demonstrates strong performance gains over time, surpassing lower human expert percentiles within hours and continuing to improve. This highlights the potential of evaluation-driven optimization but also indicates that reaching high levels of performance comparable to human experts on difficult benchmarks can take considerable time (tens of hours in this specific benchmark, corresponding to many `--steps` in the Weco CLI). Factor this into your planning when setting the number of `--steps` for your optimization runs.
+
+---
+
 ### Important Note on Evaluation
 
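The hello-kernel-world command above asks the model to fuse operations in the forward method while keeping numerical deviation small. As a rough, hedged illustration of that kind of rewrite (the module and shapes below are invented for this sketch, not taken from the shipped example), compare an unfused matmul-plus-bias with the single-call `torch.addmm` form:

```python
import torch


class Model(torch.nn.Module):
    """Unfused reference: separate matmul and add operations."""
    def __init__(self, dim: int = 256):
        super().__init__()
        self.w = torch.nn.Parameter(torch.randn(dim, dim))
        self.b = torch.nn.Parameter(torch.randn(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x @ self.w + self.b


class FusedModel(Model):
    """Fused candidate: addmm performs the multiply and add in one call,
    keeping the maximum float deviation small."""
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.addmm(self.b, x, self.w)


x = torch.randn(32, 256)
reference, fused = Model(), FusedModel()
fused.load_state_dict(reference.state_dict())
torch.testing.assert_close(reference(x), fused(x))
```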
weco-0.2.8/examples/cuda/README.md (new file)

@@ -0,0 +1,40 @@
+# Example: Optimizing PyTorch Self-Attention with CUDA
+
+This example showcases using Weco to optimize a PyTorch causal multi-head self-attention implementation by generating custom [CUDA](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html) kernels. This approach aims for low-level optimization beyond standard PyTorch or even Triton for potentially higher performance on NVIDIA GPUs.
+
+This example uses a separate Markdown file (`guide.md`) to provide detailed instructions and context to the LLM.
+
+## Setup
+
+1. Ensure you are in the `examples/cuda` directory.
+2. Install the required dependency:
+   ```bash
+   pip install torch
+   ```
+   *(Note: This example requires a compatible NVIDIA GPU and the CUDA Toolkit installed on your system for compiling and running the generated CUDA code.)*
+
+## Optimization Command
+
+Run the following command to start the optimization process:
+
+```bash
+weco --source optimize.py \
+    --eval-command "python evaluate.py --solution-path optimize.py" \
+    --metric speedup \
+    --maximize true \
+    --steps 30 \
+    --model gemini-2.5-pro-exp-03-25 \
+    --additional-instructions guide.md
+```
+
+### Explanation
+
+* `--source optimize.py`: The initial PyTorch self-attention code to be optimized with CUDA.
+* `--eval-command "python evaluate.py --solution-path optimize.py"`: Runs the evaluation script, which compiles (if necessary) and benchmarks the CUDA-enhanced code in `optimize.py` against a baseline, printing the `speedup`.
+* `--metric speedup`: The optimization target metric.
+* `--maximize true`: Weco aims to increase the speedup.
+* `--steps 30`: The number of optimization iterations.
+* `--model gemini-2.5-pro-exp-03-25`: The LLM used for code generation.
+* `--additional-instructions guide.md`: Points Weco to a file containing detailed instructions for the LLM on how to write the CUDA kernels, handle compilation (e.g., using `torch.utils.cpp_extension`), manage data types, and ensure correctness.
+
+Weco will iteratively modify `optimize.py`, potentially generating and integrating CUDA C++ code, guided by the evaluation results and the instructions in `guide.md`.
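The guide referenced above points the LLM at `torch.utils.cpp_extension` for compilation. Below is a hedged sketch of that inline-compilation pattern, using a deliberately trivial elementwise kernel rather than the attention kernel the example actually targets; it assumes an NVIDIA GPU and an installed CUDA Toolkit.

```python
import torch
from torch.utils.cpp_extension import load_inline

# Trivial elementwise kernel, shown only to illustrate the load_inline pattern.
cuda_src = r"""
__global__ void add_kernel(const float* a, const float* b, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = a[i] + b[i];
}

torch::Tensor fused_add(torch::Tensor a, torch::Tensor b) {
    auto out = torch::empty_like(a);
    int n = a.numel();
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    add_kernel<<<blocks, threads>>>(a.data_ptr<float>(), b.data_ptr<float>(),
                                    out.data_ptr<float>(), n);
    return out;
}
"""

cpp_src = "torch::Tensor fused_add(torch::Tensor a, torch::Tensor b);"

ext = load_inline(name="fused_add_ext", cpp_sources=cpp_src,
                  cuda_sources=cuda_src, functions=["fused_add"])

a = torch.randn(1024, device="cuda")
b = torch.randn(1024, device="cuda")
torch.testing.assert_close(ext.fused_add(a, b), a + b)
```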
weco-0.2.8/examples/metal/README.md (new file)
File without changes
weco-0.2.8/examples/spaceship-titanic/README.md (new file)

@@ -0,0 +1,62 @@
+# Example: Optimizing a Kaggle Classification Model (Spaceship Titanic)
+
+This example demonstrates using Weco to optimize a Python script designed for the [Spaceship Titanic Kaggle competition](https://www.kaggle.com/competitions/spaceship-titanic/overview). The goal is to improve the model's `accuracy` metric by modifying the feature engineering and modeling steps within the `optimize.py` script.
+
+This example uses the `README.md` file (this file) to provide additional instructions to the LLM.
+
+## Setup
+
+1. Ensure you are in the `examples/spaceship-titanic` directory.
+2. **Kaggle Credentials:** You need your Kaggle API credentials (`kaggle.json`) configured to download the competition dataset. Place the `kaggle.json` file in `~/.kaggle/` or set the `KAGGLE_USERNAME` and `KAGGLE_KEY` environment variables. See [Kaggle API documentation](https://github.com/Kaggle/kaggle-api#api-credentials) for details.
+3. **Install Dependencies:** Install the required Python packages:
+   ```bash
+   pip install -r requirements-test.txt
+   ```
+4. **Prepare Data:** Run the utility script once to download the dataset from Kaggle and place it in the expected `public/` and `private/` subdirectories:
+   ```bash
+   python utils.py
+   ```
+   After running `utils.py`, your directory structure should look like this:
+   ```
+   .
+   ├── baseline.py
+   ├── evaluate.py
+   ├── optimize.py
+   ├── private
+   │   └── test.csv
+   ├── public
+   │   ├── sample_submission.csv
+   │   ├── test.csv
+   │   └── train.csv
+   ├── README.md # This file
+   ├── requirements-test.txt
+   └── utils.py
+   ```
+
+## Optimization Command
+
+Run the following command to start optimizing the model:
+
+```bash
+weco --source optimize.py \
+    --eval-command "python optimize.py && python evaluate.py" \
+    --metric accuracy \
+    --maximize true \
+    --steps 10 \
+    --model gemini-2.5-pro-exp-03-25 \
+    --additional-instructions README.md
+```
+
+### Explanation
+
+* `--source optimize.py`: The script containing the model training and prediction logic to be optimized. It starts identical to `baseline.py`.
+* `--eval-command "python optimize.py && python evaluate.py"`: This is a multi-step evaluation.
+    * `python optimize.py`: Runs the modified script to generate predictions (`submission.csv`).
+    * `python evaluate.py`: Compares the generated `submission.csv` against the ground truth (using the training data as a proxy evaluation set in this example) and prints the `accuracy` metric.
+* `--metric accuracy`: The target metric Weco should optimize.
+* `--maximize true`: Weco aims to increase the accuracy.
+* `--steps 10`: The number of optimization iterations.
+* `--model gemini-2.5-pro-exp-03-25`: The LLM driving the optimization.
+* `--additional-instructions README.md`: Provides this file as context to the LLM, which might include hints about feature engineering techniques, model types to try, or specific data columns to focus on (you can add such instructions to this file if desired).
+
+Weco will iteratively modify the feature engineering or modeling code within `optimize.py`, run the evaluation pipeline, and use the resulting `accuracy` to guide further improvements. The `baseline.py` file is provided as a reference starting point.
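For a sense of the kind of edit Weco might make to `optimize.py`, here is a hedged sketch that swaps the constant `False` prediction for a simple scikit-learn model. The feature columns are real Spaceship Titanic columns, but the model choice and the way the training CSV is located are assumptions made for this illustration only.

```python
from pathlib import Path

import pandas as pd
from sklearn.ensemble import RandomForestClassifier


def predict(test: Path, save: Path):
    # Train on the public split that sits next to the test file, then predict.
    train = pd.read_csv(Path(test).parent / "train.csv")
    test_data = pd.read_csv(test)

    features = ["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]
    model = RandomForestClassifier(n_estimators=200, random_state=0)
    model.fit(train[features].fillna(0), train["Transported"].astype(bool))

    preds = model.predict(test_data[features].fillna(0))
    submission = pd.DataFrame({"PassengerId": test_data["PassengerId"], "Transported": preds})
    submission.to_csv(save, index=False)
    print(f"Test submission saved to {save}")
```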
weco-0.2.8/examples/spaceship-titanic/baseline.py (new file)

@@ -0,0 +1,27 @@
+from pathlib import Path
+import pandas as pd
+
+
+def predict(test: Path, save: Path):
+    # TODO: Add a model here
+
+    test_data = pd.read_csv(test)
+    submission = pd.DataFrame({"PassengerId": test_data["PassengerId"], "Transported": False})
+    submission.to_csv(save, index=False)
+    print(f"Test submission saved to {save}")
+
+
+if __name__ == "__main__":
+    # This block is primarily for testing the script directly,
+    # it's not used by the weco evaluation loop.
+    script_dir = Path(__file__).resolve().parent
+    # Use validation data as test data *for this test block only*
+    train_file_path = script_dir / "public" / "train.csv"
+    print("train_file_path:", train_file_path)
+    test_file_path = script_dir / "public" / "test.csv"
+    print("test_file_path:", test_file_path)
+    submission_output_path = script_dir / "submission.csv"
+
+    # Call predict with the DataFrame and the correct output path
+    predict(train_file_path, test_file_path, submission_output_path)
+    print(f"Test submission saved to {submission_output_path}")
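One thing to watch when adapting this file: `predict` is defined with two parameters, but the `__main__` block calls it with three. A hedged correction of just that direct-run block, matching the signature as defined, would look like this (it assumes the `predict` and `Path` imports above):

```python
if __name__ == "__main__":
    # Direct-run harness only; weco's evaluation loop does not use this block.
    script_dir = Path(__file__).resolve().parent
    test_file_path = script_dir / "public" / "test.csv"
    submission_output_path = script_dir / "submission.csv"
    # Two arguments, matching predict(test: Path, save: Path)
    predict(test_file_path, submission_output_path)
```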
weco-0.2.8/examples/spaceship-titanic/evaluate.py (new file)

@@ -0,0 +1,71 @@
+import sys
+import pandas as pd
+from sklearn.metrics import accuracy_score
+from pathlib import Path
+
+
+class InvalidSubmissionError(Exception):
+    """
+    A custom exception for when the agent submission cannot be graded.
+    """
+
+    pass
+
+
+def prepare_for_accuracy_metric(submission: pd.DataFrame, answers: pd.DataFrame, target_column: str, id_column: str) -> dict:
+    # Answers checks
+    assert target_column in answers.columns, f"Answers must have a `{target_column}` column"
+    assert id_column in answers.columns, f"Answers must have a `{id_column}` column"
+
+    # Submission checks
+    if len(submission) != len(answers):
+        raise InvalidSubmissionError("Submission must have the same length as the answers.")
+    if target_column not in submission.columns:
+        raise InvalidSubmissionError(f"Submission must have a `{target_column}` column")
+    if id_column not in submission.columns:
+        raise InvalidSubmissionError(f"Submission must have a `{id_column}` column")
+
+    # sort on id to ensure correct order
+    submission = submission.sort_values(id_column)
+    answers = answers.sort_values(id_column)
+
+    if (submission[id_column].values != answers[id_column].values).any():
+        raise InvalidSubmissionError(f"Submission and Answers `{id_column}`'s do not match")
+
+    y_pred = submission[target_column].to_numpy()
+    y_true = answers[target_column].to_numpy()
+
+    return {"y_true": y_true, "y_pred": y_pred}
+
+
+def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
+    accuracy_inputs = prepare_for_accuracy_metric(
+        submission=submission, answers=answers, target_column="Transported", id_column="PassengerId"
+    )
+    return accuracy_score(**accuracy_inputs)
+
+
+if __name__ == "__main__":
+    # Get the directory where the script is located
+    script_dir = Path(__file__).resolve().parent
+    # The ground truth answers are now in private/test.csv
+    answers_path = script_dir / "private" / "test.csv"
+    # Assume the agent's submission is saved here
+    submission_path = script_dir / "submission.csv"
+
+    # Check if files exist before proceeding
+    if not answers_path.exists():
+        print(f"Error: Answers file not found at {answers_path}")  # Updated path in error message
+        sys.exit(1)
+
+    if not submission_path.exists():
+        print(f"Error: Submission file not found at {submission_path}")
+        sys.exit(1)
+
+    submission = pd.read_csv(submission_path)
+    # Read answers from the updated path
+    answers = pd.read_csv(answers_path)
+
+    # Calculate and print the grade
+    score = grade(submission, answers)
+    print(f"accuracy: {score}")
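`grade` is also convenient to call directly when checking a submission format by hand. A small sketch with made-up passenger IDs (not competition data):

```python
import pandas as pd
from evaluate import grade  # the module shown above

answers = pd.DataFrame({"PassengerId": ["0001_01", "0002_01"], "Transported": [True, False]})
submission = pd.DataFrame({"PassengerId": ["0002_01", "0001_01"], "Transported": [False, True]})

# Both frames are sorted on PassengerId before comparison, so row order is irrelevant.
print(grade(submission, answers))  # 1.0
```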
weco-0.2.8/examples/spaceship-titanic/optimize.py (new file, initially identical to baseline.py)

@@ -0,0 +1,27 @@
+from pathlib import Path
+import pandas as pd
+
+
+def predict(test: Path, save: Path):
+    # TODO: Add a model here
+
+    test_data = pd.read_csv(test)
+    submission = pd.DataFrame({"PassengerId": test_data["PassengerId"], "Transported": False})
+    submission.to_csv(save, index=False)
+    print(f"Test submission saved to {save}")
+
+
+if __name__ == "__main__":
+    # This block is primarily for testing the script directly,
+    # it's not used by the weco evaluation loop.
+    script_dir = Path(__file__).resolve().parent
+    # Use validation data as test data *for this test block only*
+    train_file_path = script_dir / "public" / "train.csv"
+    print("train_file_path:", train_file_path)
+    test_file_path = script_dir / "public" / "test.csv"
+    print("test_file_path:", test_file_path)
+    submission_output_path = script_dir / "submission.csv"
+
+    # Call predict with the DataFrame and the correct output path
+    predict(train_file_path, test_file_path, submission_output_path)
+    print(f"Test submission saved to {submission_output_path}")
weco-0.2.8/examples/spaceship-titanic/utils.py (new file)

@@ -0,0 +1,56 @@
+import pandas as pd
+from sklearn.model_selection import train_test_split
+from pathlib import Path
+import kaggle
+import zipfile
+import os
+
+
+def prepare_data():
+    kaggle.api.competition_download_files("spaceship-titanic")
+    # unzip the data
+    with zipfile.ZipFile("spaceship-titanic.zip", "r") as zip_ref:
+        zip_ref.extractall()
+    # remove the zip file
+    os.remove("spaceship-titanic.zip")
+
+
+def split_data(public: Path, private: Path):
+    df = pd.read_csv("train.csv")
+    # Use a fixed random_state for reproducibility
+    new_train, new_test = train_test_split(df, test_size=0.1, random_state=0)
+
+    os.makedirs(public, exist_ok=True)
+    os.makedirs(private, exist_ok=True)
+
+    example_submission = new_test[["PassengerId", "Transported"]].copy()
+    example_submission["Transported"] = False
+    example_submission.to_csv(public / "sample_submission.csv", index=False)
+
+    new_train.to_csv(public / "train.csv", index=False)
+    print("training sample shape:", new_train.shape)
+    new_test.to_csv(private / "test.csv", index=False)
+    print("test sample shape:", new_test.shape)
+    print(f"Validation data saved to {public / 'test.csv'}")
+    new_test.drop("Transported", axis="columns").to_csv(public / "test.csv", index=False)
+
+    # remove the previous files
+    os.remove("train.csv")
+    os.remove("sample_submission.csv")
+
+
+def setup_data():
+    # download the data
+    prepare_data()
+
+    # Get the directory where the script is located
+    script_dir = Path(__file__).resolve().parent
+    public_path = script_dir / "public"
+    private_path = script_dir / "private"
+
+    # split the data
+    split_data(public_path, private_path)
+
+
+if __name__ == "__main__":
+    setup_data()
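After `python utils.py` has run, a quick sanity check confirms that the label column stays out of the public test split that `optimize.py` sees. This is a sketch that assumes you run it from the example directory with the layout produced by `split_data` above.

```python
from pathlib import Path

import pandas as pd

public, private = Path("public"), Path("private")
train = pd.read_csv(public / "train.csv")
public_test = pd.read_csv(public / "test.csv")     # features only
private_test = pd.read_csv(private / "test.csv")   # features plus Transported labels

assert "Transported" not in public_test.columns, "labels must not leak into the public split"
assert "Transported" in private_test.columns
print(len(train), len(public_test), len(private_test))  # roughly a 90/10 split
```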
weco-0.2.8/examples/triton/README.md (new file)
File without changes
{weco-0.2.6 → weco-0.2.8}/pyproject.toml

@@ -10,9 +10,9 @@ authors = [
 ]
 description = "Documentation for `weco`, a CLI for using Weco AI's code optimizer."
 readme = "README.md"
-version = "0.2.
+version = "0.2.8"
 license = {text = "MIT"}
-requires-python = ">=3.
+requires-python = ">=3.8"
 dependencies = ["requests", "rich"]
 keywords = ["AI", "Code Optimization", "Code Generation"]
 classifiers = [
{weco-0.2.6 → weco-0.2.8}/weco/api.py

@@ -6,14 +6,9 @@ import sys
 
 
 def handle_api_error(e: requests.exceptions.HTTPError, console: rich.console.Console) -> None:
-    """Extract and display error messages from API responses."""
-
-
-        error_message = error_data.get("detail", str(e))
-        console.print(f"[bold red]Server Error:[/] {error_message}")
-    except Exception:
-        # If we can't parse the JSON, just show the original error
-        console.print(f"[bold red]Server Error:[/] {str(e)}")
+    """Extract and display error messages from API responses in a structured format."""
+    error_message = str(e)  # Default message
+    console.print(f"[bold red]Error:[/] {error_message}")
     sys.exit(1)
 
 
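The deleted branch hints at a fuller handler that pulled a JSON `detail` field off the response before falling back to the raw exception text. A hedged sketch of that older shape, reconstructed from the deleted lines rather than taken from the released source:

```python
import sys

import requests
import rich.console


def handle_api_error(e: requests.exceptions.HTTPError, console: rich.console.Console) -> None:
    """Show a JSON `detail` field from the response if one exists, else the raw error."""
    try:
        error_message = e.response.json().get("detail", str(e))
    except Exception:
        # If we can't parse the JSON, just show the original error
        error_message = str(e)
    console.print(f"[bold red]Server Error:[/] {error_message}")
    sys.exit(1)
```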
{weco-0.2.6 → weco-0.2.8}/weco/cli.py

@@ -36,7 +36,7 @@ def main() -> None:
     parser = argparse.ArgumentParser(
         description="[bold cyan]Weco CLI[/]", formatter_class=argparse.RawDescriptionHelpFormatter
     )
-    parser.add_argument("--source", type=str, required=True, help="Path to the
+    parser.add_argument("--source", type=str, required=True, help="Path to the source code (e.g. optimize.py)")
     parser.add_argument(
         "--eval-command", type=str, required=True, help="Command to run for evaluation (e.g. 'python eval.py --arg1=val1')"
     )
@@ -50,6 +50,7 @@ def main() -> None:
     )
     parser.add_argument("--steps", type=int, required=True, help="Number of steps to run")
     parser.add_argument("--model", type=str, required=True, help="Model to use for optimization")
+    parser.add_argument("--log-dir", type=str, default=".runs", help="Directory to store logs and results")
     parser.add_argument(
         "--additional-instructions",
         default=None,
@@ -83,9 +84,11 @@ def main() -> None:
     timeout = 800
 
     # Initialize panels
-    summary_panel = SummaryPanel(
+    summary_panel = SummaryPanel(
+        maximize=maximize, metric_name=metric_name, total_steps=steps, model=args.model, runs_dir=args.log_dir
+    )
     plan_panel = PlanPanel()
-    solution_panels = SolutionPanels(metric_name=metric_name)
+    solution_panels = SolutionPanels(metric_name=metric_name, source_fp=source_fp)
     eval_output_panel = EvaluationOutputPanel()
     tree_panel = MetricTreePanel(maximize=maximize)
     layout = create_optimization_layout()
@@ -112,11 +115,11 @@ def main() -> None:
     with Live(layout, refresh_per_second=refresh_rate, screen=True) as live:
         # Define the runs directory (.runs/<session-id>)
         session_id = session_response["session_id"]
-        runs_dir = pathlib.Path(
+        runs_dir = pathlib.Path(args.log_dir) / session_id
         runs_dir.mkdir(parents=True, exist_ok=True)
 
-        # Save the original code (.runs/<session-id>/original
-        runs_copy_source_fp = runs_dir / "original.
+        # Save the original code (.runs/<session-id>/original.<extension>)
+        runs_copy_source_fp = runs_dir / f"original.{source_fp.suffix}"
         write_to_path(fp=runs_copy_source_fp, content=source_code)
 
         # Write the code string to the source file path
@@ -197,8 +200,8 @@ def main() -> None:
                 api_keys=api_keys,
                 timeout=timeout,
             )
-            # Save next solution (.runs/<session-id>/step_<step
-            write_to_path(fp=runs_dir / f"step_{step}.
+            # Save next solution (.runs/<session-id>/step_<step>.<extension>)
+            write_to_path(fp=runs_dir / f"step_{step}.{source_fp.suffix}", content=eval_and_next_solution_response["code"])
 
             # Write the next solution to the source file
             write_to_path(fp=source_fp, content=eval_and_next_solution_response["code"])
@@ -348,8 +351,8 @@ def main() -> None:
         )
         best_solution_content = f"# Best solution from Weco with a score of {best_score_str}\n\n{best_solution_code}"
 
-        # Save best solution to .runs/<session-id>/best
-        write_to_path(fp=runs_dir / "best.
+        # Save best solution to .runs/<session-id>/best.<extension>
+        write_to_path(fp=runs_dir / f"best.{source_fp.suffix}", content=best_solution_content)
 
         # write the best solution to the source file
         write_to_path(fp=source_fp, content=best_solution_content)
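One pathlib detail in the new naming scheme: `Path.suffix` already includes the leading dot, so interpolating it after a literal dot doubles it. A short sketch of the behavior:

```python
import pathlib

source_fp = pathlib.Path("optimize.py")   # hypothetical --source value
print(source_fp.suffix)                    # ".py" (the dot is included)
print(f"original.{source_fp.suffix}")      # "original..py", what the f-string above produces
print(f"original{source_fp.suffix}")       # "original.py", drops the duplicated dot
```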
{weco-0.2.6 → weco-0.2.8}/weco/panels.py

@@ -6,12 +6,13 @@ from rich.panel import Panel
 from rich.syntax import Syntax
 from typing import Dict, List, Optional, Union, Tuple
 from .utils import format_number
+import pathlib
 
 
 class SummaryPanel:
     """Holds a summary of the optimization session."""
 
-    def __init__(self, maximize: bool, metric_name: str, total_steps: int, model: str, session_id: str = None):
+    def __init__(self, maximize: bool, metric_name: str, total_steps: int, model: str, runs_dir: str, session_id: str = None):
         self.maximize = maximize
         self.metric_name = metric_name
         self.goal = ("Maximizing" if self.maximize else "Minimizing") + f" {self.metric_name}..."
@@ -19,7 +20,8 @@ class SummaryPanel:
         self.total_output_tokens = 0
         self.total_steps = total_steps
         self.model = model
-        self.
+        self.runs_dir = runs_dir
+        self.session_id = session_id if session_id is not None else "N/A"
         self.progress = Progress(
             TextColumn("[progress.description]{task.description}"),
             BarColumn(bar_width=20),
@@ -45,6 +47,8 @@ class SummaryPanel:
         """Create a summary panel with the relevant information."""
         layout = Layout(name="summary")
         summary_table = Table(show_header=False, box=None, padding=(0, 1))
+
+        summary_table.add_row("")
         # Goal
         if final_message is not None:
             summary_table.add_row(f"[bold cyan]Result:[/] {final_message}")
@@ -55,8 +59,7 @@ class SummaryPanel:
         summary_table.add_row(f"[bold cyan]Model:[/] {self.model}")
         summary_table.add_row("")
         # Log directory
-
-        summary_table.add_row(f"[bold cyan]Logs:[/] [blue underline]{runs_dir}[/]")
+        summary_table.add_row(f"[bold cyan]Logs:[/] [blue underline]{self.runs_dir}/{self.session_id}[/]")
         summary_table.add_row("")
         # Token counts
         summary_table.add_row(
@@ -256,13 +259,19 @@ class EvaluationOutputPanel:
 class SolutionPanels:
     """Displays the current and best solutions side by side."""
 
-    def __init__(self, metric_name: str):
+    def __init__(self, metric_name: str, source_fp: pathlib.Path):
         # Current solution
         self.current_node = None
         # Best solution
         self.best_node = None
         # Metric name
         self.metric_name = metric_name.capitalize()
+        # Determine the lexer for the source file
+        self.lexer = self._determine_lexer(source_fp)
+
+    def _determine_lexer(self, source_fp: pathlib.Path) -> str:
+        """Determine the lexer for the source file."""
+        return Syntax.from_path(source_fp).lexer
 
     def update(self, current_node: Union[Node, None], best_node: Union[Node, None]):
         """Update the current and best solutions."""
@@ -280,7 +289,7 @@ class SolutionPanels:
         # Current solution (without score)
         current_title = f"[bold]💡 Current Solution (Step {current_step})"
         current_panel = Panel(
-            Syntax(str(current_code),
+            Syntax(str(current_code), self.lexer, theme="monokai", line_numbers=True, word_wrap=False),
             title=current_title,
             border_style="yellow",
             expand=True,
@@ -290,7 +299,7 @@ class SolutionPanels:
         # Best solution
         best_title = f"[bold]🏆 Best Solution ([green]{self.metric_name}: {f'{best_score:.4f}' if best_score is not None else 'N/A'}[/])"
         best_panel = Panel(
-            Syntax(str(best_code),
+            Syntax(str(best_code), self.lexer, theme="monokai", line_numbers=True, word_wrap=False),
             title=best_title,
             border_style="green",
             expand=True,
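`SolutionPanels` now derives its syntax highlighting from the optimized file instead of hard-coding a language. A hedged sketch of the underlying rich API it relies on (the file name here is illustrative):

```python
import pathlib

from rich.console import Console
from rich.syntax import Syntax

source_fp = pathlib.Path("optimize.py")            # e.g. the --source file
lexer = Syntax.from_path(str(source_fp)).lexer     # guess the lexer from the file

# Reuse the same lexer for code strings that arrive without a file name attached.
code = source_fp.read_text()
Console().print(Syntax(code, lexer, theme="monokai", line_numbers=True))
```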
{weco-0.2.6 → weco-0.2.8}/weco.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: weco
-Version: 0.2.
+Version: 0.2.8
 Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
 Author-email: Weco AI Team <contact@weco.ai>
 License: MIT
@@ -9,7 +9,7 @@ Keywords: AI,Code Optimization,Code Generation
 Classifier: Programming Language :: Python :: 3
 Classifier: Operating System :: OS Independent
 Classifier: License :: OSI Approved :: MIT License
-Requires-Python: >=3.
+Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: requests
@@ -20,13 +20,19 @@ Requires-Dist: build; extra == "dev"
|
|
|
20
20
|
Requires-Dist: setuptools_scm; extra == "dev"
|
|
21
21
|
Dynamic: license-file
|
|
22
22
|
|
|
23
|
-
# Weco
|
|
23
|
+
# Weco: The Evaluation-Driven AI Code Optimizer
|
|
24
24
|
|
|
25
25
|
[](https://www.python.org)
|
|
26
|
-
[](LICENSE)
|
|
27
26
|
[](https://badge.fury.io/py/weco)
|
|
27
|
+
[](https://arxiv.org/abs/2502.13138)
|
|
28
28
|
|
|
29
|
-
|
|
29
|
+
Weco systematically optimizes your code, guided directly by your evaluation metrics.
|
|
30
|
+
|
|
31
|
+
Example applications include:
|
|
32
|
+
|
|
33
|
+
- **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA, Triton or Metal, optimizing for `latency`, `throughput`, or `memory_bandwidth`.
|
|
34
|
+
- **Model Development**: Tune feature transformations or architectures, optimizing for `validation_accuracy`, `AUC`, or `Sharpe Ratio`.
|
|
35
|
+
- **Prompt Engineering**: Refine prompts for LLMs, optimizing for `win_rate`, `relevance`, or `format_adherence`
|
|
30
36
|
|
|
31
37
|
https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81
|
|
32
38
|
|
|
@@ -40,37 +46,6 @@ The `weco` CLI leverages a tree search approach guided by Large Language Models
 
 ---
 
-## Example Use Cases
-
-Here's how `weco` can be applied to common ML engineering tasks:
-
-* **GPU Kernel Optimization:**
-  * **Goal:** Improve the speed or efficiency of low-level GPU code.
-  * **How:** `weco` iteratively refines CUDA, Triton, Metal, or other kernel code specified in your `--source` file.
-  * **`--eval-command`:** Typically runs a script that compiles the kernel, executes it, and benchmarks performance (e.g., latency, throughput).
-  * **`--metric`:** Examples include `latency`, `throughput`, `TFLOPS`, `memory_bandwidth`. Optimize to `minimize` latency or `maximize` throughput.
-
-* **Feature Engineering:**
-  * **Goal:** Discover better data transformations or feature combinations for your machine learning models.
-  * **How:** `weco` explores different processing steps or parameters within your feature transformation code (`--source`).
-  * **`--eval-command`:** Executes a script that applies the features, trains/validates a model using those features, and prints a performance score.
-  * **`--metric`:** Examples include `accuracy`, `AUC`, `F1-score`, `validation_loss`. Usually optimized to `maximize` accuracy/AUC/F1 or `minimize` loss.
-
-* **Model Development:**
-  * **Goal:** Tune hyperparameters or experiment with small architectural changes directly within your model's code.
-  * **How:** `weco` modifies hyperparameter values (like learning rate, layer sizes if defined in the code) or structural elements in your model definition (`--source`).
-  * **`--eval-command`:** Runs your model training and evaluation script, printing the key performance indicator.
-  * **`--metric`:** Examples include `validation_accuracy`, `test_loss`, `inference_time`, `perplexity`. Optimize according to the metric's nature (e.g., `maximize` accuracy, `minimize` loss).
-
-* **Prompt Engineering:**
-  * **Goal:** Refine prompts used within larger systems (e.g., for LLM interactions) to achieve better or more consistent outputs.
-  * **How:** `weco` modifies prompt templates, examples, or instructions stored in the `--source` file.
-  * **`--eval-command`:** Executes a script that uses the prompt, generates an output, evaluates that output against desired criteria (e.g., using another LLM, checking for keywords, format validation), and prints a score.
-  * **`--metric`:** Examples include `quality_score`, `relevance`, `task_success_rate`, `format_adherence`. Usually optimized to `maximize`.
-
----
-
-
 ## Setup
 
 1. **Install the Package:**
@@ -97,70 +72,30 @@ Here's how `weco` can be applied to common ML engineering tasks:
 
 ---
 
-###
+### Example: Optimizing Simple PyTorch Operations
+
+This basic example shows how to optimize a simple PyTorch function for speedup.
 
-**
+For more advanced examples, including **[Metal/MLX](/examples/metal/README.md), [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md)**, and **[ML model optimization](/examples/spaceship-titanic/README.md)**, please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.
 
 ```bash
+# Navigate to the example directory
 cd examples/hello-kernel-world
-
+
+# Install dependencies
+pip install torch
+
+# Run Weco
 weco --source optimize.py \
      --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
      --metric speedup \
      --maximize true \
      --steps 15 \
-     --model
+     --model gemini-2.5-pro-exp-03-25 \
      --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
 ```
 
-Note
-
-**Example 2: Optimizing MLX operations with instructions from a file**
-
-Lets optimize a 2D convolution operation in [`mlx`](https://github.com/ml-explore/mlx) using [Metal](https://developer.apple.com/documentation/metal/). Sometimes, additional context or instructions are too complex for a single command-line string. You can provide a path to a file containing these instructions.
-
-```bash
-cd examples/metal
-pip install mlx
-weco --source optimize.py \
-     --eval-command "python evaluate.py --solution-path optimize.py" \
-     --metric speedup \
-     --maximize true \
-     --steps 30 \
-     --model o3-mini \
-     --additional-instructions examples.rst
-```
-
-**Example 3: Level Agnostic Optimization: Causal Self Attention with Triton & CUDA**
-
-Given how useful causal multihead self attention is to transformers, we've seen its wide adoption across ML engineering and AI research. Its great to keep things at a high-level (in PyTorch) when doing research, but when moving to production you often need to write highly customized low-level kernels to make things run as fast as they can. The `weco` CLI can optimize kernels across a variety of different abstraction levels and frameworks. Example 2 uses Metal but lets explore two more frameworks:
-
-1. [Triton](https://github.com/triton-lang/triton)
-```bash
-cd examples/triton
-pip install torch triton
-weco --source optimize.py \
-     --eval-command "python evaluate.py --solution-path optimize.py" \
-     --metric speedup \
-     --maximize true \
-     --steps 30 \
-     --model gemini-2.5-pro-preview-03-25 \
-     --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
-```
-
-2. [CUDA](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html)
-```bash
-cd examples/cuda
-pip install torch
-weco --source optimize.py \
-     --eval-command "python evaluate.py --solution-path optimize.py" \
-     --metric speedup \
-     --maximize true \
-     --steps 30 \
-     --model gemini-2.5-pro-preview-03-25 \
-     --additional-instructions guide.md
-```
-
+**Note:** If you have an NVIDIA GPU, change the device in the `--eval-command` to `cuda`. If you are running this on Apple Silicon, set it to `mps`.
 
 ---
 
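For orientation, the `--source` file in an example like this is ordinary PyTorch code whose forward pass Weco is free to rewrite. The snippet below is only a sketch of that shape — the real `examples/hello-kernel-world/optimize.py` may look different — showing the kind of unfused element-wise chain the fusion instruction above is aimed at:

```python
# Illustrative stand-in for a --source file; not the actual optimize.py from the repo.
import torch
import torch.nn as nn


class Model(nn.Module):
    """Deliberately naive forward pass: separate element-wise ops that an
    optimizer could fuse into fewer kernel launches / a single expression."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        y = x * 2.0        # scale
        y = y + 1.0        # shift
        y = torch.relu(y)  # activation
        return y
```

Weco then edits this file over the requested `--steps`, and the evaluation command reports how much faster each edited version runs as the `speedup` metric.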
@@ -169,16 +104,28 @@ Given how useful causal multihead self attention is to transformers, we've seen
 | Argument | Description | Required |
 | :-------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- |
 | `--source` | Path to the source code file that will be optimized (e.g., `optimize.py`). | Yes |
-| `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below.
-| `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`.
+| `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
+| `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
 | `--maximize` | Whether to maximize (`true`) or minimize (`false`) the metric. | Yes |
 | `--steps` | Number of optimization steps (LLM iterations) to run. | Yes |
-| `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25
-| `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM.
+| `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
+| `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+| `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
 
 ---
 
+### Performance & Expectations
+
+Weco, powered by the AIDE algorithm, optimizes code iteratively based on your evaluation results. Achieving significant improvements, especially on complex research-level tasks, often requires substantial exploration time.
 
+The following plot from the independent [Research Engineering Benchmark (RE-Bench)](https://metr.org/AI_R_D_Evaluation_Report.pdf) report shows the performance of AIDE (the algorithm behind Weco) on challenging ML research engineering tasks over different time budgets.
+<p align="center">
+  <img src="https://github.com/user-attachments/assets/ff0e471d-2f50-4e2d-b718-874862f533df" alt="RE-Bench Performance Across Time" width="60%"/>
+</p>
+
+As shown, AIDE demonstrates strong performance gains over time, surpassing lower human expert percentiles within hours and continuing to improve. This highlights the potential of evaluation-driven optimization but also indicates that reaching high levels of performance comparable to human experts on difficult benchmarks can take considerable time (tens of hours in this specific benchmark, corresponding to many `--steps` in the Weco CLI). Factor this into your planning when setting the number of `--steps` for your optimization runs.
+
+---
 
 ### Important Note on Evaluation
 
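Putting the arguments together, a typical invocation looks like the sketch below; `model.py`, `evaluate.py`, `instructions.md`, and the log directory name are placeholders rather than files shipped with this package:

```bash
weco --source model.py \
     --eval-command "python evaluate.py" \
     --metric validation_accuracy \
     --maximize true \
     --steps 20 \
     --model o3-mini \
     --additional-instructions instructions.md \
     --log-dir .runs/accuracy-tuning
```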
@@ -4,14 +4,23 @@ README.md
 pyproject.toml
 .github/workflows/lint.yml
 .github/workflows/release.yml
+examples/cuda/README.md
 examples/cuda/evaluate.py
 examples/cuda/guide.md
 examples/cuda/optimize.py
 examples/hello-kernel-world/evaluate.py
 examples/hello-kernel-world/optimize.py
+examples/metal/README.md
 examples/metal/evaluate.py
 examples/metal/examples.rst
 examples/metal/optimize.py
+examples/spaceship-titanic/README.md
+examples/spaceship-titanic/baseline.py
+examples/spaceship-titanic/evaluate.py
+examples/spaceship-titanic/optimize.py
+examples/spaceship-titanic/requirements-test.txt
+examples/spaceship-titanic/utils.py
+examples/triton/README.md
 examples/triton/evaluate.py
 examples/triton/optimize.py
 weco/__init__.py