weco 0.2.18.tar.gz → 0.2.20.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. {weco-0.2.18 → weco-0.2.20}/PKG-INFO +26 -15
  2. {weco-0.2.18 → weco-0.2.20}/README.md +25 -14
  3. {weco-0.2.18 → weco-0.2.20}/examples/cuda/README.md +2 -2
  4. weco-0.2.20/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb +310 -0
  5. {weco-0.2.18 → weco-0.2.20}/examples/prompt/README.md +2 -2
  6. {weco-0.2.18 → weco-0.2.20}/examples/spaceship-titanic/README.md +2 -2
  7. {weco-0.2.18 → weco-0.2.20}/examples/triton/README.md +2 -2
  8. {weco-0.2.18 → weco-0.2.20}/pyproject.toml +1 -1
  9. {weco-0.2.18 → weco-0.2.20}/weco/api.py +24 -40
  10. {weco-0.2.18 → weco-0.2.20}/weco/cli.py +243 -247
  11. {weco-0.2.18 → weco-0.2.20}/weco/panels.py +13 -10
  12. {weco-0.2.18 → weco-0.2.20}/weco.egg-info/PKG-INFO +26 -15
  13. {weco-0.2.18 → weco-0.2.20}/weco.egg-info/SOURCES.txt +1 -0
  14. {weco-0.2.18 → weco-0.2.20}/.github/workflows/lint.yml +0 -0
  15. {weco-0.2.18 → weco-0.2.20}/.github/workflows/release.yml +0 -0
  16. {weco-0.2.18 → weco-0.2.20}/.gitignore +0 -0
  17. {weco-0.2.18 → weco-0.2.20}/.repomixignore +0 -0
  18. {weco-0.2.18 → weco-0.2.20}/LICENSE +0 -0
  19. {weco-0.2.18 → weco-0.2.20}/assets/example-optimization.gif +0 -0
  20. {weco-0.2.18 → weco-0.2.20}/examples/cuda/evaluate.py +0 -0
  21. {weco-0.2.18 → weco-0.2.20}/examples/cuda/guide.md +0 -0
  22. {weco-0.2.18 → weco-0.2.20}/examples/cuda/optimize.py +0 -0
  23. {weco-0.2.18 → weco-0.2.20}/examples/hello-kernel-world/evaluate.py +0 -0
  24. {weco-0.2.18 → weco-0.2.20}/examples/hello-kernel-world/optimize.py +0 -0
  25. {weco-0.2.18 → weco-0.2.20}/examples/prompt/eval.py +0 -0
  26. {weco-0.2.18 → weco-0.2.20}/examples/prompt/optimize.py +0 -0
  27. {weco-0.2.18 → weco-0.2.20}/examples/prompt/prompt_guide.md +0 -0
  28. {weco-0.2.18 → weco-0.2.20}/examples/spaceship-titanic/competition_description.md +0 -0
  29. {weco-0.2.18 → weco-0.2.20}/examples/spaceship-titanic/data/sample_submission.csv +0 -0
  30. {weco-0.2.18 → weco-0.2.20}/examples/spaceship-titanic/data/test.csv +0 -0
  31. {weco-0.2.18 → weco-0.2.20}/examples/spaceship-titanic/data/train.csv +0 -0
  32. {weco-0.2.18 → weco-0.2.20}/examples/spaceship-titanic/evaluate.py +0 -0
  33. {weco-0.2.18 → weco-0.2.20}/examples/spaceship-titanic/requirements-test.txt +0 -0
  34. {weco-0.2.18 → weco-0.2.20}/examples/triton/evaluate.py +0 -0
  35. {weco-0.2.18 → weco-0.2.20}/examples/triton/optimize.py +0 -0
  36. {weco-0.2.18 → weco-0.2.20}/setup.cfg +0 -0
  37. {weco-0.2.18 → weco-0.2.20}/weco/__init__.py +0 -0
  38. {weco-0.2.18 → weco-0.2.20}/weco/auth.py +0 -0
  39. {weco-0.2.18 → weco-0.2.20}/weco/utils.py +0 -0
  40. {weco-0.2.18 → weco-0.2.20}/weco.egg-info/dependency_links.txt +0 -0
  41. {weco-0.2.18 → weco-0.2.20}/weco.egg-info/entry_points.txt +0 -0
  42. {weco-0.2.18 → weco-0.2.20}/weco.egg-info/requires.txt +0 -0
  43. {weco-0.2.18 → weco-0.2.20}/weco.egg-info/top_level.txt +0 -0
{weco-0.2.18 → weco-0.2.20}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: weco
- Version: 0.2.18
+ Version: 0.2.20
  Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
  Author-email: Weco AI Team <contact@weco.ai>
  License: MIT
@@ -29,6 +29,9 @@ Dynamic: license-file
  [![docs](https://img.shields.io/website?url=https://docs.weco.ai/&label=docs)](https://docs.weco.ai/)
  [![PyPI version](https://badge.fury.io/py/weco.svg)](https://badge.fury.io/py/weco)
  [![AIDE](https://img.shields.io/badge/AI--Driven_Exploration-arXiv-orange?style=flat-square&logo=arxiv)](https://arxiv.org/abs/2502.13138)
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/WecoAI/weco-cli/blob/main/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb)
+
+ `pip install weco`

  </div>

@@ -98,9 +101,8 @@ pip install torch
  weco run --source optimize.py \
  --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
  --metric speedup \
- --maximize true \
+ --goal maximize \
  --steps 15 \
- --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
  ```

@@ -108,18 +110,27 @@ weco run --source optimize.py \

  ---

- **Arguments for `weco run`:**
-
- | Argument | Description | Required |
- | :--- | :--- | :--- |
- | `--source` | Path to the source code file that will be optimized (e.g., `optimize.py`). | Yes |
- | `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
- | `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
- | `--maximize` | Whether to maximize (`true`) or minimize (`false`) the metric. | Yes |
- | `--steps` | Number of optimization steps (LLM iterations) to run. | Yes |
- | `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
- | `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
- | `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
+ ### Arguments for `weco run`
+
+ **Required:**
+
+ | Argument | Description |
+ | :--- | :--- |
+ | `-s, --source` | Path to the source code file that will be optimized (e.g., `optimize.py`). |
+ | `-c, --eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. |
+ | `-m, --metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. |
+ | `-g, --goal` | `maximize`/`max` to maximize the `--metric` or `minimize`/`min` to minimize it. |
+
+ <br>
+
+ **Optional:**
+
+ | Argument | Description | Default |
+ | :--- | :--- | :--- |
+ | `-n, --steps` | Number of optimization steps (LLM iterations) to run. | 100 |
+ | `-M, --model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). | `o4-mini` when `OPENAI_API_KEY` is set; `claude-3-7-sonnet-20250219` when `ANTHROPIC_API_KEY` is set; `gemini-2.5-pro-exp-03-25` when `GEMINI_API_KEY` is set (priority: `OPENAI_API_KEY` > `ANTHROPIC_API_KEY` > `GEMINI_API_KEY`). |
+ | `-i, --additional-instructions` | Natural language description of specific instructions **or** path to a file containing detailed instructions to guide the LLM. | `None` |
+ | `-l, --log-dir` | Path to the directory to log intermediate steps and final optimization result. | `.runs/` |

  ---

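The tables above pin down the one real integration contract: whatever `--eval-command` runs must print the `--metric` name followed by its value. A minimal sketch of a conforming evaluation script follows; the `baseline`/`candidate` functions are illustrative stand-ins, not code shipped in the package:

```python
# evaluate_sketch.py -- illustrative only; the sole requirement weco imposes
# is that this process prints "<metric>: <value>" (e.g. "speedup: 1.42").
import time

def baseline(n: int) -> int:        # stand-in for the original code path
    return sum(i * i for i in range(n))

def candidate(n: int) -> int:       # stand-in for the code weco is rewriting
    return sum(i * i for i in range(n))

def bench(fn, n: int = 200_000, iters: int = 20) -> float:
    start = time.perf_counter()
    for _ in range(iters):
        fn(n)
    return (time.perf_counter() - start) / iters

# weco parses this line from stdout/stderr to score the current solution.
print(f"speedup: {bench(baseline) / bench(candidate):.3f}")
```
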
{weco-0.2.18 → weco-0.2.20}/README.md

@@ -6,6 +6,9 @@
  [![docs](https://img.shields.io/website?url=https://docs.weco.ai/&label=docs)](https://docs.weco.ai/)
  [![PyPI version](https://badge.fury.io/py/weco.svg)](https://badge.fury.io/py/weco)
  [![AIDE](https://img.shields.io/badge/AI--Driven_Exploration-arXiv-orange?style=flat-square&logo=arxiv)](https://arxiv.org/abs/2502.13138)
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/WecoAI/weco-cli/blob/main/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb)
+
+ `pip install weco`

  </div>

@@ -75,9 +78,8 @@ pip install torch
  weco run --source optimize.py \
  --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
  --metric speedup \
- --maximize true \
+ --goal maximize \
  --steps 15 \
- --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
  ```

@@ -85,18 +87,27 @@ weco run --source optimize.py \

  ---

- **Arguments for `weco run`:**
-
- | Argument | Description | Required |
- | :--- | :--- | :--- |
- | `--source` | Path to the source code file that will be optimized (e.g., `optimize.py`). | Yes |
- | `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
- | `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
- | `--maximize` | Whether to maximize (`true`) or minimize (`false`) the metric. | Yes |
- | `--steps` | Number of optimization steps (LLM iterations) to run. | Yes |
- | `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
- | `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
- | `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
+ ### Arguments for `weco run`
+
+ **Required:**
+
+ | Argument | Description |
+ | :--- | :--- |
+ | `-s, --source` | Path to the source code file that will be optimized (e.g., `optimize.py`). |
+ | `-c, --eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. |
+ | `-m, --metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. |
+ | `-g, --goal` | `maximize`/`max` to maximize the `--metric` or `minimize`/`min` to minimize it. |
+
+ <br>
+
+ **Optional:**
+
+ | Argument | Description | Default |
+ | :--- | :--- | :--- |
+ | `-n, --steps` | Number of optimization steps (LLM iterations) to run. | 100 |
+ | `-M, --model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). | `o4-mini` when `OPENAI_API_KEY` is set; `claude-3-7-sonnet-20250219` when `ANTHROPIC_API_KEY` is set; `gemini-2.5-pro-exp-03-25` when `GEMINI_API_KEY` is set (priority: `OPENAI_API_KEY` > `ANTHROPIC_API_KEY` > `GEMINI_API_KEY`). |
+ | `-i, --additional-instructions` | Natural language description of specific instructions **or** path to a file containing detailed instructions to guide the LLM. | `None` |
+ | `-l, --log-dir` | Path to the directory to log intermediate steps and final optimization result. | `.runs/` |

  ---

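For reference, the default-model rule in the `-M, --model` row above amounts to a simple priority check over environment variables. A sketch of that rule as stated (not the CLI's actual implementation):

```python
# Default-model priority as documented:
# OPENAI_API_KEY > ANTHROPIC_API_KEY > GEMINI_API_KEY.
import os
from typing import Optional

DEFAULTS = [
    ("OPENAI_API_KEY", "o4-mini"),
    ("ANTHROPIC_API_KEY", "claude-3-7-sonnet-20250219"),
    ("GEMINI_API_KEY", "gemini-2.5-pro-exp-03-25"),
]

def default_model() -> Optional[str]:
    for env_var, model in DEFAULTS:
        if os.environ.get(env_var):
            return model
    return None  # no key set; an explicit --model would be needed

print(default_model())
```
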
{weco-0.2.18 → weco-0.2.20}/examples/cuda/README.md

@@ -21,7 +21,7 @@ Run the following command to start the optimization process:
  weco run --source optimize.py \
  --eval-command "python evaluate.py --solution-path optimize.py" \
  --metric speedup \
- --maximize true \
+ --goal maximize \
  --steps 30 \
  --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions guide.md
@@ -32,7 +32,7 @@ weco run --source optimize.py \
  * `--source optimize.py`: The initial PyTorch self-attention code to be optimized with CUDA.
  * `--eval-command "python evaluate.py --solution-path optimize.py"`: Runs the evaluation script, which compiles (if necessary) and benchmarks the CUDA-enhanced code in `optimize.py` against a baseline, printing the `speedup`.
  * `--metric speedup`: The optimization target metric.
- * `--maximize true`: Weco aims to increase the speedup.
+ * `--goal maximize`: Weco aims to increase the speedup.
  * `--steps 30`: The number of optimization iterations.
  * `--model gemini-2.5-pro-exp-03-25`: The LLM used for code generation.
  * `--additional-instructions guide.md`: Points Weco to a file containing detailed instructions for the LLM on how to write the CUDA kernels, handle compilation (e.g., using `torch.utils.cpp_extension`), manage data types, and ensure correctness.
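The last bullet mentions compiling kernels with `torch.utils.cpp_extension`. For readers unfamiliar with that workflow, here is a generic sketch of the `load_inline` pattern it refers to, using a toy scaling kernel; this is not the attention kernel the example actually optimizes, and it assumes a CUDA-capable GPU with a working `nvcc` toolchain:

```python
# Generic torch.utils.cpp_extension.load_inline pattern (toy kernel, assumptions
# stated above; not code from this example).
import torch
from torch.utils.cpp_extension import load_inline

cuda_src = r"""
__global__ void scale_kernel(const float* x, float* y, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = x[i] * s;
}

torch::Tensor scale(torch::Tensor x, float s) {
    auto y = torch::empty_like(x);
    int n = x.numel();
    int threads = 256;
    scale_kernel<<<(n + threads - 1) / threads, threads>>>(
        x.data_ptr<float>(), y.data_ptr<float>(), s, n);
    return y;
}
"""

ext = load_inline(
    name="scale_ext",
    cpp_sources="torch::Tensor scale(torch::Tensor x, float s);",
    cuda_sources=cuda_src,
    functions=["scale"],
)
print(ext.scale(torch.randn(1024, device="cuda"), 2.0).shape)
```
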
weco-0.2.20/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb (new file)

@@ -0,0 +1,310 @@
+ {
+   "nbformat": 4,
+   "nbformat_minor": 0,
+   "metadata": {
+     "colab": {
+       "provenance": [],
+       "gpuType": "T4"
+     },
+     "kernelspec": {
+       "name": "python3",
+       "display_name": "Python 3"
+     },
+     "language_info": {
+       "name": "python"
+     },
+     "accelerator": "GPU"
+   },
+   "cells": [
+     {
+       "cell_type": "markdown",
+       "source": [
+         "# Hello Kernel World 🔥"
+       ],
+       "metadata": {
+         "id": "RO1o5fS5W8xc"
+       }
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "<p align=\"left\">\n",
+         "  <img src=\"https://raw.githubusercontent.com/WecoAI/weco-cli/main/assets/example-optimization.gif\"\n",
+         "       alt=\"Optimization demo\"\n",
+         "       width=\"720\">\n",
+         "</p>\n",
+         "\n",
+         "## 🖥️ Weco CLI Resources\n",
+         "\n",
+         "- 📖 [CLI Reference](https://docs.weco.ai/cli/cli-reference) - Explore our docs for an in-depth look at what the tool can do\n",
+         "- ✨ [Examples](https://docs.weco.ai/examples) - Explore automated R&D across kernel engineering, ML engineering and prompt engineering"
+       ],
+       "metadata": {
+         "id": "yorBWlqGuC7-"
+       }
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "## Setup Dependencies"
+       ],
+       "metadata": {
+         "id": "5BQGGqbJW2Eq"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "# Install requirements\n",
+         "%pip install -q weco ipywidgets numpy torch\n",
+         "\n",
+         "# Enable custom widgets\n",
+         "from google.colab import output\n",
+         "output.enable_custom_widget_manager()"
+       ],
+       "metadata": {
+         "id": "89doT3fbWcGi"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "Now we need to determine what `DEVICE` we can run this on, a CPU or GPU..."
+       ],
+       "metadata": {
+         "id": "gYxLwOXzfmiF"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "import torch\n",
+         "from rich import print as rprint\n",
+         "\n",
+         "# Check if you're connected to a GPU (it's free!)\n",
+         "if not torch.cuda.is_available():\n",
+         "    DEVICE = \"cpu\"\n",
+         "    rprint(\n",
+         "        \"\"\"\n",
+         "[bold yellow]⚠️ GPU is not enabled.[/bold yellow] The notebook will fall back to [bold]CPU[/bold], but [italic]performance may be lower[/italic].\n",
+         "\n",
+         "[bold]👉 To enable GPU (FREE):[/bold]\n",
+         "• Go to [green]Runtime > Change runtime type[/green]\n",
+         "• Set [bold]'Hardware Accelerator'[/bold] to [bold green]'GPU'[/bold green]\n",
+         "• Click [bold]Save[/bold] and [bold]rerun all cells[/bold]\n",
+         "\n",
+         "[dim]Continuing with CPU for now...[/dim]\n",
+         "\"\"\"\n",
+         "    )\n",
+         "else:\n",
+         "    DEVICE = \"cuda\"\n",
+         "    rprint(\"[bold green]✅ GPU is enabled.[/bold green] Proceeding with [bold green]CUDA[/bold green]...\")"
+       ],
+       "metadata": {
+         "id": "PFAn_bzAXLGO"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "# Download the example files from CLI repo\n",
+         "!wget https://github.com/WecoAI/weco-cli/archive/refs/heads/main.zip -O repo.zip\n",
+         "!unzip -j repo.zip \"weco-cli-main/examples/hello-kernel-world/*\" -d .\n",
+         "!rm repo.zip"
+       ],
+       "metadata": {
+         "id": "dFGClqxpzwyM"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "Google AI Studio has a free API usage quota. Create a key [here](https://aistudio.google.com/apikey) to use `weco` for free!"
+       ],
+       "metadata": {
+         "id": "PGlEsI78bMtN"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "import os\n",
+         "\n",
+         "# Pass your API key below\n",
+         "os.environ[\"GEMINI_API_KEY\"] = \"\"\n",
+         "# os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
+         "# os.environ[\"ANTHROPIC_API_KEY\"] = \"\"\n",
+         "\n",
+         "\n",
+         "if not any([os.environ.get(key) for key in [\"GEMINI_API_KEY\", \"OPENAI_API_KEY\", \"ANTHROPIC_API_KEY\"]]):\n",
+         "    rprint(\n",
+         "\"[bold red]❌ No API keys found.[/bold red]\\n\"\n",
+         "\"\\n\"\n",
+         "\"Please set one of the following environment variables:\\n\"\n",
+         "\"  • [cyan]GEMINI_API_KEY[/cyan]\\n\"\n",
+         "\"  • [cyan]OPENAI_API_KEY[/cyan]\\n\"\n",
+         "\"  • [cyan]ANTHROPIC_API_KEY[/cyan]\\n\"\n",
+         "\"\\n\"\n",
+         "\"Setup your [cyan]GEMINI_API_KEY[/cyan] for free - [underline white]https://aistudio.google.com/apikey[/underline white] !\"\n",
+         "    )\n",
+         "else:\n",
+         "    rprint(\"[bold green]✅ API keys found.[/bold green]\\n\\nWe'll only be able to know if they are correct once the optimization starts.\")"
+       ],
+       "metadata": {
+         "id": "b4XuOeNzYTdp"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "## Let's Start Optimizing!"
+       ],
+       "metadata": {
+         "id": "sbvA8oQceOt5"
+       }
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "Now that we've got our dependecies, GPU and LLM API key sorted out, let's take a look at what code we're optimizing!\n",
+         "\n",
+         "Earlier, we downloaded two files:\n",
+         "1. An evaluation script to help score *how good a solution is* (`evaluate.py`)\n",
+         "2. A snippet of code we'd like to optimize (`optimize.py`)\n",
+         "\n",
+         "Let's take a look at what the code we want to optimize looks like..."
+       ],
+       "metadata": {
+         "id": "4OjXTBkjc4Id"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "from IPython.display import display, HTML\n",
+         "from pygments import highlight\n",
+         "from pygments.lexers import PythonLexer\n",
+         "from pygments.formatters import HtmlFormatter\n",
+         "\n",
+         "def view_code_block(path: str):\n",
+         "    with open(path) as f:\n",
+         "        display(HTML(highlight(f.read(), PythonLexer(), HtmlFormatter(full=True, style=\"monokai\"))))\n",
+         "\n",
+         "view_code_block(\"optimize.py\")"
+       ],
+       "metadata": {
+         "id": "rUTxqxWgcC34"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "Real-world code is often more complex but this is a good place to start. You can find more advanced examples [here](https://docs.weco.ai/examples), however, we'd recommend starting with this notebook as the optimization setup is the exact same, no matter the complexity!"
+       ],
+       "metadata": {
+         "id": "5C5dvasXdmNw"
+       }
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "It's simple to start optimizing any piece of code! You just need to set:\n",
+         "1. Path to source code - we can point this to our `optimize.py`\n",
+         "2. Command to run evaluation - notice how we are using the `DEVICE` we setup earlier\n",
+         "3. The metric we are optimizing for - in this case, the evaluation script (`evaluate.py`) prints the `'speedup'` achieved to the terminal\n",
+         "4. Whether you want to maximize or minimize the metric you mentioned above - in our case, we want to make this code faster!\n",
+         "5. Number of steps to optimize for - we'll keep it low to avoid any rate limits being hit on your free Gemini API key\n",
+         "6. Additional context - anything information you think should guide the optimization process"
+       ],
+       "metadata": {
+         "id": "YfDg-pP9fAdC"
+       }
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "Now let's get straight into it. Keep an eye on the `Best Solution` panel!\n",
+         "\n",
+         "Note that you can track the optimization in the logs directory (`.runs/`) and on our dashboard (links shown in the `Summary` panel)."
+       ],
+       "metadata": {
+         "id": "TbG_3nwEhs5G"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "import sys, weco.cli as weco_cli\n",
+         "\n",
+         "# When running in a terminal, you can use this instead:\n",
+         "# weco run --source optimize.py \\\n",
+         "#   --eval-command f\"python evaluate.py --solution-path optimize.py --device {DEVICE}\" \\\n",
+         "#   --metric speedup \\\n",
+         "#   --goal maximize \\\n",
+         "#   --steps 10 \\\n",
+         "#   --additional-instructions \"Fuse operations in the forward method while ensuring the max float deviation remains small.\"\n",
+         "\n",
+         "sys.argv = [\n",
+         "    \"weco\", \"run\",\n",
+         "    \"--source\", \"optimize.py\",\n",
+         "    \"--eval-command\", f\"python evaluate.py --solution-path optimize.py --device {DEVICE}\",\n",
+         "    \"--metric\", \"speedup\",\n",
+         "    \"--goal\", \"maximize\",\n",
+         "    \"--steps\", \"10\",\n",
+         "    \"--additional-instructions\", \"Fuse operations in the forward method while ensuring the max float deviation remains small.\"\n",
+         "]\n",
+         "\n",
+         "try: weco_cli.main()\n",
+         "except SystemExit: pass"
+       ],
+       "metadata": {
+         "id": "17YZ2euZplDJ"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "Let's take a look at what our optimized code looks like (`optimize.py`)!"
+       ],
+       "metadata": {
+         "id": "990ueX_JsO_1"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "view_code_block(\"optimize.py\")"
+       ],
+       "metadata": {
+         "id": "9dqfzXkajQKs"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "Happy Optimizing from the [Weco](https://www.weco.ai/) Team!\n",
+         "\n",
+         "If you'd like to learn more about what Weco can do, here are some spots to check out:\n",
+         "- 📖 [CLI Reference](https://docs.weco.ai/cli/cli-reference) - Explore our docs for an in-depth look at what the tool can do\n",
+         "- ✨ [Examples](https://docs.weco.ai/examples) - Explore automated R&D across kernel engineering, ML engineering and prompt engineering"
+       ],
+       "metadata": {
+         "id": "17oICow9yjn8"
+       }
+     }
+   ]
+ }
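Outside Colab, the `sys.argv`/`weco_cli.main()` trick in the notebook isn't needed; the same run can be launched from a plain Python script, for example via `subprocess`. A sketch using only the flags shown in the notebook:

```python
# Equivalent invocation from a plain Python script instead of a notebook cell.
import subprocess

DEVICE = "cuda"  # or "cpu", as determined in the notebook
subprocess.run(
    [
        "weco", "run",
        "--source", "optimize.py",
        "--eval-command", f"python evaluate.py --solution-path optimize.py --device {DEVICE}",
        "--metric", "speedup",
        "--goal", "maximize",
        "--steps", "10",
        "--additional-instructions",
        "Fuse operations in the forward method while ensuring the max float deviation remains small.",
    ],
    check=True,  # raise if the CLI exits non-zero
)
```
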
{weco-0.2.18 → weco-0.2.20}/examples/prompt/README.md

@@ -24,10 +24,10 @@ This example uses `gpt-4o-mini` via the OpenAI API by default. Ensure your `OPEN
  weco --source optimize.py \
  --eval-command "python eval.py" \
  --metric accuracy \
- --maximize true \
+ --goal maximize \
  --steps 40 \
  --model gemini-2.5-flash-preview-04-17 \
- --addtional-instructions prompt_guide.md
+ --additional-instructions prompt_guide.md
  ```

  During each evaluation round you will see log lines similar to the following.
{weco-0.2.18 → weco-0.2.20}/examples/spaceship-titanic/README.md

@@ -20,7 +20,7 @@ Run the following command to start optimizing the model:
  weco run --source evaluate.py \
  --eval-command "python evaluate.py --data-dir ./data" \
  --metric accuracy \
- --maximize true \
+ --goal maximize \
  --steps 20 \
  --model o4-mini \
  --additional-instructions "Improve feature engineering, model choice and hyper-parameters."
@@ -34,7 +34,7 @@ weco run --source evaluate.py \
  * [optional] `--data-dir`: path to the train and test data.
  * [optional] `--seed`: Seed for reproduce the experiment.
  * `--metric accuracy`: The target metric Weco should optimize.
- * `--maximize true`: Weco aims to increase the accuracy.
+ * `--goal maximize`: Weco aims to increase the accuracy.
  * `--steps 10`: The number of optimization iterations.
  * `--model gemini-2.5-pro-exp-03-25`: The LLM driving the optimization.
  * `--additional-instructions "Improve feature engineering, model choice and hyper-parameters."`: A simple instruction for model improvement or you can put the path to [`comptition_description.md`](./competition_description.md) within the repo to feed the agent more detailed information.
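Since the bullets above reference `--data-dir` and `--seed` flags on `evaluate.py`, here is an illustrative skeleton of a script exposing those flags and ending with the required metric line; it is not the repository's actual `evaluate.py`:

```python
# Illustrative skeleton only -- not the repository's evaluate.py.
import argparse
import random

parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", default="./data", help="train/test CSV location")
parser.add_argument("--seed", type=int, default=0, help="seed for reproducibility")
args = parser.parse_args()

random.seed(args.seed)
# ... load train.csv / test.csv from args.data_dir, fit and score a model ...
accuracy = 0.79  # placeholder value for illustration
print(f"accuracy: {accuracy:.3f}")  # the line weco parses
```
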
{weco-0.2.18 → weco-0.2.20}/examples/triton/README.md

@@ -19,7 +19,7 @@ Run the following command to start the optimization process:
  weco run --source optimize.py \
  --eval-command "python evaluate.py --solution-path optimize.py" \
  --metric speedup \
- --maximize true \
+ --goal maximize \
  --steps 30 \
  --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
@@ -30,7 +30,7 @@ weco run --source optimize.py \
  * `--source optimize.py`: The PyTorch self-attention implementation to be optimized.
  * `--eval-command "python evaluate.py --solution-path optimize.py"`: Executes the evaluation script, which benchmarks the `optimize.py` code against a baseline and prints the `speedup`.
  * `--metric speedup`: The target metric for optimization.
- * `--maximize true`: Weco should maximize the speedup.
+ * `--goal maximize`: The agent should maximize the speedup.
  * `--steps 30`: The number of optimization iterations.
  * `--model gemini-2.5-pro-exp-03-25`: The LLM driving the optimization.
  * `--additional-instructions "..."`: Provides specific guidance to the LLM, instructing it to use Triton, maintain numerical accuracy ("small max float diff"), and preserve the code structure.
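For readers who haven't written Triton before, here is a generic sketch of the kind of kernel the instructions ask the LLM to produce; it is a toy vector add, not the self-attention kernel this example actually targets, and it assumes a CUDA GPU with `triton` installed:

```python
# Generic Triton kernel sketch (toy vector add; assumptions stated above).
import torch
import triton
import triton.language as tl

@triton.jit
def add_kernel(x_ptr, y_ptr, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(axis=0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements  # guard the ragged final block
    x = tl.load(x_ptr + offsets, mask=mask)
    y = tl.load(y_ptr + offsets, mask=mask)
    tl.store(out_ptr + offsets, x + y, mask=mask)

def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    out = torch.empty_like(x)
    n = out.numel()
    grid = lambda meta: (triton.cdiv(n, meta["BLOCK_SIZE"]),)
    add_kernel[grid](x, y, out, n, BLOCK_SIZE=1024)
    return out

x = torch.randn(4096, device="cuda")
print(torch.allclose(add(x, x), x + x))  # sanity check against eager PyTorch
```
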
{weco-0.2.18 → weco-0.2.20}/pyproject.toml

@@ -8,7 +8,7 @@ name = "weco"
  authors = [{ name = "Weco AI Team", email = "contact@weco.ai" }]
  description = "Documentation for `weco`, a CLI for using Weco AI's code optimizer."
  readme = "README.md"
- version = "0.2.18"
+ version = "0.2.20"
  license = { text = "MIT" }
  requires-python = ">=3.8"
  dependencies = ["requests", "rich", "packaging"]
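After upgrading, the installed release can be confirmed from Python's standard library (available on the declared `requires-python = ">=3.8"` floor):

```python
# Confirm which weco release is installed (expect "0.2.20" after upgrading).
from importlib.metadata import version

print(version("weco"))
```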