weco 0.2.6.tar.gz → 0.2.7.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {weco-0.2.6 → weco-0.2.7}/.github/workflows/lint.yml +10 -7
  2. {weco-0.2.6 → weco-0.2.7}/.github/workflows/release.yml +2 -2
  3. {weco-0.2.6 → weco-0.2.7}/.gitignore +2 -0
  4. {weco-0.2.6 → weco-0.2.7}/PKG-INFO +63 -9
  5. {weco-0.2.6 → weco-0.2.7}/README.md +62 -8
  6. weco-0.2.7/examples/spaceship-titanic/README.md +93 -0
  7. weco-0.2.7/examples/spaceship-titanic/baseline.py +27 -0
  8. weco-0.2.7/examples/spaceship-titanic/evaluate.py +71 -0
  9. weco-0.2.7/examples/spaceship-titanic/optimize.py +27 -0
  10. weco-0.2.7/examples/spaceship-titanic/requirements-test.txt +8 -0
  11. weco-0.2.7/examples/spaceship-titanic/utils.py +56 -0
  12. {weco-0.2.6 → weco-0.2.7}/pyproject.toml +1 -1
  13. {weco-0.2.6 → weco-0.2.7}/weco/__init__.py +1 -1
  14. {weco-0.2.6 → weco-0.2.7}/weco/cli.py +5 -2
  15. {weco-0.2.6 → weco-0.2.7}/weco/panels.py +4 -4
  16. {weco-0.2.6 → weco-0.2.7}/weco.egg-info/PKG-INFO +63 -9
  17. {weco-0.2.6 → weco-0.2.7}/weco.egg-info/SOURCES.txt +6 -0
  18. {weco-0.2.6 → weco-0.2.7}/LICENSE +0 -0
  19. {weco-0.2.6 → weco-0.2.7}/examples/cuda/evaluate.py +0 -0
  20. {weco-0.2.6 → weco-0.2.7}/examples/cuda/guide.md +0 -0
  21. {weco-0.2.6 → weco-0.2.7}/examples/cuda/optimize.py +0 -0
  22. {weco-0.2.6 → weco-0.2.7}/examples/hello-kernel-world/evaluate.py +0 -0
  23. {weco-0.2.6 → weco-0.2.7}/examples/hello-kernel-world/optimize.py +0 -0
  24. {weco-0.2.6 → weco-0.2.7}/examples/metal/evaluate.py +0 -0
  25. {weco-0.2.6 → weco-0.2.7}/examples/metal/examples.rst +0 -0
  26. {weco-0.2.6 → weco-0.2.7}/examples/metal/optimize.py +0 -0
  27. {weco-0.2.6 → weco-0.2.7}/examples/triton/evaluate.py +0 -0
  28. {weco-0.2.6 → weco-0.2.7}/examples/triton/optimize.py +0 -0
  29. {weco-0.2.6 → weco-0.2.7}/setup.cfg +0 -0
  30. {weco-0.2.6 → weco-0.2.7}/weco/api.py +0 -0
  31. {weco-0.2.6 → weco-0.2.7}/weco/utils.py +0 -0
  32. {weco-0.2.6 → weco-0.2.7}/weco.egg-info/dependency_links.txt +0 -0
  33. {weco-0.2.6 → weco-0.2.7}/weco.egg-info/entry_points.txt +0 -0
  34. {weco-0.2.6 → weco-0.2.7}/weco.egg-info/requires.txt +0 -0
  35. {weco-0.2.6 → weco-0.2.7}/weco.egg-info/top_level.txt +0 -0
{weco-0.2.6 → weco-0.2.7}/.github/workflows/lint.yml

@@ -5,6 +5,7 @@ on:
  branches:
  - main
  - dev
+ pull_request: # Run on any pull request
 
  jobs:
  lint:
@@ -12,9 +13,7 @@ jobs:
 
  steps:
  - name: Checkout code
- uses: actions/checkout@v3
- with:
- ref: ${{ github.head_ref }}
+ uses: actions/checkout@v4
 
  - name: Set up Python
  uses: actions/setup-python@v3
@@ -26,15 +25,19 @@ jobs:
  python -m pip install --upgrade pip
  pip install ruff
 
- - name: Run linter
+ - name: Run Linter (PR Check)
+ if: github.event_name == 'pull_request'
  run: |
- ruff check . --fix
-
- - name: Run formatter
+ ruff check .
+
+ - name: Run Linter & Formatter (Push)
+ if: github.event_name == 'push'
  run: |
+ ruff check . --fix
  ruff format .
 
  - name: Commit changes
+ if: github.event_name == 'push'
  run: |
  git config --local user.email "action@github.com"
  git config --local user.name "GitHub Action"
{weco-0.2.6 → weco-0.2.7}/.github/workflows/release.yml

@@ -90,7 +90,7 @@ jobs:
  GITHUB_TOKEN: ${{ github.token }}
  run: >-
  gh release create
- 'v0.2.6'
+ 'v0.2.7'
  --repo '${{ github.repository }}'
  --notes ""
 
@@ -102,5 +102,5 @@ jobs:
  # sigstore-produced signatures and certificates.
  run: >-
  gh release upload
- 'v0.2.6' dist/**
+ 'v0.2.7' dist/**
  --repo '${{ github.repository }}'
{weco-0.2.6 → weco-0.2.7}/.gitignore

@@ -69,3 +69,5 @@ etc/
  # AI generated files
  digest.txt
  .runs/
+
+ *.pyc
{weco-0.2.6 → weco-0.2.7}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: weco
- Version: 0.2.6
+ Version: 0.2.7
  Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
  Author-email: Weco AI Team <contact@weco.ai>
  License: MIT
@@ -109,7 +109,7 @@ weco --source optimize.py \
  --metric speedup \
  --maximize true \
  --steps 15 \
- --model claude-3-7-sonnet-20250219 \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
  ```
 
@@ -127,7 +127,7 @@ weco --source optimize.py \
  --metric speedup \
  --maximize true \
  --steps 30 \
- --model o3-mini \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions examples.rst
  ```
 
@@ -144,7 +144,7 @@ Given how useful causal multihead self attention is to transformers, we've seen
  --metric speedup \
  --maximize true \
  --steps 30 \
- --model gemini-2.5-pro-preview-03-25 \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
  ```
 
@@ -157,10 +157,52 @@ Given how useful causal multihead self attention is to transformers, we've seen
  --metric speedup \
  --maximize true \
  --steps 30 \
- --model gemini-2.5-pro-preview-03-25 \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions guide.md
  ```
 
+ **Example 4: Optimizing a Classification Model**
+
+ This example demonstrates optimizing a script for a Kaggle competition ([Spaceship Titanic](https://www.kaggle.com/competitions/spaceship-titanic/overview)) to improve classification accuracy. The additional instructions are provided via a separate file (`examples/spaceship-titanic/README.md`).
+
+ First, install the requirements for the example environment:
+ ```bash
+ pip install -r examples/spaceship-titanic/requirements-test.txt
+ ```
+ And run utility function once to prepare the dataset
+ ```bash
+ python examples/spaceship-titanic/utils.py
+ ```
+
+ You should see the following structure at `examples/spaceship-titanic`. You need to prepare the kaggle credentials for downloading the dataset.
+ ```
+ .
+ ├── baseline.py
+ ├── evaluate.py
+ ├── optimize.py
+ ├── private
+ │ └── test.csv
+ ├── public
+ │ ├── sample_submission.csv
+ │ ├── test.csv
+ │ └── train.csv
+ ├── README.md
+ ├── requirements-test.txt
+ └── utils.py
+ ```
+
+ Then, execute the optimization command:
+ ```bash
+ weco --source examples/spaceship-titanic/optimize.py \
+ --eval-command "python examples/spaceship-titanic/optimize.py && python examples/spaceship-titanic/evaluate.py" \
+ --metric accuracy \
+ --maximize true \
+ --steps 10 \
+ --model gemini-2.5-pro-exp-03-25 \
+ --additional-instructions examples/spaceship-titanic/README.md
+ ```
+
+ *The [baseline.py](examples/spaceship-titanic/baseline.py) is provided as a start point for optimization*
 
  ---
 
@@ -169,16 +211,28 @@ Given how useful causal multihead self attention is to transformers, we've seen
  | Argument | Description | Required |
  | :-------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- |
  | `--source` | Path to the source code file that will be optimized (e.g., `optimize.py`). | Yes |
- | `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
- | `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
+ | `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
+ | `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
  | `--maximize` | Whether to maximize (`true`) or minimize (`false`) the metric. | Yes |
  | `--steps` | Number of optimization steps (LLM iterations) to run. | Yes |
- | `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
- | `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+ | `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`.| Yes |
+ | `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+ | `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
 
  ---
 
+ ### Performance & Expectations
 
+ Weco, powered by the AIDE algorithm, optimizes code iteratively based on your evaluation results. Achieving significant improvements, especially on complex research-level tasks, often requires substantial exploration time.
+
+ The following plot from the independent [Research Engineering Benchmark (RE-Bench)](https://metr.org/AI_R_D_Evaluation_Report.pdf) report shows the performance of AIDE (the algorithm behind Weco) on challenging ML research engineering tasks over different time budgets.
+ <p align="center">
+ <img src="https://github.com/user-attachments/assets/ff0e471d-2f50-4e2d-b718-874862f533df" alt="RE-Bench Performance Across Time" width="60%"/>
+ </p>
+
+ As shown, AIDE demonstrates strong performance gains over time, surpassing lower human expert percentiles within hours and continuing to improve. This highlights the potential of evaluation-driven optimization but also indicates that reaching high levels of performance comparable to human experts on difficult benchmarks can take considerable time (tens of hours in this specific benchmark, corresponding to many `--steps` in the Weco CLI). Factor this into your planning when setting the number of `--steps` for your optimization runs.
+
+ ---
 
  ### Important Note on Evaluation
 
{weco-0.2.6 → weco-0.2.7}/README.md

@@ -87,7 +87,7 @@ weco --source optimize.py \
  --metric speedup \
  --maximize true \
  --steps 15 \
- --model claude-3-7-sonnet-20250219 \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
  ```
 
@@ -105,7 +105,7 @@ weco --source optimize.py \
  --metric speedup \
  --maximize true \
  --steps 30 \
- --model o3-mini \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions examples.rst
  ```
 
@@ -122,7 +122,7 @@ Given how useful causal multihead self attention is to transformers, we've seen
  --metric speedup \
  --maximize true \
  --steps 30 \
- --model gemini-2.5-pro-preview-03-25 \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
  ```
 
@@ -135,10 +135,52 @@ Given how useful causal multihead self attention is to transformers, we've seen
  --metric speedup \
  --maximize true \
  --steps 30 \
- --model gemini-2.5-pro-preview-03-25 \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions guide.md
  ```
 
+ **Example 4: Optimizing a Classification Model**
+
+ This example demonstrates optimizing a script for a Kaggle competition ([Spaceship Titanic](https://www.kaggle.com/competitions/spaceship-titanic/overview)) to improve classification accuracy. The additional instructions are provided via a separate file (`examples/spaceship-titanic/README.md`).
+
+ First, install the requirements for the example environment:
+ ```bash
+ pip install -r examples/spaceship-titanic/requirements-test.txt
+ ```
+ And run utility function once to prepare the dataset
+ ```bash
+ python examples/spaceship-titanic/utils.py
+ ```
+
+ You should see the following structure at `examples/spaceship-titanic`. You need to prepare the kaggle credentials for downloading the dataset.
+ ```
+ .
+ ├── baseline.py
+ ├── evaluate.py
+ ├── optimize.py
+ ├── private
+ │ └── test.csv
+ ├── public
+ │ ├── sample_submission.csv
+ │ ├── test.csv
+ │ └── train.csv
+ ├── README.md
+ ├── requirements-test.txt
+ └── utils.py
+ ```
+
+ Then, execute the optimization command:
+ ```bash
+ weco --source examples/spaceship-titanic/optimize.py \
+ --eval-command "python examples/spaceship-titanic/optimize.py && python examples/spaceship-titanic/evaluate.py" \
+ --metric accuracy \
+ --maximize true \
+ --steps 10 \
+ --model gemini-2.5-pro-exp-03-25 \
+ --additional-instructions examples/spaceship-titanic/README.md
+ ```
+
+ *The [baseline.py](examples/spaceship-titanic/baseline.py) is provided as a start point for optimization*
 
  ---
 
@@ -147,16 +189,28 @@ Given how useful causal multihead self attention is to transformers, we've seen
  | Argument | Description | Required |
  | :-------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- |
  | `--source` | Path to the source code file that will be optimized (e.g., `optimize.py`). | Yes |
- | `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
- | `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
+ | `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
+ | `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
  | `--maximize` | Whether to maximize (`true`) or minimize (`false`) the metric. | Yes |
  | `--steps` | Number of optimization steps (LLM iterations) to run. | Yes |
- | `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
- | `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+ | `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`.| Yes |
+ | `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+ | `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
 
  ---
 
+ ### Performance & Expectations
 
+ Weco, powered by the AIDE algorithm, optimizes code iteratively based on your evaluation results. Achieving significant improvements, especially on complex research-level tasks, often requires substantial exploration time.
+
+ The following plot from the independent [Research Engineering Benchmark (RE-Bench)](https://metr.org/AI_R_D_Evaluation_Report.pdf) report shows the performance of AIDE (the algorithm behind Weco) on challenging ML research engineering tasks over different time budgets.
+ <p align="center">
+ <img src="https://github.com/user-attachments/assets/ff0e471d-2f50-4e2d-b718-874862f533df" alt="RE-Bench Performance Across Time" width="60%"/>
+ </p>
+
+ As shown, AIDE demonstrates strong performance gains over time, surpassing lower human expert percentiles within hours and continuing to improve. This highlights the potential of evaluation-driven optimization but also indicates that reaching high levels of performance comparable to human experts on difficult benchmarks can take considerable time (tens of hours in this specific benchmark, corresponding to many `--steps` in the Weco CLI). Factor this into your planning when setting the number of `--steps` for your optimization runs.
+
+ ---
 
  ### Important Note on Evaluation
 
weco-0.2.7/examples/spaceship-titanic/README.md

@@ -0,0 +1,93 @@
+ # Overview
+
+ ## Description
+ Welcome to the year 2912, where your data science skills are needed to solve a cosmic mystery. We've received a transmission from four lightyears away and things aren't looking good.
+
+ The *Spaceship Titanic* was an interstellar passenger liner launched a month ago. With almost 13,000 passengers on board, the vessel set out on its maiden voyage transporting emigrants from our solar system to three newly habitable exoplanets orbiting nearby stars.
+
+ While rounding Alpha Centauri en route to its first destination—the torrid 55 Cancri E—the unwary *Spaceship Titanic* collided with a spacetime anomaly hidden within a dust cloud. Sadly, it met a similar fate as its namesake from 1000 years before. Though the ship stayed intact, almost half of the passengers were transported to an alternate dimension!
+
+ ![joel-filipe-QwoNAhbmLLo-unsplash.jpg](https://storage.googleapis.com/kaggle-media/competitions/Spaceship%20Titanic/joel-filipe-QwoNAhbmLLo-unsplash.jpg)
+
+ To help rescue crews and retrieve the lost passengers, you are challenged to predict which passengers were transported by the anomaly using records recovered from the spaceship’s damaged computer system.
+
+ Help save them and change history!
+
+ ### Acknowledgments
+
+ Photos by [Joel Filipe](https://unsplash.com/@joelfilip?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText), [Richard Gatley](https://unsplash.com/@uncle_rickie?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) and [ActionVance](https://unsplash.com/@actionvance?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash.
+
+ ## Evaluation
+
+ ### Metric
+
+ Submissions are evaluated based on their [classification accuracy](https://developers.google.com/machine-learning/crash-course/classification/accuracy), the percentage of predicted labels that are correct.
+
+ ### Submission Format
+
+ The submission format for the competition is a csv file with the following format:
+
+ ```
+ PassengerId,Transported
+ 0013_01,False
+ 0018_01,False
+ 0019_01,False
+ 0021_01,False
+ etc.
+ ```
+
+ ## Frequently Asked Questions
+
+ ### What is a Getting Started competition?
+
+ Getting Started competitions were created by Kaggle data scientists for people who have little to no machine learning background. They are a great place to begin if you are new to data science or just finished a MOOC and want to get involved in Kaggle.
+
+ Getting Started competitions are a non-competitive way to get familiar with Kaggle’s platform, learn basic machine learning concepts, and start meeting people in the community. They have no cash prize and are on a rolling timeline.
+
+ ### How do I create and manage a team?
+
+ When you accept the competition rules, a team will be created for you. You can invite others to your team, accept a merger with another team, and update basic information like team name by going to the [Team](https://www.kaggle.com/c/spaceship-titanic/team) page.
+
+ We've heard from many Kagglers that teaming up is the best way to learn new skills AND have fun. If you don't have a teammate already, consider asking if anyone wants to team up in the [discussion forum](https://www.kaggle.com/c/spaceship-titanic/discussion).
+
+ ### What are Notebooks?
+
+ Kaggle Notebooks is a cloud computational environment that enables reproducible and collaborative analysis. Notebooks support scripts in Python and R, Jupyter Notebooks, and RMarkdown reports. You can visit the [Notebooks](https://www.kaggle.com/c/spaceship-titanic/notebooks) tab to view all of the publicly shared code for the Spaceship Titanic competition. For more on how to use Notebooks to learn data science, check out our [Courses](https://www.kaggle.com/learn/overview)!
+
+ ### Why did my team disappear from the leaderboard?
+
+ To keep with the spirit of getting-started competitions, we have implemented a two month rolling window on submissions. Once a submission is more than two months old, it will be invalidated and no longer count towards the leaderboard.
+
+ If your team has no submissions in the previous two months, the team will also drop from the leaderboard. This will keep the leaderboard at a manageable size, freshen it up, and prevent newcomers from getting lost in a sea of abandoned scores.
+
+ *"I worked so hard to get that score! Give it back!"* Read more about our decision to implement a rolling leaderboard [here](https://www.kaggle.com/c/titanic/discussion/6240).
+
+ ### How do I contact Support?
+
+ Kaggle does not have a dedicated support team so you’ll typically find that you receive a response more quickly by asking your question in the appropriate forum. (For this competition, you’ll want to use the [Spaceship Titanic discussion forum](https://www.kaggle.com/c/spaceship-titanic/discussion)).
+
+ Support is only able to help with issues that are being experienced by all participants. Before contacting support, please check the discussion forum for information on your problem. If you can’t find it, you can post your problem in the forum so a fellow participant or a Kaggle team member can provide help. The forums are full of useful information on the data, metric, and different approaches. We encourage you to use the forums often. If you share your knowledge, you'll find that others will share a lot in turn!
+
+ If your problem persists or it seems to be effective all participants then please [contact us](https://www.kaggle.com/contact).
+
+ # Dataset Description
+
+ In this competition your task is to predict whether a passenger was transported to an alternate dimension during the Spaceship Titanic's collision with the spacetime anomaly. To help you make these predictions, you're given a set of personal records recovered from the ship's damaged computer system.
+
+ ## File and Data Field Descriptions
+
+ - **train.csv** - Personal records for about two-thirds (~8700) of the passengers, to be used as training data.
+ - `PassengerId` - A unique Id for each passenger. Each Id takes the form `gggg_pp` where `gggg` indicates a group the passenger is travelling with and `pp` is their number within the group. People in a group are often family members, but not always.
+ - `HomePlanet` - The planet the passenger departed from, typically their planet of permanent residence.
+ - `CryoSleep` - Indicates whether the passenger elected to be put into suspended animation for the duration of the voyage. Passengers in cryosleep are confined to their cabins.
+ - `Cabin` - The cabin number where the passenger is staying. Takes the form `deck/num/side`, where `side` can be either `P` for *Port* or `S` for *Starboard*.
+ - `Destination` - The planet the passenger will be debarking to.
+ - `Age` - The age of the passenger.
+ - `VIP` - Whether the passenger has paid for special VIP service during the voyage.
+ - `RoomService`, `FoodCourt`, `ShoppingMall`, `Spa`, `VRDeck` - Amount the passenger has billed at each of the *Spaceship Titanic*'s many luxury amenities.
+ - `Name` - The first and last names of the passenger.
+ - `Transported` - Whether the passenger was transported to another dimension. This is the target, the column you are trying to predict.
+ - **test.csv** - Personal records for the remaining one-third (~4300) of the passengers, to be used as test data. Your task is to predict the value of `Transported` for the passengers in this set.
+ - **sample_submission.csv** - A submission file in the correct format.
+ - `PassengerId` - Id for each passenger in the test set.
+ - `Transported` - The target. For each passenger, predict either `True` or `False`.
weco-0.2.7/examples/spaceship-titanic/baseline.py

@@ -0,0 +1,27 @@
+ from pathlib import Path
+ import pandas as pd
+
+
+ def predict(test: Path, save: Path):
+     # TODO: Add a model here
+
+     test_data = pd.read_csv(test)
+     submission = pd.DataFrame({"PassengerId": test_data["PassengerId"], "Transported": False})
+     submission.to_csv(save, index=False)
+     print(f"Test submission saved to {save}")
+
+
+ if __name__ == "__main__":
+     # This block is primarily for testing the script directly,
+     # it's not used by the weco evaluation loop.
+     script_dir = Path(__file__).resolve().parent
+     # Use validation data as test data *for this test block only*
+     train_file_path = script_dir / "public" / "train.csv"
+     print("train_file_path:", train_file_path)
+     test_file_path = script_dir / "public" / "test.csv"
+     print("test_file_path:", test_file_path)
+     submission_output_path = script_dir / "submission.csv"
+
+     # Call predict with the DataFrame and the correct output path
+     predict(train_file_path, test_file_path, submission_output_path)
+     print(f"Test submission saved to {submission_output_path}")
weco-0.2.7/examples/spaceship-titanic/evaluate.py

@@ -0,0 +1,71 @@
+ import sys
+ import pandas as pd
+ from sklearn.metrics import accuracy_score
+ from pathlib import Path
+
+
+ class InvalidSubmissionError(Exception):
+     """
+     A custom exception for when the agent submission cannot be graded.
+     """
+
+     pass
+
+
+ def prepare_for_accuracy_metric(submission: pd.DataFrame, answers: pd.DataFrame, target_column: str, id_column: str) -> dict:
+     # Answers checks
+     assert target_column in answers.columns, f"Answers must have a `{target_column}` column"
+     assert id_column in answers.columns, f"Answers must have a `{id_column}` column"
+
+     # Submission checks
+     if len(submission) != len(answers):
+         raise InvalidSubmissionError("Submission must have the same length as the answers.")
+     if target_column not in submission.columns:
+         raise InvalidSubmissionError(f"Submission must have a `{target_column}` column")
+     if id_column not in submission.columns:
+         raise InvalidSubmissionError(f"Submission must have a `{id_column}` column")
+
+     # sort on id to ensure correct order
+     submission = submission.sort_values(id_column)
+     answers = answers.sort_values(id_column)
+
+     if (submission[id_column].values != answers[id_column].values).any():
+         raise InvalidSubmissionError(f"Submission and Answers `{id_column}`'s do not match")
+
+     y_pred = submission[target_column].to_numpy()
+     y_true = answers[target_column].to_numpy()
+
+     return {"y_true": y_true, "y_pred": y_pred}
+
+
+ def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
+     accuracy_inputs = prepare_for_accuracy_metric(
+         submission=submission, answers=answers, target_column="Transported", id_column="PassengerId"
+     )
+     return accuracy_score(**accuracy_inputs)
+
+
+ if __name__ == "__main__":
+     # Get the directory where the script is located
+     script_dir = Path(__file__).resolve().parent
+     # The ground truth answers are now in private/test.csv
+     answers_path = script_dir / "private" / "test.csv"
+     # Assume the agent's submission is saved here
+     submission_path = script_dir / "submission.csv"
+
+     # Check if files exist before proceeding
+     if not answers_path.exists():
+         print(f"Error: Answers file not found at {answers_path}")  # Updated path in error message
+         sys.exit(1)
+
+     if not submission_path.exists():
+         print(f"Error: Submission file not found at {submission_path}")
+         sys.exit(1)
+
+     submission = pd.read_csv(submission_path)
+     # Read answers from the updated path
+     answers = pd.read_csv(answers_path)
+
+     # Calculate and print the grade
+     score = grade(submission, answers)
+     print(f"accuracy: {score}")
weco-0.2.7/examples/spaceship-titanic/optimize.py

@@ -0,0 +1,27 @@
+ from pathlib import Path
+ import pandas as pd
+
+
+ def predict(test: Path, save: Path):
+     # TODO: Add a model here
+
+     test_data = pd.read_csv(test)
+     submission = pd.DataFrame({"PassengerId": test_data["PassengerId"], "Transported": False})
+     submission.to_csv(save, index=False)
+     print(f"Test submission saved to {save}")
+
+
+ if __name__ == "__main__":
+     # This block is primarily for testing the script directly,
+     # it's not used by the weco evaluation loop.
+     script_dir = Path(__file__).resolve().parent
+     # Use validation data as test data *for this test block only*
+     train_file_path = script_dir / "public" / "train.csv"
+     print("train_file_path:", train_file_path)
+     test_file_path = script_dir / "public" / "test.csv"
+     print("test_file_path:", test_file_path)
+     submission_output_path = script_dir / "submission.csv"
+
+     # Call predict with the DataFrame and the correct output path
+     predict(train_file_path, test_file_path, submission_output_path)
+     print(f"Test submission saved to {submission_output_path}")
weco-0.2.7/examples/spaceship-titanic/requirements-test.txt

@@ -0,0 +1,8 @@
+ pandas
+ numpy
+ scikit-learn
+ torch
+ xgboost
+ lightgbm
+ catboost
+ kaggle
weco-0.2.7/examples/spaceship-titanic/utils.py

@@ -0,0 +1,56 @@
+ import pandas as pd
+ from sklearn.model_selection import train_test_split
+ from pathlib import Path
+ import kaggle
+ import zipfile
+ import os
+
+
+ def prepare_data():
+     kaggle.api.competition_download_files("spaceship-titanic")
+     # unzip the data
+     with zipfile.ZipFile("spaceship-titanic.zip", "r") as zip_ref:
+         zip_ref.extractall()
+     # remove the zip file
+     os.remove("spaceship-titanic.zip")
+
+
+ def split_data(public: Path, private: Path):
+     df = pd.read_csv("train.csv")
+     # Use a fixed random_state for reproducibility
+     new_train, new_test = train_test_split(df, test_size=0.1, random_state=0)
+
+     os.makedirs(public, exist_ok=True)
+     os.makedirs(private, exist_ok=True)
+
+     example_submission = new_test[["PassengerId", "Transported"]].copy()
+     example_submission["Transported"] = False
+     example_submission.to_csv(public / "sample_submission.csv", index=False)
+
+     new_train.to_csv(public / "train.csv", index=False)
+     print("training sample shape:", new_train.shape)
+     new_test.to_csv(private / "test.csv", index=False)
+     print("test sample shape:", new_test.shape)
+     print(f"Validation data saved to {public / 'test.csv'}")
+     new_test.drop("Transported", axis="columns").to_csv(public / "test.csv", index=False)
+
+     # remove the previous files
+     os.remove("train.csv")
+     os.remove("sample_submission.csv")
+
+
+ def setup_data():
+     # download the data
+     prepare_data()
+
+     # Get the directory where the script is located
+     script_dir = Path(__file__).resolve().parent
+     public_path = script_dir / "public"
+     private_path = script_dir / "private"
+
+     # split the data
+     split_data(public_path, private_path)
+
+
+ if __name__ == "__main__":
+     setup_data()
{weco-0.2.6 → weco-0.2.7}/pyproject.toml

@@ -10,7 +10,7 @@ authors = [
  ]
  description = "Documentation for `weco`, a CLI for using Weco AI's code optimizer."
  readme = "README.md"
- version = "0.2.6"
+ version = "0.2.7"
  license = {text = "MIT"}
  requires-python = ">=3.12"
  dependencies = ["requests", "rich"]
{weco-0.2.6 → weco-0.2.7}/weco/__init__.py

@@ -1,4 +1,4 @@
  # DO NOT EDIT
- __pkg_version__ = "0.2.6"
+ __pkg_version__ = "0.2.7"
  __api_version__ = "v1"
  __base_url__ = f"https://api.aide.weco.ai/{__api_version__}"
{weco-0.2.6 → weco-0.2.7}/weco/cli.py

@@ -50,6 +50,7 @@ def main() -> None:
  )
  parser.add_argument("--steps", type=int, required=True, help="Number of steps to run")
  parser.add_argument("--model", type=str, required=True, help="Model to use for optimization")
+ parser.add_argument("--log-dir", type=str, default=".runs", help="Directory to store logs and results")
  parser.add_argument(
  "--additional-instructions",
  default=None,
@@ -83,7 +84,9 @@ def main() -> None:
  timeout = 800
 
  # Initialize panels
- summary_panel = SummaryPanel(maximize=maximize, metric_name=metric_name, total_steps=steps, model=args.model)
+ summary_panel = SummaryPanel(
+     maximize=maximize, metric_name=metric_name, total_steps=steps, model=args.model, runs_dir=args.log_dir
+ )
  plan_panel = PlanPanel()
  solution_panels = SolutionPanels(metric_name=metric_name)
  eval_output_panel = EvaluationOutputPanel()
@@ -112,7 +115,7 @@ def main() -> None:
  with Live(layout, refresh_per_second=refresh_rate, screen=True) as live:
  # Define the runs directory (.runs/<session-id>)
  session_id = session_response["session_id"]
- runs_dir = pathlib.Path(".runs") / session_id
+ runs_dir = pathlib.Path(args.log_dir) / session_id
  runs_dir.mkdir(parents=True, exist_ok=True)
 
  # Save the original code (.runs/<session-id>/original.py)
{weco-0.2.6 → weco-0.2.7}/weco/panels.py

@@ -11,7 +11,7 @@ from .utils import format_number
  class SummaryPanel:
  """Holds a summary of the optimization session."""
 
- def __init__(self, maximize: bool, metric_name: str, total_steps: int, model: str, session_id: str = None):
+ def __init__(self, maximize: bool, metric_name: str, total_steps: int, model: str, runs_dir: str, session_id: str = None):
  self.maximize = maximize
  self.metric_name = metric_name
  self.goal = ("Maximizing" if self.maximize else "Minimizing") + f" {self.metric_name}..."
@@ -19,7 +19,8 @@ class SummaryPanel:
  self.total_output_tokens = 0
  self.total_steps = total_steps
  self.model = model
- self.session_id = session_id or "N/A"
+ self.runs_dir = runs_dir
+ self.session_id = session_id if session_id is not None else "N/A"
  self.progress = Progress(
  TextColumn("[progress.description]{task.description}"),
  BarColumn(bar_width=20),
@@ -55,8 +56,7 @@ class SummaryPanel:
  summary_table.add_row(f"[bold cyan]Model:[/] {self.model}")
  summary_table.add_row("")
  # Log directory
- runs_dir = f".runs/{self.session_id}"
- summary_table.add_row(f"[bold cyan]Logs:[/] [blue underline]{runs_dir}[/]")
+ summary_table.add_row(f"[bold cyan]Logs:[/] [blue underline]{self.runs_dir}/{self.session_id}[/]")
  summary_table.add_row("")
  # Token counts
  summary_table.add_row(
{weco-0.2.6 → weco-0.2.7}/weco.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: weco
- Version: 0.2.6
+ Version: 0.2.7
  Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
  Author-email: Weco AI Team <contact@weco.ai>
  License: MIT
@@ -109,7 +109,7 @@ weco --source optimize.py \
  --metric speedup \
  --maximize true \
  --steps 15 \
- --model claude-3-7-sonnet-20250219 \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
  ```
 
@@ -127,7 +127,7 @@ weco --source optimize.py \
  --metric speedup \
  --maximize true \
  --steps 30 \
- --model o3-mini \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions examples.rst
  ```
 
@@ -144,7 +144,7 @@ Given how useful causal multihead self attention is to transformers, we've seen
  --metric speedup \
  --maximize true \
  --steps 30 \
- --model gemini-2.5-pro-preview-03-25 \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
  ```
 
@@ -157,10 +157,52 @@ Given how useful causal multihead self attention is to transformers, we've seen
  --metric speedup \
  --maximize true \
  --steps 30 \
- --model gemini-2.5-pro-preview-03-25 \
+ --model gemini-2.5-pro-exp-03-25 \
  --additional-instructions guide.md
  ```
 
+ **Example 4: Optimizing a Classification Model**
+
+ This example demonstrates optimizing a script for a Kaggle competition ([Spaceship Titanic](https://www.kaggle.com/competitions/spaceship-titanic/overview)) to improve classification accuracy. The additional instructions are provided via a separate file (`examples/spaceship-titanic/README.md`).
+
+ First, install the requirements for the example environment:
+ ```bash
+ pip install -r examples/spaceship-titanic/requirements-test.txt
+ ```
+ And run utility function once to prepare the dataset
+ ```bash
+ python examples/spaceship-titanic/utils.py
+ ```
+
+ You should see the following structure at `examples/spaceship-titanic`. You need to prepare the kaggle credentials for downloading the dataset.
+ ```
+ .
+ ├── baseline.py
+ ├── evaluate.py
+ ├── optimize.py
+ ├── private
+ │ └── test.csv
+ ├── public
+ │ ├── sample_submission.csv
+ │ ├── test.csv
+ │ └── train.csv
+ ├── README.md
+ ├── requirements-test.txt
+ └── utils.py
+ ```
+
+ Then, execute the optimization command:
+ ```bash
+ weco --source examples/spaceship-titanic/optimize.py \
+ --eval-command "python examples/spaceship-titanic/optimize.py && python examples/spaceship-titanic/evaluate.py" \
+ --metric accuracy \
+ --maximize true \
+ --steps 10 \
+ --model gemini-2.5-pro-exp-03-25 \
+ --additional-instructions examples/spaceship-titanic/README.md
+ ```
+
+ *The [baseline.py](examples/spaceship-titanic/baseline.py) is provided as a start point for optimization*
 
  ---
 
@@ -169,16 +211,28 @@ Given how useful causal multihead self attention is to transformers, we've seen
  | Argument | Description | Required |
  | :-------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- |
  | `--source` | Path to the source code file that will be optimized (e.g., `optimize.py`). | Yes |
- | `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
- | `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
+ | `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
+ | `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
  | `--maximize` | Whether to maximize (`true`) or minimize (`false`) the metric. | Yes |
  | `--steps` | Number of optimization steps (LLM iterations) to run. | Yes |
- | `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
- | `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+ | `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`.| Yes |
+ | `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+ | `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
 
  ---
 
+ ### Performance & Expectations
 
+ Weco, powered by the AIDE algorithm, optimizes code iteratively based on your evaluation results. Achieving significant improvements, especially on complex research-level tasks, often requires substantial exploration time.
+
+ The following plot from the independent [Research Engineering Benchmark (RE-Bench)](https://metr.org/AI_R_D_Evaluation_Report.pdf) report shows the performance of AIDE (the algorithm behind Weco) on challenging ML research engineering tasks over different time budgets.
+ <p align="center">
+ <img src="https://github.com/user-attachments/assets/ff0e471d-2f50-4e2d-b718-874862f533df" alt="RE-Bench Performance Across Time" width="60%"/>
+ </p>
+
+ As shown, AIDE demonstrates strong performance gains over time, surpassing lower human expert percentiles within hours and continuing to improve. This highlights the potential of evaluation-driven optimization but also indicates that reaching high levels of performance comparable to human experts on difficult benchmarks can take considerable time (tens of hours in this specific benchmark, corresponding to many `--steps` in the Weco CLI). Factor this into your planning when setting the number of `--steps` for your optimization runs.
+
+ ---
 
  ### Important Note on Evaluation
 
{weco-0.2.6 → weco-0.2.7}/weco.egg-info/SOURCES.txt

@@ -12,6 +12,12 @@ examples/hello-kernel-world/optimize.py
  examples/metal/evaluate.py
  examples/metal/examples.rst
  examples/metal/optimize.py
+ examples/spaceship-titanic/README.md
+ examples/spaceship-titanic/baseline.py
+ examples/spaceship-titanic/evaluate.py
+ examples/spaceship-titanic/optimize.py
+ examples/spaceship-titanic/requirements-test.txt
+ examples/spaceship-titanic/utils.py
  examples/triton/evaluate.py
  examples/triton/optimize.py
  weco/__init__.py