weco 0.2.15__tar.gz → 0.2.17__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {weco-0.2.15 → weco-0.2.17}/PKG-INFO +16 -8
- {weco-0.2.15 → weco-0.2.17}/README.md +15 -7
- weco-0.2.17/assets/example-optimization.gif +0 -0
- {weco-0.2.15 → weco-0.2.17}/examples/cuda/README.md +7 -7
- {weco-0.2.15 → weco-0.2.17}/examples/cuda/evaluate.py +1 -0
- {weco-0.2.15 → weco-0.2.17}/examples/hello-kernel-world/evaluate.py +1 -0
- {weco-0.2.15 → weco-0.2.17}/examples/prompt/README.md +7 -8
- weco-0.2.17/examples/spaceship-titanic/README.md +67 -0
- weco-0.2.17/examples/spaceship-titanic/competition_description.md +93 -0
- weco-0.2.17/examples/spaceship-titanic/evaluate.py +43 -0
- weco-0.2.17/examples/spaceship-titanic/get_data.py +16 -0
- weco-0.2.17/examples/spaceship-titanic/submit.py +14 -0
- {weco-0.2.15 → weco-0.2.17}/examples/triton/README.md +7 -7
- {weco-0.2.15 → weco-0.2.17}/examples/triton/evaluate.py +1 -0
- {weco-0.2.15 → weco-0.2.17}/pyproject.toml +1 -1
- {weco-0.2.15 → weco-0.2.17}/weco/__init__.py +1 -1
- weco-0.2.17/weco/api.py +86 -0
- {weco-0.2.15 → weco-0.2.17}/weco/cli.py +13 -26
- {weco-0.2.15 → weco-0.2.17}/weco/panels.py +15 -10
- {weco-0.2.15 → weco-0.2.17}/weco.egg-info/PKG-INFO +16 -8
- {weco-0.2.15 → weco-0.2.17}/weco.egg-info/SOURCES.txt +4 -7
- weco-0.2.15/examples/metal/README.md +0 -39
- weco-0.2.15/examples/metal/evaluate.py +0 -141
- weco-0.2.15/examples/metal/examples.rst +0 -428
- weco-0.2.15/examples/metal/optimize.py +0 -28
- weco-0.2.15/examples/spaceship-titanic/README.md +0 -62
- weco-0.2.15/examples/spaceship-titanic/baseline.py +0 -27
- weco-0.2.15/examples/spaceship-titanic/evaluate.py +0 -71
- weco-0.2.15/examples/spaceship-titanic/optimize.py +0 -27
- weco-0.2.15/examples/spaceship-titanic/utils.py +0 -56
- weco-0.2.15/weco/api.py +0 -103
- {weco-0.2.15 → weco-0.2.17}/.github/workflows/lint.yml +0 -0
- {weco-0.2.15 → weco-0.2.17}/.github/workflows/release.yml +0 -0
- {weco-0.2.15 → weco-0.2.17}/.gitignore +0 -0
- {weco-0.2.15 → weco-0.2.17}/.repomixignore +0 -0
- {weco-0.2.15 → weco-0.2.17}/LICENSE +0 -0
- {weco-0.2.15 → weco-0.2.17}/examples/cuda/guide.md +0 -0
- {weco-0.2.15 → weco-0.2.17}/examples/cuda/optimize.py +0 -0
- {weco-0.2.15 → weco-0.2.17}/examples/hello-kernel-world/optimize.py +0 -0
- {weco-0.2.15 → weco-0.2.17}/examples/prompt/eval.py +0 -0
- {weco-0.2.15 → weco-0.2.17}/examples/prompt/optimize.py +0 -0
- {weco-0.2.15 → weco-0.2.17}/examples/prompt/prompt_guide.md +0 -0
- {weco-0.2.15 → weco-0.2.17}/examples/spaceship-titanic/requirements-test.txt +0 -0
- {weco-0.2.15 → weco-0.2.17}/examples/triton/optimize.py +0 -0
- {weco-0.2.15 → weco-0.2.17}/setup.cfg +0 -0
- {weco-0.2.15 → weco-0.2.17}/weco/auth.py +0 -0
- {weco-0.2.15 → weco-0.2.17}/weco/utils.py +0 -0
- {weco-0.2.15 → weco-0.2.17}/weco.egg-info/dependency_links.txt +0 -0
- {weco-0.2.15 → weco-0.2.17}/weco.egg-info/entry_points.txt +0 -0
- {weco-0.2.15 → weco-0.2.17}/weco.egg-info/requires.txt +0 -0
- {weco-0.2.15 → weco-0.2.17}/weco.egg-info/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: weco
|
|
3
|
-
Version: 0.2.
|
|
3
|
+
Version: 0.2.17
|
|
4
4
|
Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
|
|
5
5
|
Author-email: Weco AI Team <contact@weco.ai>
|
|
6
6
|
License: MIT
|
|
@@ -20,21 +20,30 @@ Requires-Dist: build; extra == "dev"
|
|
|
20
20
|
Requires-Dist: setuptools_scm; extra == "dev"
|
|
21
21
|
Dynamic: license-file
|
|
22
22
|
|
|
23
|
-
|
|
23
|
+
<div align="center">
|
|
24
24
|
|
|
25
|
-
|
|
25
|
+
# Weco: The AI Code Optimizer
|
|
26
|
+
|
|
27
|
+
[](https://www.python.org)
|
|
28
|
+
[](https://docs.weco.ai/)
|
|
26
29
|
[](https://badge.fury.io/py/weco)
|
|
27
30
|
[](https://arxiv.org/abs/2502.13138)
|
|
28
31
|
|
|
32
|
+
<code>pip install weco</code>
|
|
33
|
+
|
|
34
|
+
</div>
|
|
35
|
+
|
|
36
|
+
---
|
|
37
|
+
|
|
29
38
|
Weco systematically optimizes your code, guided directly by your evaluation metrics.
|
|
30
39
|
|
|
31
40
|
Example applications include:
|
|
32
41
|
|
|
33
|
-
- **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA
|
|
42
|
+
- **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA or Triton optimizing for `latency`, `throughput`, or `memory_bandwidth`.
|
|
34
43
|
- **Model Development**: Tune feature transformations or architectures, optimizing for `validation_accuracy`, `AUC`, or `Sharpe Ratio`.
|
|
35
44
|
- **Prompt Engineering**: Refine prompts for LLMs, optimizing for `win_rate`, `relevance`, or `format_adherence`
|
|
36
45
|
|
|
37
|
-
|
|
46
|
+

|
|
38
47
|
|
|
39
48
|
---
|
|
40
49
|
|
|
@@ -42,7 +51,7 @@ https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81
|
|
|
42
51
|
|
|
43
52
|
The `weco` CLI leverages a tree search approach guided by Large Language Models (LLMs) to iteratively explore and refine your code. It automatically applies changes, runs your evaluation script, parses the results, and proposes further improvements based on the specified goal.
|
|
44
53
|
|
|
45
|
-
[image](https://github.com/user-attachments/assets/a6ed63fa-9c40-498e-aa98-a873e5786509)
|
|
54
|
+

|
|
46
55
|
|
|
47
56
|
---
|
|
48
57
|
|
|
@@ -101,7 +110,7 @@ This command starts the optimization process.
|
|
|
101
110
|
|
|
102
111
|
This basic example shows how to optimize a simple PyTorch function for speedup.
|
|
103
112
|
|
|
104
|
-
For more advanced examples, including
|
|
113
|
+
For more advanced examples, including [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md), [ML model optimization](/examples/spaceship-titanic/README.md), and [prompt engineering for math problems](https://github.com/WecoAI/weco-cli/tree/main/examples/prompt), please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.
|
|
105
114
|
|
|
106
115
|
```bash
|
|
107
116
|
# Navigate to the example directory
|
|
@@ -136,7 +145,6 @@ weco run --source optimize.py \
|
|
|
136
145
|
| `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
|
|
137
146
|
| `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
|
|
138
147
|
| `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
|
|
139
|
-
| `--preserve-source` | (Optional) If set, do not overwrite the original `--source` file. Modifications and the best solution will still be saved in the `--log-dir`. | No |
|
|
140
148
|
|
|
141
149
|
---
|
|
142
150
|
|
|
@@ -1,18 +1,27 @@
|
|
|
1
|
-
|
|
1
|
+
<div align="center">
|
|
2
2
|
|
|
3
|
-
|
|
3
|
+
# Weco: The AI Code Optimizer
|
|
4
|
+
|
|
5
|
+
[](https://www.python.org)
|
|
6
|
+
[](https://docs.weco.ai/)
|
|
4
7
|
[](https://badge.fury.io/py/weco)
|
|
5
8
|
[](https://arxiv.org/abs/2502.13138)
|
|
6
9
|
|
|
10
|
+
<code>pip install weco</code>
|
|
11
|
+
|
|
12
|
+
</div>
|
|
13
|
+
|
|
14
|
+
---
|
|
15
|
+
|
|
7
16
|
Weco systematically optimizes your code, guided directly by your evaluation metrics.
|
|
8
17
|
|
|
9
18
|
Example applications include:
|
|
10
19
|
|
|
11
|
-
- **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA
|
|
20
|
+
- **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA or Triton optimizing for `latency`, `throughput`, or `memory_bandwidth`.
|
|
12
21
|
- **Model Development**: Tune feature transformations or architectures, optimizing for `validation_accuracy`, `AUC`, or `Sharpe Ratio`.
|
|
13
22
|
- **Prompt Engineering**: Refine prompts for LLMs, optimizing for `win_rate`, `relevance`, or `format_adherence`
|
|
14
23
|
|
|
15
|
-
|
|
24
|
+

|
|
16
25
|
|
|
17
26
|
---
|
|
18
27
|
|
|
@@ -20,7 +29,7 @@ https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81
|
|
|
20
29
|
|
|
21
30
|
The `weco` CLI leverages a tree search approach guided by Large Language Models (LLMs) to iteratively explore and refine your code. It automatically applies changes, runs your evaluation script, parses the results, and proposes further improvements based on the specified goal.
|
|
22
31
|
|
|
23
|
-
[image](https://github.com/user-attachments/assets/a6ed63fa-9c40-498e-aa98-a873e5786509)
|
|
32
|
+

|
|
24
33
|
|
|
25
34
|
---
|
|
26
35
|
|
|
@@ -79,7 +88,7 @@ This command starts the optimization process.
|
|
|
79
88
|
|
|
80
89
|
This basic example shows how to optimize a simple PyTorch function for speedup.
|
|
81
90
|
|
|
82
|
-
For more advanced examples, including
|
|
91
|
+
For more advanced examples, including [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md), [ML model optimization](/examples/spaceship-titanic/README.md), and [prompt engineering for math problems](https://github.com/WecoAI/weco-cli/tree/main/examples/prompt), please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.
|
|
83
92
|
|
|
84
93
|
```bash
|
|
85
94
|
# Navigate to the example directory
|
|
@@ -114,7 +123,6 @@ weco run --source optimize.py \
|
|
|
114
123
|
| `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
|
|
115
124
|
| `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
|
|
116
125
|
| `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
|
|
117
|
-
| `--preserve-source` | (Optional) If set, do not overwrite the original `--source` file. Modifications and the best solution will still be saved in the `--log-dir`. | No |
|
|
118
126
|
|
|
119
127
|
---
|
|
120
128
|
|
|
Binary file
|
|
@@ -18,13 +18,13 @@ This example uses a separate Markdown file (`guide.md`) to provide detailed inst
|
|
|
18
18
|
Run the following command to start the optimization process:
|
|
19
19
|
|
|
20
20
|
```bash
|
|
21
|
-
weco --source optimize.py \
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
21
|
+
weco run --source optimize.py \
|
|
22
|
+
--eval-command "python evaluate.py --solution-path optimize.py" \
|
|
23
|
+
--metric speedup \
|
|
24
|
+
--maximize true \
|
|
25
|
+
--steps 30 \
|
|
26
|
+
--model gemini-2.5-pro-exp-03-25 \
|
|
27
|
+
--additional-instructions guide.md
|
|
28
28
|
```
|
|
29
29
|
|
|
30
30
|
### Explanation
|
|
@@ -1,4 +1,3 @@
|
|
|
1
|
-
# weco-cli/examples/prompt/README.md
|
|
2
1
|
# AIME Prompt Engineering Example with Weco
|
|
3
2
|
|
|
4
3
|
This example shows how **Weco** can iteratively improve a prompt for solving American Invitational Mathematics Examination (AIME) problems. The experiment runs locally, requires only two short Python files, and aims to improve the accuracy metric.
|
|
@@ -34,12 +33,12 @@ This example uses `gpt-4o-mini` via the OpenAI API by default. Ensure your `OPEN
|
|
|
34
33
|
```
|
|
35
34
|
4. **Run Weco.** The command below iteratively modifies `EXTRA_INSTRUCTIONS` in `optimize.py`, runs `eval.py` to evaluate the prompt's effectiveness, reads the printed accuracy, and keeps the best prompt variations found.
|
|
36
35
|
```bash
|
|
37
|
-
weco --source optimize.py \
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
36
|
+
weco run --source optimize.py \
|
|
37
|
+
--eval-command "python eval.py" \
|
|
38
|
+
--metric accuracy \
|
|
39
|
+
--maximize true \
|
|
40
|
+
--steps 40 \
|
|
41
|
+
--model gemini-2.5-pro-exp-03-25
|
|
43
42
|
```
|
|
44
43
|
*Note: You can replace `--model gemini-2.5-pro-exp-03-25` with another powerful model like `o3` if you have the respective API keys set.*
|
|
45
44
|
|
|
@@ -97,4 +96,4 @@ Weco then mutates the config, tries again, and gradually pushes the accuracy hig
|
|
|
97
96
|
* `eval_aime.py` slices the **Maxwell‑Jia/AIME_2024** dataset to twenty problems for fast feedback. You can change the slice in one line.
|
|
98
97
|
* The script sends model calls in parallel via `ThreadPoolExecutor`, so network latency is hidden.
|
|
99
98
|
* Every five completed items, the script logs progress and elapsed time.
|
|
100
|
-
* The final line `accuracy: value` is the only part Weco needs for guidance.
|
|
99
|
+
* The final line `accuracy: value` is the only part Weco needs for guidance.
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
# Example: Optimizing a Kaggle Classification Model (Spaceship Titanic)
|
|
2
|
+
|
|
3
|
+
This example demonstrates using Weco to optimize a Python script designed for the [Spaceship Titanic Kaggle competition](https://www.kaggle.com/competitions/spaceship-titanic/overview). The goal is to improve the model's `accuracy` metric by directly optimizing the evaluate.py
|
|
4
|
+
|
|
5
|
+
## Setup
|
|
6
|
+
|
|
7
|
+
1. Ensure you are in the `examples/spaceship-titanic` directory.
|
|
8
|
+
2. **Kaggle Credentials:** You need your Kaggle API credentials (`kaggle.json`) configured to download the competition dataset. Place the `kaggle.json` file in `~/.kaggle/` or set the `KAGGLE_USERNAME` and `KAGGLE_KEY` environment variables. See [Kaggle API documentation](https://github.com/Kaggle/kaggle-api#api-credentials) for details.
|
|
9
|
+
3. **Install Dependencies:** Install the required Python packages:
|
|
10
|
+
```bash
|
|
11
|
+
pip install -r requirements-test.txt
|
|
12
|
+
```
|
|
13
|
+
4. **Prepare Data:** Run the utility script once to download the dataset from Kaggle and place it in the expected `./data/` subdirectories:
|
|
14
|
+
```bash
|
|
15
|
+
python get_data.py
|
|
16
|
+
```
|
|
17
|
+
After running `get_data.py`, your directory structure should look like this:
|
|
18
|
+
```
|
|
19
|
+
.
|
|
20
|
+
├── competition_description.md
|
|
21
|
+
├── data
|
|
22
|
+
│ ├── sample_submission.csv
|
|
23
|
+
│ ├── test.csv
|
|
24
|
+
│ └── train.csv
|
|
25
|
+
├── evaluate.py
|
|
26
|
+
├── get_data.py
|
|
27
|
+
├── README.md # This file
|
|
28
|
+
├── requirements-test.txt
|
|
29
|
+
└── submit.py
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
## Optimization Command
|
|
33
|
+
|
|
34
|
+
Run the following command to start optimizing the model:
|
|
35
|
+
|
|
36
|
+
```bash
|
|
37
|
+
weco run --source evaluate.py \
|
|
38
|
+
--eval-command "python evaluate.py --data-dir ./data" \
|
|
39
|
+
--metric accuracy \
|
|
40
|
+
--maximize true \
|
|
41
|
+
--steps 10 \
|
|
42
|
+
--model gemini-2.5-pro-exp-03-25 \
|
|
43
|
+
--additional-instructions "Improve feature engineering, model choice and hyper-parameters." \
|
|
44
|
+
--log-dir .runs/spaceship-titanic
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
## Submit the solution
|
|
48
|
+
|
|
49
|
+
Once the optimization has finished, you can submit your predictions to Kaggle to see the results. Make sure `submission.csv` is present, then simply run the following command.
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
python submit.py
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
### Explanation
|
|
56
|
+
|
|
57
|
+
* `--source evaluate.py`: The script provides a baseline as the root node; Weco directly optimizes `evaluate.py`.
|
|
58
|
+
* `--eval-command "python evaluate.py --data-dir ./data/"`: The weco agent will run the `evaluate.py` and update it.
|
|
59
|
+
* [optional] `--data-dir`: path to the train and test data.
|
|
60
|
+
* [optional] `--seed`: Seed for reproducing the experiment.
|
|
61
|
+
* `--metric accuracy`: The target metric Weco should optimize.
|
|
62
|
+
* `--maximize true`: Weco aims to increase the accuracy.
|
|
63
|
+
* `--steps 10`: The number of optimization iterations.
|
|
64
|
+
* `--model gemini-2.5-pro-exp-03-25`: The LLM driving the optimization.
|
|
65
|
+
* `--additional-instructions "Improve feature engineering, model choice and hyper-parameters."`: A simple instruction for model improvement; alternatively, you can pass the path to [`competition_description.md`](./competition_description.md) within the repo to feed the agent more detailed information.
|
|
66
|
+
|
|
67
|
+
Weco will iteratively modify the feature engineering or modeling code within `evaluate.py`, run the evaluation pipeline, and use the resulting `accuracy` to guide further improvements.
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
# Overview
|
|
2
|
+
|
|
3
|
+
## Description
|
|
4
|
+
Welcome to the year 2912, where your data science skills are needed to solve a cosmic mystery. We've received a transmission from four lightyears away and things aren't looking good.
|
|
5
|
+
|
|
6
|
+
The *Spaceship Titanic* was an interstellar passenger liner launched a month ago. With almost 13,000 passengers on board, the vessel set out on its maiden voyage transporting emigrants from our solar system to three newly habitable exoplanets orbiting nearby stars.
|
|
7
|
+
|
|
8
|
+
While rounding Alpha Centauri en route to its first destination—the torrid 55 Cancri E—the unwary *Spaceship Titanic* collided with a spacetime anomaly hidden within a dust cloud. Sadly, it met a similar fate as its namesake from 1000 years before. Though the ship stayed intact, almost half of the passengers were transported to an alternate dimension!
|
|
9
|
+
|
|
10
|
+

|
|
11
|
+
|
|
12
|
+
To help rescue crews and retrieve the lost passengers, you are challenged to predict which passengers were transported by the anomaly using records recovered from the spaceship’s damaged computer system.
|
|
13
|
+
|
|
14
|
+
Help save them and change history!
|
|
15
|
+
|
|
16
|
+
### Acknowledgments
|
|
17
|
+
|
|
18
|
+
Photos by [Joel Filipe](https://unsplash.com/@joelfilip?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText), [Richard Gatley](https://unsplash.com/@uncle_rickie?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) and [ActionVance](https://unsplash.com/@actionvance?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash.
|
|
19
|
+
|
|
20
|
+
## Evaluation
|
|
21
|
+
|
|
22
|
+
### Metric
|
|
23
|
+
|
|
24
|
+
Submissions are evaluated based on their [classification accuracy](https://developers.google.com/machine-learning/crash-course/classification/accuracy), the percentage of predicted labels that are correct.
|
|
25
|
+
|
|
26
|
+
### Submission Format
|
|
27
|
+
|
|
28
|
+
The submission format for the competition is a csv file with the following format:
|
|
29
|
+
|
|
30
|
+
```
|
|
31
|
+
PassengerId,Transported
|
|
32
|
+
0013_01,False
|
|
33
|
+
0018_01,False
|
|
34
|
+
0019_01,False
|
|
35
|
+
0021_01,False
|
|
36
|
+
etc.
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
## Frequently Asked Questions
|
|
40
|
+
|
|
41
|
+
### What is a Getting Started competition?
|
|
42
|
+
|
|
43
|
+
Getting Started competitions were created by Kaggle data scientists for people who have little to no machine learning background. They are a great place to begin if you are new to data science or just finished a MOOC and want to get involved in Kaggle.
|
|
44
|
+
|
|
45
|
+
Getting Started competitions are a non-competitive way to get familiar with Kaggle’s platform, learn basic machine learning concepts, and start meeting people in the community. They have no cash prize and are on a rolling timeline.
|
|
46
|
+
|
|
47
|
+
### How do I create and manage a team?
|
|
48
|
+
|
|
49
|
+
When you accept the competition rules, a team will be created for you. You can invite others to your team, accept a merger with another team, and update basic information like team name by going to the [Team](https://www.kaggle.com/c/spaceship-titanic/team) page.
|
|
50
|
+
|
|
51
|
+
We've heard from many Kagglers that teaming up is the best way to learn new skills AND have fun. If you don't have a teammate already, consider asking if anyone wants to team up in the [discussion forum](https://www.kaggle.com/c/spaceship-titanic/discussion).
|
|
52
|
+
|
|
53
|
+
### What are Notebooks?
|
|
54
|
+
|
|
55
|
+
Kaggle Notebooks is a cloud computational environment that enables reproducible and collaborative analysis. Notebooks support scripts in Python and R, Jupyter Notebooks, and RMarkdown reports. You can visit the [Notebooks](https://www.kaggle.com/c/spaceship-titanic/notebooks) tab to view all of the publicly shared code for the Spaceship Titanic competition. For more on how to use Notebooks to learn data science, check out our [Courses](https://www.kaggle.com/learn/overview)!
|
|
56
|
+
|
|
57
|
+
### Why did my team disappear from the leaderboard?
|
|
58
|
+
|
|
59
|
+
To keep with the spirit of getting-started competitions, we have implemented a two month rolling window on submissions. Once a submission is more than two months old, it will be invalidated and no longer count towards the leaderboard.
|
|
60
|
+
|
|
61
|
+
If your team has no submissions in the previous two months, the team will also drop from the leaderboard. This will keep the leaderboard at a manageable size, freshen it up, and prevent newcomers from getting lost in a sea of abandoned scores.
|
|
62
|
+
|
|
63
|
+
*"I worked so hard to get that score! Give it back!"* Read more about our decision to implement a rolling leaderboard [here](https://www.kaggle.com/c/titanic/discussion/6240).
|
|
64
|
+
|
|
65
|
+
### How do I contact Support?
|
|
66
|
+
|
|
67
|
+
Kaggle does not have a dedicated support team so you’ll typically find that you receive a response more quickly by asking your question in the appropriate forum. (For this competition, you’ll want to use the [Spaceship Titanic discussion forum](https://www.kaggle.com/c/spaceship-titanic/discussion)).
|
|
68
|
+
|
|
69
|
+
Support is only able to help with issues that are being experienced by all participants. Before contacting support, please check the discussion forum for information on your problem. If you can’t find it, you can post your problem in the forum so a fellow participant or a Kaggle team member can provide help. The forums are full of useful information on the data, metric, and different approaches. We encourage you to use the forums often. If you share your knowledge, you'll find that others will share a lot in turn!
|
|
70
|
+
|
|
71
|
+
If your problem persists or it seems to be affecting all participants, then please [contact us](https://www.kaggle.com/contact).
|
|
72
|
+
|
|
73
|
+
# Dataset Description
|
|
74
|
+
|
|
75
|
+
In this competition your task is to predict whether a passenger was transported to an alternate dimension during the Spaceship Titanic's collision with the spacetime anomaly. To help you make these predictions, you're given a set of personal records recovered from the ship's damaged computer system.
|
|
76
|
+
|
|
77
|
+
## File and Data Field Descriptions
|
|
78
|
+
|
|
79
|
+
- **train.csv** - Personal records for about two-thirds (~8700) of the passengers, to be used as training data.
|
|
80
|
+
- `PassengerId` - A unique Id for each passenger. Each Id takes the form `gggg_pp` where `gggg` indicates a group the passenger is travelling with and `pp` is their number within the group. People in a group are often family members, but not always.
|
|
81
|
+
- `HomePlanet` - The planet the passenger departed from, typically their planet of permanent residence.
|
|
82
|
+
- `CryoSleep` - Indicates whether the passenger elected to be put into suspended animation for the duration of the voyage. Passengers in cryosleep are confined to their cabins.
|
|
83
|
+
- `Cabin` - The cabin number where the passenger is staying. Takes the form `deck/num/side`, where `side` can be either `P` for *Port* or `S` for *Starboard*.
|
|
84
|
+
- `Destination` - The planet the passenger will be debarking to.
|
|
85
|
+
- `Age` - The age of the passenger.
|
|
86
|
+
- `VIP` - Whether the passenger has paid for special VIP service during the voyage.
|
|
87
|
+
- `RoomService`, `FoodCourt`, `ShoppingMall`, `Spa`, `VRDeck` - Amount the passenger has billed at each of the *Spaceship Titanic*'s many luxury amenities.
|
|
88
|
+
- `Name` - The first and last names of the passenger.
|
|
89
|
+
- `Transported` - Whether the passenger was transported to another dimension. This is the target, the column you are trying to predict.
|
|
90
|
+
- **test.csv** - Personal records for the remaining one-third (~4300) of the passengers, to be used as test data. Your task is to predict the value of `Transported` for the passengers in this set.
|
|
91
|
+
- **sample_submission.csv** - A submission file in the correct format.
|
|
92
|
+
- `PassengerId` - Id for each passenger in the test set.
|
|
93
|
+
- `Transported` - The target. For each passenger, predict either `True` or `False`.
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
import pandas as pd
|
|
4
|
+
from sklearn.model_selection import train_test_split
|
|
5
|
+
from sklearn.dummy import DummyClassifier
|
|
6
|
+
from sklearn.metrics import accuracy_score
|
|
7
|
+
import joblib
|
|
8
|
+
import warnings
|
|
9
|
+
|
|
10
|
+
warnings.filterwarnings("ignore", category=UserWarning) # keep Weco's panel tidy
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def train(df: pd.DataFrame, test_df: pd.DataFrame, random_state: int = 0) -> float:
    """Fit a baseline classifier, score it on a held-out split, and write a submission.

    Splits *df* into fit/holdout folds (stratified on the target), fits a
    majority-class DummyClassifier, persists the model to ``model.joblib``,
    and writes ``submission.csv`` with predictions for *test_df*.

    Returns the holdout accuracy.
    """
    fit_df, holdout_df = train_test_split(
        df, test_size=0.10, random_state=random_state, stratify=df["Transported"]
    )

    fit_target = fit_df.pop("Transported")
    holdout_target = holdout_df.pop("Transported")

    clf = DummyClassifier(strategy="most_frequent", random_state=random_state)
    clf.fit(fit_df, fit_target)
    score = accuracy_score(holdout_target, clf.predict(holdout_df))

    # **Important**: Keep this step!!!
    # Persist the fitted model and emit the Kaggle submission for the test set.
    joblib.dump(clf, "model.joblib")
    pd.DataFrame(
        {"PassengerId": test_df["PassengerId"], "Transported": clf.predict(test_df).astype(bool)}
    ).to_csv("submission.csv", index=False)

    return score
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
if __name__ == "__main__":
    # CLI entry point: load the Kaggle CSVs, train the baseline, and print the
    # metric line that Weco parses ("accuracy: <value>").
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-dir", type=Path, default=Path("./data/"))
    parser.add_argument("--seed", type=int, default=0)
    cli_args = parser.parse_args()

    train_frame = pd.read_csv(cli_args.data_dir / "train.csv")
    test_frame = pd.read_csv(cli_args.data_dir / "test.csv")
    acc = train(train_frame, test_frame, random_state=cli_args.seed)
    print(f"accuracy: {acc:.6f}")
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
import kaggle
import zipfile
import os


def get_data():
    """Download the Spaceship Titanic dataset from Kaggle and unpack it into ./data."""
    kaggle.api.competition_download_files("spaceship-titanic")
    archive = "spaceship-titanic.zip"
    # Extract everything into the data/ directory, then drop the downloaded archive.
    with zipfile.ZipFile(archive, "r") as bundle:
        bundle.extractall("data")
    os.remove(archive)


if __name__ == "__main__":
    get_data()
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import argparse
import kaggle
from pathlib import Path


def submit_submission(submission_path: Path):
    """Upload *submission_path* to the Spaceship Titanic competition on Kaggle."""
    kaggle.api.competition_submit(submission_path, "My first submission using weco agent", "spaceship-titanic")


if __name__ == "__main__":
    cli = argparse.ArgumentParser()
    cli.add_argument("--submission-path", "-p", type=Path, default="submission.csv")
    submit_submission(cli.parse_args().submission_path)
|
|
@@ -16,13 +16,13 @@ This example demonstrates using Weco to optimize a causal multi-head self-attent
|
|
|
16
16
|
Run the following command to start the optimization process:
|
|
17
17
|
|
|
18
18
|
```bash
|
|
19
|
-
weco --source optimize.py \
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
19
|
+
weco run --source optimize.py \
|
|
20
|
+
--eval-command "python evaluate.py --solution-path optimize.py" \
|
|
21
|
+
--metric speedup \
|
|
22
|
+
--maximize true \
|
|
23
|
+
--steps 30 \
|
|
24
|
+
--model gemini-2.5-pro-exp-03-25 \
|
|
25
|
+
--additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
|
|
26
26
|
```
|
|
27
27
|
|
|
28
28
|
### Explanation
|
|
@@ -10,7 +10,7 @@ authors = [
|
|
|
10
10
|
]
|
|
11
11
|
description = "Documentation for `weco`, a CLI for using Weco AI's code optimizer."
|
|
12
12
|
readme = "README.md"
|
|
13
|
-
version = "0.2.
|
|
13
|
+
version = "0.2.17"
|
|
14
14
|
license = {text = "MIT"}
|
|
15
15
|
requires-python = ">=3.8"
|
|
16
16
|
dependencies = ["requests", "rich"]
|
weco-0.2.17/weco/api.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
from typing import Dict, Any
|
|
2
|
+
import rich
|
|
3
|
+
import requests
|
|
4
|
+
from weco import __pkg_version__, __base_url__
|
|
5
|
+
import sys
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def handle_api_error(e: requests.exceptions.HTTPError, console: rich.console.Console) -> None:
    """Extract and display error messages from API responses in a structured format.

    Prints the server-provided ``detail`` field when the error response body is
    JSON; falls back to the raw response text (or the exception itself) when the
    body is not valid JSON or has no ``detail`` key, so users see the real API
    error instead of a secondary traceback. Always exits the CLI with status 1.

    Args:
        e: The HTTP error raised by ``response.raise_for_status()``.
        console: Rich console used to render the error message.
    """
    try:
        detail = e.response.json()["detail"]
    except (ValueError, KeyError, AttributeError):
        # Non-JSON body, missing "detail" key, or no response attached at all.
        detail = e.response.text if e.response is not None else str(e)
    console.print(f"[bold red]{detail}[/]")
    sys.exit(1)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def start_optimization_session(
    console: rich.console.Console,
    source_code: str,
    evaluation_command: str,
    metric_name: str,
    maximize: bool,
    steps: int,
    code_generator_config: Dict[str, Any],
    evaluator_config: Dict[str, Any],
    search_policy_config: Dict[str, Any],
    additional_instructions: str = None,
    api_keys: Dict[str, Any] = None,
    auth_headers: dict = None,
    timeout: int = 800,
) -> Dict[str, Any]:
    """Start a new optimization session on the Weco API.

    Args:
        console: Rich console used to render the startup spinner.
        source_code: Contents of the file being optimized.
        evaluation_command: Shell command the user runs to score a solution.
        metric_name: Name of the metric printed by the evaluation command.
        maximize: Whether higher metric values are better.
        steps: Number of optimization iterations to run.
        code_generator_config: Code-generator settings forwarded verbatim.
        evaluator_config: Evaluator settings forwarded verbatim.
        search_policy_config: Search-policy settings forwarded verbatim.
        additional_instructions: Optional extra guidance for the LLM.
        api_keys: Optional provider API keys merged into the request metadata.
        auth_headers: Optional HTTP headers carrying authentication.
        timeout: Request timeout in seconds.

    Returns:
        The JSON payload describing the newly created session.

    Raises:
        requests.exceptions.HTTPError: If the API responds with an error status.
    """
    # Avoid the mutable-default-argument pitfall: normalize to fresh dicts per call.
    api_keys = api_keys if api_keys is not None else {}
    auth_headers = auth_headers if auth_headers is not None else {}
    with console.status("[bold green]Starting Optimization..."):
        response = requests.post(
            f"{__base_url__}/sessions",  # Path is relative to base_url
            json={
                "source_code": source_code,
                "additional_instructions": additional_instructions,
                "objective": {"evaluation_command": evaluation_command, "metric_name": metric_name, "maximize": maximize},
                "optimizer": {
                    "steps": steps,
                    "code_generator": code_generator_config,
                    "evaluator": evaluator_config,
                    "search_policy": search_policy_config,
                },
                "metadata": {"client_name": "cli", "client_version": __pkg_version__, **api_keys},
            },
            headers=auth_headers,
            timeout=timeout,
        )
        response.raise_for_status()
        return response.json()
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def evaluate_feedback_then_suggest_next_solution(
    session_id: str,
    execution_output: str,
    additional_instructions: str = None,
    api_keys: Dict[str, Any] = None,
    auth_headers: dict = None,
    timeout: int = 800,
) -> Dict[str, Any]:
    """Report an evaluation result and fetch the next candidate solution.

    Args:
        session_id: Identifier of the running optimization session.
        execution_output: Raw stdout/stderr captured from the evaluation command.
        additional_instructions: Optional extra guidance for the LLM.
        api_keys: Optional provider API keys merged into the request metadata.
        auth_headers: Optional HTTP headers carrying authentication.
        timeout: Request timeout in seconds.

    Returns:
        The JSON payload with the evaluated feedback and the next suggestion.

    Raises:
        requests.exceptions.HTTPError: If the API responds with an error status.
    """
    # Avoid the mutable-default-argument pitfall: normalize to fresh dicts per call.
    api_keys = api_keys if api_keys is not None else {}
    auth_headers = auth_headers if auth_headers is not None else {}
    response = requests.post(
        f"{__base_url__}/sessions/{session_id}/suggest",  # Path is relative to base_url
        json={
            "execution_output": execution_output,
            "additional_instructions": additional_instructions,
            "metadata": {**api_keys},
        },
        headers=auth_headers,
        timeout=timeout,
    )
    response.raise_for_status()
    return response.json()
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def get_optimization_session_status(
    session_id: str, include_history: bool = False, auth_headers: dict = None, timeout: int = 800
) -> Dict[str, Any]:
    """Get the current status of the optimization session.

    Args:
        session_id: Identifier of the optimization session to query.
        include_history: Whether the response should include the full solution history.
        auth_headers: Optional HTTP headers carrying authentication.
        timeout: Request timeout in seconds.

    Returns:
        The JSON payload describing the session's current state.

    Raises:
        requests.exceptions.HTTPError: If the API responds with an error status.
    """
    response = requests.get(
        f"{__base_url__}/sessions/{session_id}",  # Path is relative to base_url
        params={"include_history": include_history},
        # Avoid the mutable-default-argument pitfall: fall back to a fresh dict.
        headers=auth_headers if auth_headers is not None else {},
        timeout=timeout,
    )
    response.raise_for_status()
    return response.json()
|