weco 0.2.15__tar.gz → 0.2.16__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. {weco-0.2.15 → weco-0.2.16}/.github/workflows/release.yml +2 -0
  2. {weco-0.2.15 → weco-0.2.16}/PKG-INFO +6 -6
  3. {weco-0.2.15 → weco-0.2.16}/README.md +5 -5
  4. weco-0.2.16/assets/example-optimization.gif +0 -0
  5. {weco-0.2.15 → weco-0.2.16}/examples/cuda/README.md +7 -7
  6. {weco-0.2.15 → weco-0.2.16}/examples/cuda/evaluate.py +1 -0
  7. {weco-0.2.15 → weco-0.2.16}/examples/hello-kernel-world/evaluate.py +1 -0
  8. {weco-0.2.15 → weco-0.2.16}/examples/prompt/README.md +6 -6
  9. weco-0.2.16/examples/spaceship-titanic/README.md +67 -0
  10. weco-0.2.16/examples/spaceship-titanic/competition_description.md +93 -0
  11. weco-0.2.16/examples/spaceship-titanic/evaluate.py +43 -0
  12. weco-0.2.16/examples/spaceship-titanic/get_data.py +16 -0
  13. weco-0.2.16/examples/spaceship-titanic/submit.py +14 -0
  14. {weco-0.2.15 → weco-0.2.16}/examples/triton/README.md +7 -7
  15. {weco-0.2.15 → weco-0.2.16}/examples/triton/evaluate.py +1 -0
  16. {weco-0.2.15 → weco-0.2.16}/pyproject.toml +1 -1
  17. {weco-0.2.15 → weco-0.2.16}/weco/__init__.py +1 -1
  18. weco-0.2.16/weco/api.py +86 -0
  19. {weco-0.2.15 → weco-0.2.16}/weco/cli.py +9 -14
  20. {weco-0.2.15 → weco-0.2.16}/weco/panels.py +15 -10
  21. {weco-0.2.15 → weco-0.2.16}/weco.egg-info/PKG-INFO +6 -6
  22. {weco-0.2.15 → weco-0.2.16}/weco.egg-info/SOURCES.txt +4 -7
  23. weco-0.2.15/examples/metal/README.md +0 -39
  24. weco-0.2.15/examples/metal/evaluate.py +0 -141
  25. weco-0.2.15/examples/metal/examples.rst +0 -428
  26. weco-0.2.15/examples/metal/optimize.py +0 -28
  27. weco-0.2.15/examples/spaceship-titanic/README.md +0 -62
  28. weco-0.2.15/examples/spaceship-titanic/baseline.py +0 -27
  29. weco-0.2.15/examples/spaceship-titanic/evaluate.py +0 -71
  30. weco-0.2.15/examples/spaceship-titanic/optimize.py +0 -27
  31. weco-0.2.15/examples/spaceship-titanic/utils.py +0 -56
  32. weco-0.2.15/weco/api.py +0 -103
  33. {weco-0.2.15 → weco-0.2.16}/.github/workflows/lint.yml +0 -0
  34. {weco-0.2.15 → weco-0.2.16}/.gitignore +0 -0
  35. {weco-0.2.15 → weco-0.2.16}/.repomixignore +0 -0
  36. {weco-0.2.15 → weco-0.2.16}/LICENSE +0 -0
  37. {weco-0.2.15 → weco-0.2.16}/examples/cuda/guide.md +0 -0
  38. {weco-0.2.15 → weco-0.2.16}/examples/cuda/optimize.py +0 -0
  39. {weco-0.2.15 → weco-0.2.16}/examples/hello-kernel-world/optimize.py +0 -0
  40. {weco-0.2.15 → weco-0.2.16}/examples/prompt/eval.py +0 -0
  41. {weco-0.2.15 → weco-0.2.16}/examples/prompt/optimize.py +0 -0
  42. {weco-0.2.15 → weco-0.2.16}/examples/prompt/prompt_guide.md +0 -0
  43. {weco-0.2.15 → weco-0.2.16}/examples/spaceship-titanic/requirements-test.txt +0 -0
  44. {weco-0.2.15 → weco-0.2.16}/examples/triton/optimize.py +0 -0
  45. {weco-0.2.15 → weco-0.2.16}/setup.cfg +0 -0
  46. {weco-0.2.15 → weco-0.2.16}/weco/auth.py +0 -0
  47. {weco-0.2.15 → weco-0.2.16}/weco/utils.py +0 -0
  48. {weco-0.2.15 → weco-0.2.16}/weco.egg-info/dependency_links.txt +0 -0
  49. {weco-0.2.15 → weco-0.2.16}/weco.egg-info/entry_points.txt +0 -0
  50. {weco-0.2.15 → weco-0.2.16}/weco.egg-info/requires.txt +0 -0
  51. {weco-0.2.15 → weco-0.2.16}/weco.egg-info/top_level.txt +0 -0
@@ -43,6 +43,8 @@ jobs:
  OLD_VERSION=""
  fi

+ OLD_VERSION="0.2.15"
+
  echo "Previous version: $OLD_VERSION"
  echo "Current version: $NEW_VERSION"

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: weco
- Version: 0.2.15
+ Version: 0.2.16
  Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
  Author-email: Weco AI Team <contact@weco.ai>
  License: MIT
@@ -20,7 +20,7 @@ Requires-Dist: build; extra == "dev"
  Requires-Dist: setuptools_scm; extra == "dev"
  Dynamic: license-file

- # Weco: The Evaluation-Driven AI Code Optimizer
+ # Weco: The AI Research Engineer

  [![Python](https://img.shields.io/badge/Python-3.12.0-blue)](https://www.python.org)
  [![PyPI version](https://badge.fury.io/py/weco.svg)](https://badge.fury.io/py/weco)
@@ -30,11 +30,11 @@ Weco systematically optimizes your code, guided directly by your evaluation metr

  Example applications include:

- - **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA, Triton or Metal, optimizing for `latency`, `throughput`, or `memory_bandwidth`.
+ - **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA or Triton, optimizing for `latency`, `throughput`, or `memory_bandwidth`.
  - **Model Development**: Tune feature transformations or architectures, optimizing for `validation_accuracy`, `AUC`, or `Sharpe Ratio`.
  - **Prompt Engineering**: Refine prompts for LLMs, optimizing for `win_rate`, `relevance`, or `format_adherence`

- https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81
+ ![image](assets/example-optimization.gif)

  ---

@@ -42,7 +42,7 @@ https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81

  The `weco` CLI leverages a tree search approach guided by Large Language Models (LLMs) to iteratively explore and refine your code. It automatically applies changes, runs your evaluation script, parses the results, and proposes further improvements based on the specified goal.

- [image](https://github.com/user-attachments/assets/a6ed63fa-9c40-498e-aa98-a873e5786509)
+ ![image](https://github.com/user-attachments/assets/a6ed63fa-9c40-498e-aa98-a873e5786509)

  ---

@@ -101,7 +101,7 @@ This command starts the optimization process.

  This basic example shows how to optimize a simple PyTorch function for speedup.

- For more advanced examples, including **[Metal/MLX](/examples/metal/README.md), [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md)**, and **[ML model optimization](/examples/spaceship-titanic/README.md)**, please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.
+ For more advanced examples, including **[Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md)**, and **[ML model optimization](/examples/spaceship-titanic/README.md)**, please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.

  ```bash
  # Navigate to the example directory
@@ -1,4 +1,4 @@
- # Weco: The Evaluation-Driven AI Code Optimizer
+ # Weco: The AI Research Engineer

  [![Python](https://img.shields.io/badge/Python-3.12.0-blue)](https://www.python.org)
  [![PyPI version](https://badge.fury.io/py/weco.svg)](https://badge.fury.io/py/weco)
@@ -8,11 +8,11 @@ Weco systematically optimizes your code, guided directly by your evaluation metr

  Example applications include:

- - **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA, Triton or Metal, optimizing for `latency`, `throughput`, or `memory_bandwidth`.
+ - **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA or Triton, optimizing for `latency`, `throughput`, or `memory_bandwidth`.
  - **Model Development**: Tune feature transformations or architectures, optimizing for `validation_accuracy`, `AUC`, or `Sharpe Ratio`.
  - **Prompt Engineering**: Refine prompts for LLMs, optimizing for `win_rate`, `relevance`, or `format_adherence`

- https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81
+ ![image](assets/example-optimization.gif)

  ---

@@ -20,7 +20,7 @@ https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81

  The `weco` CLI leverages a tree search approach guided by Large Language Models (LLMs) to iteratively explore and refine your code. It automatically applies changes, runs your evaluation script, parses the results, and proposes further improvements based on the specified goal.

- [image](https://github.com/user-attachments/assets/a6ed63fa-9c40-498e-aa98-a873e5786509)
+ ![image](https://github.com/user-attachments/assets/a6ed63fa-9c40-498e-aa98-a873e5786509)

  ---

@@ -79,7 +79,7 @@ This command starts the optimization process.

  This basic example shows how to optimize a simple PyTorch function for speedup.

- For more advanced examples, including **[Metal/MLX](/examples/metal/README.md), [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md)**, and **[ML model optimization](/examples/spaceship-titanic/README.md)**, please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.
+ For more advanced examples, including **[Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md)**, and **[ML model optimization](/examples/spaceship-titanic/README.md)**, please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.

  ```bash
  # Navigate to the example directory
@@ -18,13 +18,13 @@ This example uses a separate Markdown file (`guide.md`) to provide detailed inst
  Run the following command to start the optimization process:

  ```bash
- weco --source optimize.py \
-     --eval-command "python evaluate.py --solution-path optimize.py" \
-     --metric speedup \
-     --maximize true \
-     --steps 30 \
-     --model gemini-2.5-pro-exp-03-25 \
-     --additional-instructions guide.md
+ weco run --source optimize.py \
+     --eval-command "python evaluate.py --solution-path optimize.py" \
+     --metric speedup \
+     --maximize true \
+     --steps 30 \
+     --model gemini-2.5-pro-exp-03-25 \
+     --additional-instructions guide.md
  ```

  ### Explanation
@@ -2,6 +2,7 @@ import sys
  import os
  import pathlib
  import importlib
+ import importlib.util
  import traceback
  import torch
  import torch.nn as nn
@@ -3,6 +3,7 @@ import sys
  import os
  import pathlib
  import importlib
+ import importlib.util
  import traceback
  import torch
  import torch.nn as nn
@@ -34,12 +34,12 @@ This example uses `gpt-4o-mini` via the OpenAI API by default. Ensure your `OPEN
     ```
  4. **Run Weco.** The command below iteratively modifies `EXTRA_INSTRUCTIONS` in `optimize.py`, runs `eval.py` to evaluate the prompt's effectiveness, reads the printed accuracy, and keeps the best prompt variations found.
     ```bash
-    weco --source optimize.py \
-        --eval-command "python eval.py" \
-        --metric accuracy \
-        --maximize true \
-        --steps 40 \
-        --model gemini-2.5-pro-exp-03-25
+    weco run --source optimize.py \
+        --eval-command "python eval.py" \
+        --metric accuracy \
+        --maximize true \
+        --steps 40 \
+        --model gemini-2.5-pro-exp-03-25
     ```
  *Note: You can replace `--model gemini-2.5-pro-exp-03-25` with another powerful model like `o3` if you have the respective API keys set.*

@@ -0,0 +1,67 @@
+ # Example: Optimizing a Kaggle Classification Model (Spaceship Titanic)
+
+ This example demonstrates using Weco to optimize a Python script for the [Spaceship Titanic Kaggle competition](https://www.kaggle.com/competitions/spaceship-titanic/overview). The goal is to improve the model's `accuracy` metric by optimizing `evaluate.py` directly.
+
+ ## Setup
+
+ 1. Ensure you are in the `examples/spaceship-titanic` directory.
+ 2. **Kaggle Credentials:** You need your Kaggle API credentials (`kaggle.json`) configured to download the competition dataset. Place the `kaggle.json` file in `~/.kaggle/` or set the `KAGGLE_USERNAME` and `KAGGLE_KEY` environment variables. See the [Kaggle API documentation](https://github.com/Kaggle/kaggle-api#api-credentials) for details.
+ 3. **Install Dependencies:** Install the required Python packages:
+    ```bash
+    pip install -r requirements-test.txt
+    ```
+ 4. **Prepare Data:** Run the utility script once to download the dataset from Kaggle and place it in the expected `./data/` subdirectories:
+    ```bash
+    python get_data.py
+    ```
+    After running `get_data.py`, your directory structure should look like this:
+    ```
+    .
+    ├── competition_description.md
+    ├── data
+    │   ├── sample_submission.csv
+    │   ├── test.csv
+    │   └── train.csv
+    ├── evaluate.py
+    ├── get_data.py
+    ├── README.md   # This file
+    ├── requirements-test.txt
+    └── submit.py
+    ```
+
+ ## Optimization Command
+
+ Run the following command to start optimizing the model:
+
+ ```bash
+ weco run --source evaluate.py \
+     --eval-command "python evaluate.py --data-dir ./data" \
+     --metric accuracy \
+     --maximize true \
+     --steps 10 \
+     --model gemini-2.5-pro-exp-03-25 \
+     --additional-instructions "Improve feature engineering, model choice and hyper-parameters." \
+     --log-dir .runs/spaceship-titanic
+ ```
+
+ ## Submit the Solution
+
+ Once the optimization has finished, you can submit your predictions to Kaggle to see the results. Make sure `submission.csv` is present, then run:
+
+ ```bash
+ python submit.py
+ ```
+
+ ### Explanation
+
+ * `--source evaluate.py`: The script that provides the baseline (root node); Weco optimizes `evaluate.py` directly.
+ * `--eval-command "python evaluate.py --data-dir ./data"`: The command Weco runs to evaluate each candidate version of the script.
+   * [optional] `--data-dir`: Path to the train and test data.
+   * [optional] `--seed`: Seed for reproducing the experiment.
+ * `--metric accuracy`: The target metric Weco should optimize.
+ * `--maximize true`: Weco aims to increase the accuracy.
+ * `--steps 10`: The number of optimization iterations.
+ * `--model gemini-2.5-pro-exp-03-25`: The LLM driving the optimization.
+ * `--additional-instructions "Improve feature engineering, model choice and hyper-parameters."`: A brief instruction for model improvement; alternatively, pass the path to [`competition_description.md`](./competition_description.md) to give the agent more detailed context.
+
+ Weco will iteratively modify the feature engineering or modeling code within `evaluate.py`, run the evaluation pipeline, and use the resulting `accuracy` to guide further improvements.
@@ -0,0 +1,93 @@
+ # Overview
+
+ ## Description
+ Welcome to the year 2912, where your data science skills are needed to solve a cosmic mystery. We've received a transmission from four lightyears away and things aren't looking good.
+
+ The *Spaceship Titanic* was an interstellar passenger liner launched a month ago. With almost 13,000 passengers on board, the vessel set out on its maiden voyage transporting emigrants from our solar system to three newly habitable exoplanets orbiting nearby stars.
+
+ While rounding Alpha Centauri en route to its first destination—the torrid 55 Cancri E—the unwary *Spaceship Titanic* collided with a spacetime anomaly hidden within a dust cloud. Sadly, it met a similar fate as its namesake from 1000 years before. Though the ship stayed intact, almost half of the passengers were transported to an alternate dimension!
+
+ ![joel-filipe-QwoNAhbmLLo-unsplash.jpg](https://storage.googleapis.com/kaggle-media/competitions/Spaceship%20Titanic/joel-filipe-QwoNAhbmLLo-unsplash.jpg)
+
+ To help rescue crews and retrieve the lost passengers, you are challenged to predict which passengers were transported by the anomaly using records recovered from the spaceship’s damaged computer system.
+
+ Help save them and change history!
+
+ ### Acknowledgments
+
+ Photos by [Joel Filipe](https://unsplash.com/@joelfilip?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText), [Richard Gatley](https://unsplash.com/@uncle_rickie?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) and [ActionVance](https://unsplash.com/@actionvance?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash.
+
+ ## Evaluation
+
+ ### Metric
+
+ Submissions are evaluated based on their [classification accuracy](https://developers.google.com/machine-learning/crash-course/classification/accuracy), the percentage of predicted labels that are correct.
+
+ ### Submission Format
+
+ The submission format for the competition is a csv file with the following format:
+
+ ```
+ PassengerId,Transported
+ 0013_01,False
+ 0018_01,False
+ 0019_01,False
+ 0021_01,False
+ etc.
+ ```
+
+ ## Frequently Asked Questions
+
+ ### What is a Getting Started competition?
+
+ Getting Started competitions were created by Kaggle data scientists for people who have little to no machine learning background. They are a great place to begin if you are new to data science or just finished a MOOC and want to get involved in Kaggle.
+
+ Getting Started competitions are a non-competitive way to get familiar with Kaggle’s platform, learn basic machine learning concepts, and start meeting people in the community. They have no cash prize and are on a rolling timeline.
+
+ ### How do I create and manage a team?
+
+ When you accept the competition rules, a team will be created for you. You can invite others to your team, accept a merger with another team, and update basic information like team name by going to the [Team](https://www.kaggle.com/c/spaceship-titanic/team) page.
+
+ We've heard from many Kagglers that teaming up is the best way to learn new skills AND have fun. If you don't have a teammate already, consider asking if anyone wants to team up in the [discussion forum](https://www.kaggle.com/c/spaceship-titanic/discussion).
+
+ ### What are Notebooks?
+
+ Kaggle Notebooks is a cloud computational environment that enables reproducible and collaborative analysis. Notebooks support scripts in Python and R, Jupyter Notebooks, and RMarkdown reports. You can visit the [Notebooks](https://www.kaggle.com/c/spaceship-titanic/notebooks) tab to view all of the publicly shared code for the Spaceship Titanic competition. For more on how to use Notebooks to learn data science, check out our [Courses](https://www.kaggle.com/learn/overview)!
+
+ ### Why did my team disappear from the leaderboard?
+
+ To keep with the spirit of getting-started competitions, we have implemented a two month rolling window on submissions. Once a submission is more than two months old, it will be invalidated and no longer count towards the leaderboard.
+
+ If your team has no submissions in the previous two months, the team will also drop from the leaderboard. This will keep the leaderboard at a manageable size, freshen it up, and prevent newcomers from getting lost in a sea of abandoned scores.
+
+ *"I worked so hard to get that score! Give it back!"* Read more about our decision to implement a rolling leaderboard [here](https://www.kaggle.com/c/titanic/discussion/6240).
+
+ ### How do I contact Support?
+
+ Kaggle does not have a dedicated support team so you’ll typically find that you receive a response more quickly by asking your question in the appropriate forum. (For this competition, you’ll want to use the [Spaceship Titanic discussion forum](https://www.kaggle.com/c/spaceship-titanic/discussion)).
+
+ Support is only able to help with issues that are being experienced by all participants. Before contacting support, please check the discussion forum for information on your problem. If you can’t find it, you can post your problem in the forum so a fellow participant or a Kaggle team member can provide help. The forums are full of useful information on the data, metric, and different approaches. We encourage you to use the forums often. If you share your knowledge, you'll find that others will share a lot in turn!
+
+ If your problem persists or it seems to be affecting all participants then please [contact us](https://www.kaggle.com/contact).
+
+ # Dataset Description
+
+ In this competition your task is to predict whether a passenger was transported to an alternate dimension during the Spaceship Titanic's collision with the spacetime anomaly. To help you make these predictions, you're given a set of personal records recovered from the ship's damaged computer system.
+
+ ## File and Data Field Descriptions
+
+ - **train.csv** - Personal records for about two-thirds (~8700) of the passengers, to be used as training data.
+   - `PassengerId` - A unique Id for each passenger. Each Id takes the form `gggg_pp` where `gggg` indicates a group the passenger is travelling with and `pp` is their number within the group. People in a group are often family members, but not always.
+   - `HomePlanet` - The planet the passenger departed from, typically their planet of permanent residence.
+   - `CryoSleep` - Indicates whether the passenger elected to be put into suspended animation for the duration of the voyage. Passengers in cryosleep are confined to their cabins.
+   - `Cabin` - The cabin number where the passenger is staying. Takes the form `deck/num/side`, where `side` can be either `P` for *Port* or `S` for *Starboard*.
+   - `Destination` - The planet the passenger will be debarking to.
+   - `Age` - The age of the passenger.
+   - `VIP` - Whether the passenger has paid for special VIP service during the voyage.
+   - `RoomService`, `FoodCourt`, `ShoppingMall`, `Spa`, `VRDeck` - Amount the passenger has billed at each of the *Spaceship Titanic*'s many luxury amenities.
+   - `Name` - The first and last names of the passenger.
+   - `Transported` - Whether the passenger was transported to another dimension. This is the target, the column you are trying to predict.
+ - **test.csv** - Personal records for the remaining one-third (~4300) of the passengers, to be used as test data. Your task is to predict the value of `Transported` for the passengers in this set.
+ - **sample_submission.csv** - A submission file in the correct format.
+   - `PassengerId` - Id for each passenger in the test set.
+   - `Transported` - The target. For each passenger, predict either `True` or `False`.
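
The `gggg_pp` and `deck/num/side` encodings described above are the natural starting point for feature engineering. As an illustrative sketch only (this snippet ships with neither package version), pandas can split them into separate columns:

```python
import pandas as pd

# Parse the structured ID fields from the data dictionary above.
df = pd.read_csv("data/train.csv")
df[["Group", "NumInGroup"]] = df["PassengerId"].str.split("_", expand=True)
df[["Deck", "CabinNum", "Side"]] = df["Cabin"].str.split("/", expand=True)  # NaN cabins stay NaN
print(df[["PassengerId", "Group", "Deck", "Side"]].head())
```
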
@@ -0,0 +1,43 @@
+ import argparse
+ from pathlib import Path
+ import pandas as pd
+ from sklearn.model_selection import train_test_split
+ from sklearn.dummy import DummyClassifier
+ from sklearn.metrics import accuracy_score
+ import joblib
+ import warnings
+
+ warnings.filterwarnings("ignore", category=UserWarning)  # keep Weco's panel tidy
+
+
+ def train(df: pd.DataFrame, test_df: pd.DataFrame, random_state: int = 0) -> float:
+     train_df, val_df = train_test_split(df, test_size=0.10, random_state=random_state, stratify=df["Transported"])
+
+     y_train = train_df.pop("Transported")
+     y_val = val_df.pop("Transported")
+
+     model = DummyClassifier(strategy="most_frequent", random_state=random_state)
+     model.fit(train_df, y_train)
+     preds = model.predict(val_df)
+     acc = accuracy_score(y_val, preds)
+
+     # **Important**: Keep this step!!!
+     # Save the model and generate a submission file on test
+     joblib.dump(model, "model.joblib")
+     test_preds = model.predict(test_df)
+     submission_df = pd.DataFrame({"PassengerId": test_df["PassengerId"], "Transported": test_preds.astype(bool)})
+     submission_df.to_csv("submission.csv", index=False)
+
+     return acc
+
+
+ if __name__ == "__main__":
+     p = argparse.ArgumentParser()
+     p.add_argument("--data-dir", type=Path, default=Path("./data/"))
+     p.add_argument("--seed", type=int, default=0)
+     args = p.parse_args()
+
+     train_df = pd.read_csv(args.data_dir / "train.csv")
+     test_df = pd.read_csv(args.data_dir / "test.csv")
+     acc = train(train_df, test_df, random_state=args.seed)
+     print(f"accuracy: {acc:.6f}")
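
The `DummyClassifier` above is a deliberate floor: it ignores the features entirely and predicts the majority class. As a hypothetical sketch (not part of either release) of the kind of rewrite the README's "Improve feature engineering, model choice and hyper-parameters." instruction points toward, the model construction could become a preprocessing-plus-classifier pipeline; the column names are taken from the competition's data dictionary:

```python
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

NUMERIC = ["Age", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"]
CATEGORICAL = ["HomePlanet", "CryoSleep", "Destination", "VIP"]


def make_model(random_state: int = 0) -> Pipeline:
    """Impute and encode per column type, then fit a random forest."""
    categorical_pipe = Pipeline(
        [("impute", SimpleImputer(strategy="most_frequent")), ("onehot", OneHotEncoder(handle_unknown="ignore"))]
    )
    preprocess = ColumnTransformer(
        [("num", SimpleImputer(strategy="median"), NUMERIC), ("cat", categorical_pipe, CATEGORICAL)],
        remainder="drop",  # PassengerId, Cabin and Name are ignored in this sketch
    )
    clf = RandomForestClassifier(n_estimators=300, random_state=random_state)
    return Pipeline([("pre", preprocess), ("clf", clf)])


# Drop-in replacement for the DummyClassifier line inside train():
# model = make_model(random_state)
```

Because the pipeline carries its own preprocessing, the existing `joblib.dump(model, ...)` and `model.predict(test_df)` steps keep working unchanged.
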
@@ -0,0 +1,16 @@
+ import kaggle
+ import zipfile
+ import os
+
+
+ def get_data():
+     kaggle.api.competition_download_files("spaceship-titanic")
+     # unzip the data
+     with zipfile.ZipFile("spaceship-titanic.zip", "r") as zip_ref:
+         zip_ref.extractall("data")
+     # remove the zip file
+     os.remove("spaceship-titanic.zip")
+
+
+ if __name__ == "__main__":
+     get_data()
@@ -0,0 +1,14 @@
+ import argparse
+ import kaggle
+ from pathlib import Path
+
+
+ def submit_submission(submission_path: Path):
+     kaggle.api.competition_submit(submission_path, "My first submission using weco agent", "spaceship-titanic")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--submission-path", "-p", type=Path, default="submission.csv")
+     args = parser.parse_args()
+     submit_submission(args.submission_path)
@@ -16,13 +16,13 @@ This example demonstrates using Weco to optimize a causal multi-head self-attent
  Run the following command to start the optimization process:

  ```bash
- weco --source optimize.py \
-     --eval-command "python evaluate.py --solution-path optimize.py" \
-     --metric speedup \
-     --maximize true \
-     --steps 30 \
-     --model gemini-2.5-pro-exp-03-25 \
-     --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
+ weco run --source optimize.py \
+     --eval-command "python evaluate.py --solution-path optimize.py" \
+     --metric speedup \
+     --maximize true \
+     --steps 30 \
+     --model gemini-2.5-pro-exp-03-25 \
+     --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
  ```

  ### Explanation
@@ -1,6 +1,7 @@
  import sys
  import pathlib
  import importlib
+ import importlib.util
  import traceback
  import torch
  import torch.nn as nn
@@ -10,7 +10,7 @@ authors = [
  ]
  description = "Documentation for `weco`, a CLI for using Weco AI's code optimizer."
  readme = "README.md"
- version = "0.2.15"
+ version = "0.2.16"
  license = {text = "MIT"}
  requires-python = ">=3.8"
  dependencies = ["requests", "rich"]
@@ -1,7 +1,7 @@
  import os

  # DO NOT EDIT
- __pkg_version__ = "0.2.15"
+ __pkg_version__ = "0.2.16"
  __api_version__ = "v1"

  __base_url__ = f"https://api.weco.ai/{__api_version__}"
@@ -0,0 +1,86 @@
+ from typing import Dict, Any
+ import rich
+ import requests
+ from weco import __pkg_version__, __base_url__
+ import sys
+
+
+ def handle_api_error(e: requests.exceptions.HTTPError, console: rich.console.Console) -> None:
+     """Extract and display error messages from API responses in a structured format."""
+     console.print(f"[bold red]{e.response.json()['detail']}[/]")
+     sys.exit(1)
+
+
+ def start_optimization_session(
+     console: rich.console.Console,
+     source_code: str,
+     evaluation_command: str,
+     metric_name: str,
+     maximize: bool,
+     steps: int,
+     code_generator_config: Dict[str, Any],
+     evaluator_config: Dict[str, Any],
+     search_policy_config: Dict[str, Any],
+     additional_instructions: str = None,
+     api_keys: Dict[str, Any] = {},
+     auth_headers: dict = {},  # Add auth_headers
+     timeout: int = 800,
+ ) -> Dict[str, Any]:
+     """Start the optimization session."""
+     with console.status("[bold green]Starting Optimization..."):
+         response = requests.post(
+             f"{__base_url__}/sessions",  # Path is relative to base_url
+             json={
+                 "source_code": source_code,
+                 "additional_instructions": additional_instructions,
+                 "objective": {"evaluation_command": evaluation_command, "metric_name": metric_name, "maximize": maximize},
+                 "optimizer": {
+                     "steps": steps,
+                     "code_generator": code_generator_config,
+                     "evaluator": evaluator_config,
+                     "search_policy": search_policy_config,
+                 },
+                 "metadata": {"client_name": "cli", "client_version": __pkg_version__, **api_keys},
+             },
+             headers=auth_headers,  # Add headers
+             timeout=timeout,
+         )
+     response.raise_for_status()
+     return response.json()
+
+
+ def evaluate_feedback_then_suggest_next_solution(
+     session_id: str,
+     execution_output: str,
+     additional_instructions: str = None,
+     api_keys: Dict[str, Any] = {},
+     auth_headers: dict = {},  # Add auth_headers
+     timeout: int = 800,
+ ) -> Dict[str, Any]:
+     """Evaluate the feedback and suggest the next solution."""
+     response = requests.post(
+         f"{__base_url__}/sessions/{session_id}/suggest",  # Path is relative to base_url
+         json={
+             "execution_output": execution_output,
+             "additional_instructions": additional_instructions,
+             "metadata": {**api_keys},
+         },
+         headers=auth_headers,  # Add headers
+         timeout=timeout,
+     )
+     response.raise_for_status()
+     return response.json()
+
+
+ def get_optimization_session_status(
+     session_id: str, include_history: bool = False, auth_headers: dict = {}, timeout: int = 800
+ ) -> Dict[str, Any]:
+     """Get the current status of the optimization session."""
+     response = requests.get(
+         f"{__base_url__}/sessions/{session_id}",  # Path is relative to base_url
+         params={"include_history": include_history},
+         headers=auth_headers,
+         timeout=timeout,
+     )
+     response.raise_for_status()
+     return response.json()
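
Taken together, the new `weco/api.py` is a thin session client over three endpoints. The sketch below (hypothetical: the `session_id` and `history` response fields are inferred from how `weco/cli.py` uses them) shows how the calls chain in a driver loop:

```python
import rich.console
from weco.api import (
    start_optimization_session,
    evaluate_feedback_then_suggest_next_solution,
    get_optimization_session_status,
)

console = rich.console.Console()

# 1. Open a session; the config dicts mirror what cli.py builds by default.
session = start_optimization_session(
    console=console,
    source_code=open("optimize.py").read(),
    evaluation_command="python evaluate.py",
    metric_name="speedup",
    maximize=True,
    steps=10,
    code_generator_config={"model": "gemini-2.5-pro-exp-03-25"},
    evaluator_config={"model": "gemini-2.5-pro-exp-03-25", "include_analysis": True},
    search_policy_config={"num_drafts": 2, "debug_prob": 0.5},
)
session_id = session["session_id"]  # field name inferred from cli.py usage

# 2. Report the evaluation's terminal output; the server replies with the next candidate.
term_out = "speedup: 1.25"  # in practice: the captured output of the eval command
suggestion = evaluate_feedback_then_suggest_next_solution(
    session_id=session_id, execution_output=term_out
)

# 3. Fetch the best solution and the full history used to draw the metric tree.
status = get_optimization_session_status(session_id=session_id, include_history=True)
print(len(status["history"]), "solutions explored")
```
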
@@ -256,10 +256,7 @@ def main() -> None:
      maximize = args.maximize == "true"
      steps = args.steps
      code_generator_config = {"model": args.model}
-     evaluator_config = {
-         "model": args.model,
-         "include_analysis": False,  # NOTE: False for now
-     }
+     evaluator_config = {"model": args.model, "include_analysis": True}
      search_policy_config = {
          "num_drafts": max(1, math.ceil(0.15 * steps)),
          "debug_prob": 0.5,
@@ -388,7 +385,6 @@ def main() -> None:

          # Send feedback and get next suggestion
          eval_and_next_solution_response = evaluate_feedback_then_suggest_next_solution(
-             console=console,
              session_id=session_id,
              execution_output=term_out,
              additional_instructions=current_additional_instructions,  # Pass current instructions
@@ -408,11 +404,7 @@ def main() -> None:
          # Get the optimization session status for
          # the best solution, its score, and the history to plot the tree
          status_response = get_optimization_session_status(
-             console=console,
-             session_id=session_id,
-             include_history=True,
-             timeout=timeout,
-             auth_headers=auth_headers,
+             session_id=session_id, include_history=True, timeout=timeout, auth_headers=auth_headers
          )

          # Update the step of the progress bar
@@ -493,7 +485,6 @@ def main() -> None:

          # Ensure we pass evaluation results for the last step's generated solution
          eval_and_next_solution_response = evaluate_feedback_then_suggest_next_solution(
-             console=console,
              session_id=session_id,
              execution_output=term_out,
              additional_instructions=current_additional_instructions,
@@ -510,7 +501,7 @@ def main() -> None:
          # Get the optimization session status for
          # the best solution, its score, and the history to plot the tree
          status_response = get_optimization_session_status(
-             console=console, session_id=session_id, include_history=True, timeout=timeout, auth_headers=auth_headers
+             session_id=session_id, include_history=True, timeout=timeout, auth_headers=auth_headers
          )
          # Build the metric tree
          tree_panel.build_metric_tree(nodes=status_response["history"])
@@ -575,7 +566,11 @@ def main() -> None:
          console.print(end_optimization_layout)

      except Exception as e:
-         console.print(Panel(f"[bold red]Error: {str(e)}", title="[bold red]Error", border_style="red"))
+         try:
+             error_message = e.response.json()["detail"]
+         except Exception:
+             error_message = str(e)
+         console.print(Panel(f"[bold red]Error: {error_message}", title="[bold red]Error", border_style="red"))
          # Print traceback for debugging
-         console.print_exception(show_locals=True)
+         # console.print_exception(show_locals=False)
          sys.exit(1)
@@ -121,6 +121,7 @@ class Node:
          self.metric = metric
          self.is_buggy = is_buggy
          self.evaluated = True
+         self.name = ""


  class MetricTree:
@@ -181,16 +182,17 @@ class MetricTreePanel:
          nodes.sort(key=lambda x: x["step"])

          # Finally build the new tree
-         for node in nodes:
-             self.metric_tree.add_node(
-                 Node(
-                     id=node["solution_id"],
-                     parent_id=node["parent_id"],
-                     code=node["code"],
-                     metric=node["metric_value"],
-                     is_buggy=node["is_buggy"],
-                 )
+         for i, node in enumerate(nodes):
+             node = Node(
+                 id=node["solution_id"],
+                 parent_id=node["parent_id"],
+                 code=node["code"],
+                 metric=node["metric_value"],
+                 is_buggy=node["is_buggy"],
              )
+             if i == 0:
+                 node.name = "baseline"
+             self.metric_tree.add_node(node)

      def set_unevaluated_node(self, node_id: str):
          """Set the unevaluated node."""
@@ -232,12 +234,15 @@ class MetricTreePanel:
                  style = None
                  text = f"{node.metric:.3f}"

+             # add the node name info
+             text = f"{node.name} {text}".strip()
+
              s = f"[{f'{style} ' if style is not None else ''}{color}]● {text}"
              subtree = tree.add(s)
              for child in node.children:
                  append_rec(child, subtree)

-         tree = Tree("🌳")
+         tree = Tree("", hide_root=True)
          for n in self.metric_tree.get_draft_nodes():
              append_rec(n, tree)
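
The visual effect of the `panels.py` change can be reproduced standalone with `rich`: `hide_root=True` suppresses the old "🌳" root label, so the first solution, now tagged `baseline`, becomes the top of the rendered tree. The metric values below are made up for illustration:

```python
from rich.console import Console
from rich.tree import Tree

# hide_root=True: the empty root label is not rendered, matching the new panel.
tree = Tree("", hide_root=True)
baseline = tree.add("[green]● baseline 0.787")  # the first node now carries a name
improved = baseline.add("[green]● 0.803")
improved.add("[red]● 0.000")  # a buggy child node
Console().print(tree)
```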