weco 0.2.24__tar.gz → 0.2.26__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {weco-0.2.24 → weco-0.2.26}/.gitignore +4 -1
- {weco-0.2.24 → weco-0.2.26}/PKG-INFO +2 -1
- {weco-0.2.24 → weco-0.2.26}/README.md +1 -0
- {weco-0.2.24 → weco-0.2.26}/examples/cuda/evaluate.py +1 -1
- weco-0.2.26/examples/hello-kernel-world/README.md +59 -0
- {weco-0.2.24 → weco-0.2.26}/examples/hello-kernel-world/evaluate.py +1 -1
- {weco-0.2.24 → weco-0.2.26}/examples/spaceship-titanic/evaluate.py +5 -5
- {weco-0.2.24 → weco-0.2.26}/examples/triton/evaluate.py +1 -1
- {weco-0.2.24 → weco-0.2.26}/pyproject.toml +1 -1
- {weco-0.2.24 → weco-0.2.26}/weco/api.py +16 -22
- {weco-0.2.24 → weco-0.2.26}/weco/chatbot.py +6 -2
- {weco-0.2.24 → weco-0.2.26}/weco/cli.py +1 -1
- {weco-0.2.24 → weco-0.2.26}/weco/optimizer.py +4 -0
- {weco-0.2.24 → weco-0.2.26}/weco.egg-info/PKG-INFO +2 -1
- {weco-0.2.24 → weco-0.2.26}/weco.egg-info/SOURCES.txt +1 -0
- {weco-0.2.24 → weco-0.2.26}/.github/workflows/lint.yml +0 -0
- {weco-0.2.24 → weco-0.2.26}/.github/workflows/release.yml +0 -0
- {weco-0.2.24 → weco-0.2.26}/LICENSE +0 -0
- {weco-0.2.24 → weco-0.2.26}/assets/example-optimization.gif +0 -0
- {weco-0.2.24 → weco-0.2.26}/assets/weco.svg +0 -0
- {weco-0.2.24 → weco-0.2.26}/contributing.md +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/cuda/README.md +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/cuda/guide.md +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/cuda/optimize.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/hello-kernel-world/optimize.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/prompt/README.md +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/prompt/eval.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/prompt/optimize.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/prompt/prompt_guide.md +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/spaceship-titanic/README.md +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/spaceship-titanic/competition_description.md +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/spaceship-titanic/data/sample_submission.csv +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/spaceship-titanic/data/test.csv +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/spaceship-titanic/data/train.csv +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/spaceship-titanic/train.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/triton/README.md +0 -0
- {weco-0.2.24 → weco-0.2.26}/examples/triton/optimize.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/setup.cfg +0 -0
- {weco-0.2.24 → weco-0.2.26}/weco/__init__.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/weco/auth.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/weco/constants.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/weco/panels.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/weco/utils.py +0 -0
- {weco-0.2.24 → weco-0.2.26}/weco.egg-info/dependency_links.txt +0 -0
- {weco-0.2.24 → weco-0.2.26}/weco.egg-info/entry_points.txt +0 -0
- {weco-0.2.24 → weco-0.2.26}/weco.egg-info/requires.txt +0 -0
- {weco-0.2.24 → weco-0.2.26}/weco.egg-info/top_level.txt +0 -0

{weco-0.2.24 → weco-0.2.26}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: weco
-Version: 0.2.24
+Version: 0.2.26
 Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
 Author-email: Weco AI Team <contact@weco.ai>
 License: MIT
@@ -268,6 +268,7 @@ Weco supports the following LLM models:
 - `gpt-4o-mini`
 
 ### Anthropic Models
+- `claude-opus-4-1`
 - `claude-opus-4-0`
 - `claude-sonnet-4-0`
 - `claude-3-7-sonnet-latest`
```

(The `README.md +1 -0` entry in the summary above is presumably this same `claude-opus-4-1` line; PKG-INFO embeds the README, so the hunks match.)
{weco-0.2.24 → weco-0.2.26}/examples/cuda/evaluate.py

```diff
@@ -154,7 +154,7 @@ if __name__ == "__main__":
     max_diff_avg /= n_correctness_trials
     print(f"max float diff between values of baseline and optimized model: {max_diff_avg}")
     if max_diff_avg > correctness_tolerance:
-        print("
+        print("Incorrect solution: max float diff is too high")
 
     # measure performance
     inputs = get_inputs(batch_size=batch_size, seq_len=seq_len, n_embd=n_embd, device="cuda")
```

(The removed `print("` line is shown truncated by the diff viewer.)
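The same one-line message fix lands in all three example evaluators, which share an evaluate-then-benchmark harness. A minimal sketch of that pattern, under the assumption that `baseline` and `optimized` are callables built from the solution file (the real scripts construct them from `--solution-path`):

```python
import time

import torch


def check_and_benchmark(baseline, optimized, inputs, tolerance=1e-4, n_trials=10):
    # Correctness gate: average the max elementwise deviation over several trials.
    max_diff_avg = 0.0
    for _ in range(n_trials):
        with torch.no_grad():
            max_diff_avg += (baseline(*inputs) - optimized(*inputs)).abs().max().item()
    max_diff_avg /= n_trials
    print(f"max float diff between values of baseline and optimized model: {max_diff_avg}")
    if max_diff_avg > tolerance:
        print("Incorrect solution: max float diff is too high")

    # Performance: report the speedup that Weco reads as its target metric.
    def mean_runtime(fn):
        start = time.perf_counter()
        for _ in range(n_trials):
            fn(*inputs)
        return (time.perf_counter() - start) / n_trials

    print(f"speedup: {mean_runtime(baseline) / mean_runtime(optimized):.3f}")
```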
weco-0.2.26/examples/hello-kernel-world/README.md (new file)

```diff
@@ -0,0 +1,59 @@
+# Hello Kernel World
+
+This example demonstrates the basics of using Weco to optimize a simple PyTorch model. The model performs a series of basic operations: matrix multiplication, division, summation, and scaling. It's designed as an introductory tutorial to help you understand how Weco works before moving on to more advanced optimization tasks.
+
+## Setup
+
+Install the CLI using `pip`:
+```bash
+pip install weco
+```
+
+Create your API key from one of the supported providers:
+- **OpenAI:** Create your API key [here](https://platform.openai.com/api-keys), then run: `export OPENAI_API_KEY="your_key_here"`
+- **Anthropic:** Create your API key [here](https://console.anthropic.com/settings/keys), then run: `export ANTHROPIC_API_KEY="your_key_here"`
+- **Google:** Create your API key [here](https://aistudio.google.com/apikey), then run: `export GEMINI_API_KEY="your_key_here"`
+
+Install the required dependencies:
+```bash
+pip install torch
+```
+
+## Run Weco
+
+Now run Weco to optimize your code:
+```bash
+weco run --source optimize.py \
+    --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
+    --metric speedup \
+    --goal maximize \
+    --steps 15 \
+    --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
+```
+
+**Note:** If you have an NVIDIA GPU, change the device in the `--eval-command` to `cuda`. If you are running this on Apple Silicon, set it to `mps`.
+
+### Explanation
+
+* `--source optimize.py`: The simple PyTorch model to be optimized.
+* `--eval-command "python evaluate.py --solution-path optimize.py --device cpu"`: Runs the evaluation script, which benchmarks the optimized code against a baseline and prints the `speedup`.
+* `--metric speedup`: The optimization target metric.
+* `--goal maximize`: To increase the speedup.
+* `--steps 15`: The number of optimization iterations.
+* `--additional-instructions "..."`: Provides specific guidance to focus on operation fusion while maintaining correctness.
+
+Weco will iteratively modify `optimize.py`, attempting to fuse and optimize the operations in the forward method, guided by the performance feedback from the evaluation script.
+
+## Interactive Tutorial
+
+For a hands-on walkthrough of this example, check out the [Colab notebook](colab_notebook_walkthrough.ipynb) that provides step-by-step guidance through the optimization process.
+
+## Next Steps
+
+Once you've mastered the basics with this example, explore more advanced optimization techniques:
+- [Triton Optimization](/examples/triton/README.md) for GPU kernel programming
+- [CUDA Optimization](/examples/cuda/README.md) for low-level GPU optimization
+- [Model Development](/examples/spaceship-titanic/README.md) for ML model optimization
+- [Prompt Engineering](/examples/prompt/README.md) for LLM prompt optimization
+
+You can also check out our [CLI Reference](https://docs.weco.ai/cli/cli-reference) to learn more about what you can do with the tool.
```
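For readers without the example checked out: the "matrix multiplication, division, summation, and scaling" the new README describes corresponds to a deliberately unfused forward pass, roughly like this hypothetical sketch (the shipped `optimize.py` may differ):

```python
import torch
import torch.nn as nn


class Model(nn.Module):
    """Toy model: matmul -> divide -> sum -> scale, left unfused on purpose."""

    def __init__(self, scale: float = 0.5):
        super().__init__()
        self.scale = scale

    def forward(self, x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
        y = torch.matmul(x, w)  # matrix multiplication
        y = y / 2.0             # division
        y = y.sum(dim=-1)       # summation
        return y * self.scale   # scaling
```

Each step above runs as a separate operation, which is exactly the inefficiency the `--additional-instructions` ask Weco to fuse away.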
{weco-0.2.24 → weco-0.2.26}/examples/hello-kernel-world/evaluate.py

```diff
@@ -132,7 +132,7 @@ if __name__ == "__main__":
     max_diff_avg /= n_correctness_trials
     print(f"max float diff between values of baseline and optimized model: {max_diff_avg}")
     if max_diff_avg > correctness_tolerance:
-        print("
+        print("Incorrect solution: max float diff is too high")
 
     # measure performance
     inputs = get_inputs(batch_size, input_size, args.device)
```
{weco-0.2.24 → weco-0.2.26}/examples/spaceship-titanic/evaluate.py

```diff
@@ -5,7 +5,7 @@ from sklearn.metrics import accuracy_score
 from sklearn.model_selection import train_test_split
 
 
-class
+class IncorrectSubmissionError(Exception):
     pass
 
 
@@ -18,18 +18,18 @@ def evaluate_for_accuracy(
 
     # Submission checks
     if len(submission_df) != len(answers_df):
-        raise
+        raise IncorrectSubmissionError("Submission must have the same length as the answers.")
     if target_column not in submission_df.columns:
-        raise
+        raise IncorrectSubmissionError(f"Submission must have a `{target_column}` column")
     if id_column not in submission_df.columns:
-        raise
+        raise IncorrectSubmissionError(f"Submission must have a `{id_column}` column")
 
     # Sort on id to ensure correct ordering
     submission_df = submission_df.sort_values(by=id_column)
     answers_df = answers_df.sort_values(by=id_column)
 
     if (submission_df[id_column].values != answers_df[id_column].values).any():
-        raise
+        raise IncorrectSubmissionError(f"Submission and Answers `{id_column}`'s do not match")
 
     return accuracy_score(submission_df[target_column], answers_df[target_column])
```
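The truncated `raise` lines on the removed side show the point of this hunk: every submission check now raises a named `IncorrectSubmissionError` with a message instead of a bare statement. A sketch of how a grading harness can use that distinction (the harness itself and the Spaceship Titanic column names are assumptions, not shown in the diff):

```python
import pandas as pd


def score_submission(submission_path: str, answers_path: str) -> float:
    submission_df = pd.read_csv(submission_path)
    answers_df = pd.read_csv(answers_path)
    try:
        return evaluate_for_accuracy(
            submission_df,
            answers_df,
            target_column="Transported",
            id_column="PassengerId",
        )
    except IncorrectSubmissionError as exc:
        # A malformed submission is reported and scored as zero,
        # rather than crashing the evaluation run.
        print(f"Incorrect submission: {exc}")
        return 0.0
```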
{weco-0.2.24 → weco-0.2.26}/examples/triton/evaluate.py

```diff
@@ -149,7 +149,7 @@ if __name__ == "__main__":
     max_diff_avg /= n_correctness_trials
     print(f"max float diff between values of baseline and optimized model: {max_diff_avg}")
    if max_diff_avg > correctness_tolerance:
-        print("
+        print("Incorrect solution: max float diff is too high")
 
     # measure performance
     inputs = get_inputs(batch_size=batch_size, seq_len=seq_len, n_embd=n_embd, device="cuda")
```
{weco-0.2.24 → weco-0.2.26}/pyproject.toml

```diff
@@ -8,7 +8,7 @@ name = "weco"
 authors = [{ name = "Weco AI Team", email = "contact@weco.ai" }]
 description = "Documentation for `weco`, a CLI for using Weco AI's code optimizer."
 readme = "README.md"
-version = "0.2.24"
+version = "0.2.26"
 license = { text = "MIT" }
 requires-python = ">=3.8"
 dependencies = [
```
{weco-0.2.24 → weco-0.2.26}/weco/api.py

```diff
@@ -14,8 +14,6 @@ def handle_api_error(e: requests.exceptions.HTTPError, console: Console) -> None
     except (ValueError, KeyError):  # Handle cases where response is not JSON or detail key is missing
         detail = f"HTTP {e.response.status_code} Error: {e.response.text}"
     console.print(f"[bold red]{detail}[/]")
-    # Avoid exiting here, let the caller decide if the error is fatal
-    # sys.exit(1)
 
 
 def start_optimization_run(
@@ -32,7 +30,7 @@ def start_optimization_run(
     api_keys: Dict[str, Any] = {},
     auth_headers: dict = {},
     timeout: Union[int, Tuple[int, int]] = DEFAULT_API_TIMEOUT,
-) -> Dict[str, Any]:
+) -> Optional[Dict[str, Any]]:
     """Start the optimization run."""
     with console.status("[bold green]Starting Optimization..."):
         try:
@@ -61,12 +59,12 @@
             if result.get("code") is None:
                 result["code"] = ""
             return result
-        except
+        except requests.exceptions.HTTPError as e:
             handle_api_error(e, console)
-
+            return None
         except Exception as e:
             console.print(f"[bold red]Error starting run: {e}[/]")
-
+            return None
 
 
 def evaluate_feedback_then_suggest_next_solution(
```
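The net effect of these three hunks is a new error contract: `start_optimization_run` is now typed `Optional[Dict[str, Any]]` and returns `None` after reporting an API error, rather than leaving termination to `handle_api_error`. Callers are expected to check for `None`, as the `weco/optimizer.py` hunk further down does. A condensed sketch of the calling pattern (keyword arguments elided):

```python
from typing import Any, Dict, Optional


def launch(console, **run_kwargs) -> bool:
    # start_optimization_run now reports HTTP errors itself and returns None
    # instead of exiting the process.
    run_response: Optional[Dict[str, Any]] = start_optimization_run(
        console=console, **run_kwargs
    )
    if run_response is None:
        return False  # endpoint failed; the optimization was unsuccessful
    run_id = run_response["run_id"]
    run_name = run_response["run_name"]
    console.print(f"Started run {run_name} ({run_id})")
    return True
```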
```diff
@@ -101,11 +99,11 @@ def evaluate_feedback_then_suggest_next_solution(
         return result
     except requests.exceptions.HTTPError as e:
         # Allow caller to handle suggest errors, maybe retry or terminate
-        handle_api_error(e, console)
-        raise
+        handle_api_error(e, console)
+        raise
     except Exception as e:
-        print(f"Error: {e}")
-        raise
+        console.print(f"[bold red]Error: {e}[/]")
+        raise
 
 
 def get_optimization_run_status(
@@ -137,11 +135,11 @@ def get_optimization_run_status(
             result["nodes"][i]["code"] = ""
         return result
     except requests.exceptions.HTTPError as e:
-        handle_api_error(e, console)
-        raise
+        handle_api_error(e, console)
+        raise
     except Exception as e:
-        print(f"Error getting run status: {e}")
-        raise
+        console.print(f"[bold red]Error getting run status: {e}[/]")
+        raise
 
 
 def send_heartbeat(run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = (10, 10)) -> bool:
@@ -152,7 +150,7 @@ def send_heartbeat(run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = (10, 10)) -> bool:
         return True
     except requests.exceptions.HTTPError as e:
         if e.response.status_code == 409:
-            print("Polling ignore: Run {run_id} is not running.", file=sys.stderr)
+            print(f"Polling ignore: Run {run_id} is not running.", file=sys.stderr)
         else:
             print(f"Polling failed for run {run_id}: HTTP {e.response.status_code}", file=sys.stderr)
         return False
```

(The `handle_api_error`/`raise` pairs render identically here; the registry view flattened what was most likely an indentation-only change, and the substantive edits are the `print` → `console.print` swaps and the f-string fix.)
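The `send_heartbeat` fix is a one-character bug: without the `f` prefix, Python prints the braces literally instead of interpolating `run_id`. A quick illustration:

```python
run_id = "abc123"

# Before the fix: the placeholder is printed verbatim.
print("Polling ignore: Run {run_id} is not running.")
# -> Polling ignore: Run {run_id} is not running.

# After the fix: the run id is interpolated.
print(f"Polling ignore: Run {run_id} is not running.")
# -> Polling ignore: Run abc123 is not running.
```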
```diff
@@ -221,7 +219,6 @@ def get_optimization_suggestions_from_codebase(
     """Analyze codebase and get optimization suggestions using the model-agnostic backend API."""
     model, api_key_dict = _determine_model_and_api_key()
     try:
-        model, api_key_dict = _determine_model_and_api_key()
         response = requests.post(
             f"{__base_url__}/onboard/analyze-codebase",
             json={
@@ -238,7 +235,7 @@ def get_optimization_suggestions_from_codebase(
         result = response.json()
         return [option for option in result.get("options", [])]
 
-    except
+    except requests.exceptions.HTTPError as e:
         handle_api_error(e, console)
         return None
     except Exception as e:
@@ -257,7 +254,6 @@ def generate_evaluation_script_and_metrics(
     """Generate evaluation script and determine metrics using the model-agnostic backend API."""
     model, api_key_dict = _determine_model_and_api_key()
     try:
-        model, api_key_dict = _determine_model_and_api_key()
         response = requests.post(
             f"{__base_url__}/onboard/generate-script",
             json={
@@ -294,7 +290,6 @@ def analyze_evaluation_environment(
     """Analyze existing evaluation scripts and environment using the model-agnostic backend API."""
     model, api_key_dict = _determine_model_and_api_key()
     try:
-        model, api_key_dict = _determine_model_and_api_key()
         response = requests.post(
             f"{__base_url__}/onboard/analyze-environment",
             json={
@@ -312,7 +307,7 @@ def analyze_evaluation_environment(
         response.raise_for_status()
         return response.json()
 
-    except
+    except requests.exceptions.HTTPError as e:
         handle_api_error(e, console)
         return None
     except Exception as e:
@@ -331,7 +326,6 @@ def analyze_script_execution_requirements(
     """Analyze script to determine proper execution command using the model-agnostic backend API."""
     model, api_key_dict = _determine_model_and_api_key()
     try:
-        model, api_key_dict = _determine_model_and_api_key()
         response = requests.post(
             f"{__base_url__}/onboard/analyze-script",
             json={
@@ -348,7 +342,7 @@ def analyze_script_execution_requirements(
         result = response.json()
         return result.get("command", f"python {script_path}")
 
-    except
+    except requests.exceptions.HTTPError as e:
         handle_api_error(e, console)
         return f"python {script_path}"
     except Exception as e:
```

Across these onboarding helpers the pattern is uniform: the duplicated `_determine_model_and_api_key()` call inside each `try` block is removed, and the `except` clauses (shown truncated on the removed side) catch `requests.exceptions.HTTPError` explicitly before falling back to a safe return value.
{weco-0.2.24 → weco-0.2.26}/weco/chatbot.py

```diff
@@ -2,6 +2,7 @@ import pathlib
 import shlex
 import argparse
 from typing import List, Optional, Dict, Any, Tuple
+import sys
 
 from rich.console import Console
 from rich.prompt import Prompt
@@ -682,9 +683,9 @@ class Chatbot:
 
         # Import and execute the actual optimization function
         # (Import here to avoid circular imports)
-        from .optimizer import execute_optimization as
+        from .optimizer import execute_optimization as execute_optimization_run
 
-        success =
+        success = execute_optimization_run(
             source=target_file,
             eval_command=eval_config["eval_command"],
             metric=eval_config["metric_name"],
@@ -702,6 +703,9 @@ class Chatbot:
         else:
             self.console.print("\n[bold yellow]⚠️ Optimization ended early or encountered issues.[/]")
 
+        exit_code = 0 if success else 1
+        sys.exit(exit_code)
+
     def show_and_copy_command(self, command: str) -> None:
         """Show the command and copy it to clipboard."""
         import subprocess
```
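With `sys.exit(0 if success else 1)`, the interactive chat flow now surfaces the optimization outcome as a process exit status. That makes the flow scriptable; for instance, a hypothetical wrapper could gate a follow-up step on it (assuming the CLI entry point propagates the status the same way):

```python
import subprocess

# Hypothetical wrapper: the exit status now reflects the run outcome
# (0 = optimization succeeded, 1 = it ended early or failed).
result = subprocess.run(["weco", "run", "--source", "optimize.py"], check=False)
if result.returncode == 0:
    print("Optimization succeeded; keeping the tuned source.")
else:
    print("Optimization failed; reverting to the original source.")
```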
{weco-0.2.24 → weco-0.2.26}/weco/cli.py

```diff
@@ -71,7 +71,7 @@ def configure_run_parser(run_parser: argparse.ArgumentParser) -> None:
 
 def execute_run_command(args: argparse.Namespace) -> None:
     """Execute the 'weco run' command with all its logic."""
-    from .optimizer import execute_optimization
+    from .optimizer import execute_optimization
 
     success = execute_optimization(
         source=args.source,
```

(The removed and added import lines render identically here; the underlying change is likely whitespace-only or truncated by the diff viewer.)
{weco-0.2.24 → weco-0.2.26}/weco/optimizer.py

```diff
@@ -183,6 +183,10 @@ def execute_optimization(
         auth_headers=auth_headers,
         timeout=api_timeout,
     )
+    # Indicate the endpoint failed to return a response and the optimization was unsuccessful
+    if run_response is None:
+        return False
+
     run_id = run_response["run_id"]
     run_name = run_response["run_name"]
     current_run_id_for_heartbeat = run_id
```
{weco-0.2.24 → weco-0.2.26}/weco.egg-info/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: weco
-Version: 0.2.24
+Version: 0.2.26
 Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
 Author-email: Weco AI Team <contact@weco.ai>
 License: MIT
@@ -268,6 +268,7 @@ Weco supports the following LLM models:
 - `gpt-4o-mini`
 
 ### Anthropic Models
+- `claude-opus-4-1`
 - `claude-opus-4-0`
 - `claude-sonnet-4-0`
 - `claude-3-7-sonnet-latest`
```
{weco-0.2.24 → weco-0.2.26}/weco.egg-info/SOURCES.txt

```diff
@@ -11,6 +11,7 @@ examples/cuda/README.md
 examples/cuda/evaluate.py
 examples/cuda/guide.md
 examples/cuda/optimize.py
+examples/hello-kernel-world/README.md
 examples/hello-kernel-world/colab_notebook_walkthrough.ipynb
 examples/hello-kernel-world/evaluate.py
 examples/hello-kernel-world/optimize.py
```