weco 0.2.19__tar.gz → 0.2.20__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {weco-0.2.19 → weco-0.2.20}/PKG-INFO +4 -1
- {weco-0.2.19 → weco-0.2.20}/README.md +3 -0
- weco-0.2.20/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb +310 -0
- {weco-0.2.19 → weco-0.2.20}/pyproject.toml +1 -1
- {weco-0.2.19 → weco-0.2.20}/weco/api.py +24 -40
- {weco-0.2.19 → weco-0.2.20}/weco/cli.py +193 -235
- {weco-0.2.19 → weco-0.2.20}/weco/panels.py +13 -10
- {weco-0.2.19 → weco-0.2.20}/weco.egg-info/PKG-INFO +4 -1
- {weco-0.2.19 → weco-0.2.20}/weco.egg-info/SOURCES.txt +1 -0
- {weco-0.2.19 → weco-0.2.20}/.github/workflows/lint.yml +0 -0
- {weco-0.2.19 → weco-0.2.20}/.github/workflows/release.yml +0 -0
- {weco-0.2.19 → weco-0.2.20}/.gitignore +0 -0
- {weco-0.2.19 → weco-0.2.20}/.repomixignore +0 -0
- {weco-0.2.19 → weco-0.2.20}/LICENSE +0 -0
- {weco-0.2.19 → weco-0.2.20}/assets/example-optimization.gif +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/cuda/README.md +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/cuda/evaluate.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/cuda/guide.md +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/cuda/optimize.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/hello-kernel-world/evaluate.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/hello-kernel-world/optimize.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/prompt/README.md +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/prompt/eval.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/prompt/optimize.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/prompt/prompt_guide.md +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/spaceship-titanic/README.md +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/spaceship-titanic/competition_description.md +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/spaceship-titanic/data/sample_submission.csv +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/spaceship-titanic/data/test.csv +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/spaceship-titanic/data/train.csv +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/spaceship-titanic/evaluate.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/spaceship-titanic/requirements-test.txt +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/triton/README.md +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/triton/evaluate.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/examples/triton/optimize.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/setup.cfg +0 -0
- {weco-0.2.19 → weco-0.2.20}/weco/__init__.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/weco/auth.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/weco/utils.py +0 -0
- {weco-0.2.19 → weco-0.2.20}/weco.egg-info/dependency_links.txt +0 -0
- {weco-0.2.19 → weco-0.2.20}/weco.egg-info/entry_points.txt +0 -0
- {weco-0.2.19 → weco-0.2.20}/weco.egg-info/requires.txt +0 -0
- {weco-0.2.19 → weco-0.2.20}/weco.egg-info/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: weco
|
|
3
|
-
Version: 0.2.
|
|
3
|
+
Version: 0.2.20
|
|
4
4
|
Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
|
|
5
5
|
Author-email: Weco AI Team <contact@weco.ai>
|
|
6
6
|
License: MIT
|
|
@@ -29,6 +29,9 @@ Dynamic: license-file
|
|
|
29
29
|
[](https://docs.weco.ai/)
|
|
30
30
|
[](https://badge.fury.io/py/weco)
|
|
31
31
|
[](https://arxiv.org/abs/2502.13138)
|
|
32
|
+
[](https://colab.research.google.com/github/WecoAI/weco-cli/blob/main/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb)
|
|
33
|
+
|
|
34
|
+
`pip install weco`
|
|
32
35
|
|
|
33
36
|
</div>
|
|
34
37
|
|
|
@@ -6,6 +6,9 @@
|
|
|
6
6
|
[](https://docs.weco.ai/)
|
|
7
7
|
[](https://badge.fury.io/py/weco)
|
|
8
8
|
[](https://arxiv.org/abs/2502.13138)
|
|
9
|
+
[](https://colab.research.google.com/github/WecoAI/weco-cli/blob/main/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb)
|
|
10
|
+
|
|
11
|
+
`pip install weco`
|
|
9
12
|
|
|
10
13
|
</div>
|
|
11
14
|
|
|
@@ -0,0 +1,310 @@
|
|
|
1
|
+
{
|
|
2
|
+
"nbformat": 4,
|
|
3
|
+
"nbformat_minor": 0,
|
|
4
|
+
"metadata": {
|
|
5
|
+
"colab": {
|
|
6
|
+
"provenance": [],
|
|
7
|
+
"gpuType": "T4"
|
|
8
|
+
},
|
|
9
|
+
"kernelspec": {
|
|
10
|
+
"name": "python3",
|
|
11
|
+
"display_name": "Python 3"
|
|
12
|
+
},
|
|
13
|
+
"language_info": {
|
|
14
|
+
"name": "python"
|
|
15
|
+
},
|
|
16
|
+
"accelerator": "GPU"
|
|
17
|
+
},
|
|
18
|
+
"cells": [
|
|
19
|
+
{
|
|
20
|
+
"cell_type": "markdown",
|
|
21
|
+
"source": [
|
|
22
|
+
"# Hello Kernel World 🔥"
|
|
23
|
+
],
|
|
24
|
+
"metadata": {
|
|
25
|
+
"id": "RO1o5fS5W8xc"
|
|
26
|
+
}
|
|
27
|
+
},
|
|
28
|
+
{
|
|
29
|
+
"cell_type": "markdown",
|
|
30
|
+
"source": [
|
|
31
|
+
"<p align=\"left\">\n",
|
|
32
|
+
" <img src=\"https://raw.githubusercontent.com/WecoAI/weco-cli/main/assets/example-optimization.gif\"\n",
|
|
33
|
+
" alt=\"Optimization demo\"\n",
|
|
34
|
+
" width=\"720\">\n",
|
|
35
|
+
"</p>\n",
|
|
36
|
+
"\n",
|
|
37
|
+
"## 🖥️ Weco CLI Resources\n",
|
|
38
|
+
"\n",
|
|
39
|
+
"- 📖 [CLI Reference](https://docs.weco.ai/cli/cli-reference) - Explore our docs for an in-depth look at what the tool can do\n",
|
|
40
|
+
"- ✨ [Examples](https://docs.weco.ai/examples) - Explore automated R&D across kernel engineering, ML engineering and prompt engineering"
|
|
41
|
+
],
|
|
42
|
+
"metadata": {
|
|
43
|
+
"id": "yorBWlqGuC7-"
|
|
44
|
+
}
|
|
45
|
+
},
|
|
46
|
+
{
|
|
47
|
+
"cell_type": "markdown",
|
|
48
|
+
"source": [
|
|
49
|
+
"## Setup Dependencies"
|
|
50
|
+
],
|
|
51
|
+
"metadata": {
|
|
52
|
+
"id": "5BQGGqbJW2Eq"
|
|
53
|
+
}
|
|
54
|
+
},
|
|
55
|
+
{
|
|
56
|
+
"cell_type": "code",
|
|
57
|
+
"source": [
|
|
58
|
+
"# Install requirements\n",
|
|
59
|
+
"%pip install -q weco ipywidgets numpy torch\n",
|
|
60
|
+
"\n",
|
|
61
|
+
"# Enable custom widgets\n",
|
|
62
|
+
"from google.colab import output\n",
|
|
63
|
+
"output.enable_custom_widget_manager()"
|
|
64
|
+
],
|
|
65
|
+
"metadata": {
|
|
66
|
+
"id": "89doT3fbWcGi"
|
|
67
|
+
},
|
|
68
|
+
"execution_count": null,
|
|
69
|
+
"outputs": []
|
|
70
|
+
},
|
|
71
|
+
{
|
|
72
|
+
"cell_type": "markdown",
|
|
73
|
+
"source": [
|
|
74
|
+
"Now we need to determine what `DEVICE` we can run this on, a CPU or GPU..."
|
|
75
|
+
],
|
|
76
|
+
"metadata": {
|
|
77
|
+
"id": "gYxLwOXzfmiF"
|
|
78
|
+
}
|
|
79
|
+
},
|
|
80
|
+
{
|
|
81
|
+
"cell_type": "code",
|
|
82
|
+
"source": [
|
|
83
|
+
"import torch\n",
|
|
84
|
+
"from rich import print as rprint\n",
|
|
85
|
+
"\n",
|
|
86
|
+
"# Check if you're connected to a GPU (it's free!)\n",
|
|
87
|
+
"if not torch.cuda.is_available():\n",
|
|
88
|
+
" DEVICE = \"cpu\"\n",
|
|
89
|
+
" rprint(\n",
|
|
90
|
+
" \"\"\"\n",
|
|
91
|
+
"[bold yellow]⚠️ GPU is not enabled.[/bold yellow] The notebook will fall back to [bold]CPU[/bold], but [italic]performance may be lower[/italic].\n",
|
|
92
|
+
"\n",
|
|
93
|
+
"[bold]👉 To enable GPU (FREE):[/bold]\n",
|
|
94
|
+
"• Go to [green]Runtime > Change runtime type[/green]\n",
|
|
95
|
+
"• Set [bold]'Hardware Accelerator'[/bold] to [bold green]'GPU'[/bold green]\n",
|
|
96
|
+
"• Click [bold]Save[/bold] and [bold]rerun all cells[/bold]\n",
|
|
97
|
+
"\n",
|
|
98
|
+
"[dim]Continuing with CPU for now...[/dim]\n",
|
|
99
|
+
"\"\"\"\n",
|
|
100
|
+
" )\n",
|
|
101
|
+
"else:\n",
|
|
102
|
+
" DEVICE = \"cuda\"\n",
|
|
103
|
+
" rprint(\"[bold green]✅ GPU is enabled.[/bold green] Proceeding with [bold green]CUDA[/bold green]...\")"
|
|
104
|
+
],
|
|
105
|
+
"metadata": {
|
|
106
|
+
"id": "PFAn_bzAXLGO"
|
|
107
|
+
},
|
|
108
|
+
"execution_count": null,
|
|
109
|
+
"outputs": []
|
|
110
|
+
},
|
|
111
|
+
{
|
|
112
|
+
"cell_type": "code",
|
|
113
|
+
"source": [
|
|
114
|
+
"# Download the example files from CLI repo\n",
|
|
115
|
+
"!wget https://github.com/WecoAI/weco-cli/archive/refs/heads/main.zip -O repo.zip\n",
|
|
116
|
+
"!unzip -j repo.zip \"weco-cli-main/examples/hello-kernel-world/*\" -d .\n",
|
|
117
|
+
"!rm repo.zip"
|
|
118
|
+
],
|
|
119
|
+
"metadata": {
|
|
120
|
+
"id": "dFGClqxpzwyM"
|
|
121
|
+
},
|
|
122
|
+
"execution_count": null,
|
|
123
|
+
"outputs": []
|
|
124
|
+
},
|
|
125
|
+
{
|
|
126
|
+
"cell_type": "markdown",
|
|
127
|
+
"source": [
|
|
128
|
+
"Google AI Studio has a free API usage quota. Create a key [here](https://aistudio.google.com/apikey) to use `weco` for free!"
|
|
129
|
+
],
|
|
130
|
+
"metadata": {
|
|
131
|
+
"id": "PGlEsI78bMtN"
|
|
132
|
+
}
|
|
133
|
+
},
|
|
134
|
+
{
|
|
135
|
+
"cell_type": "code",
|
|
136
|
+
"source": [
|
|
137
|
+
"import os\n",
|
|
138
|
+
"\n",
|
|
139
|
+
"# Pass your API key below\n",
|
|
140
|
+
"os.environ[\"GEMINI_API_KEY\"] = \"\"\n",
|
|
141
|
+
"# os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
|
|
142
|
+
"# os.environ[\"ANTHROPIC_API_KEY\"] = \"\"\n",
|
|
143
|
+
"\n",
|
|
144
|
+
"\n",
|
|
145
|
+
"if not any([os.environ.get(key) for key in [\"GEMINI_API_KEY\", \"OPENAI_API_KEY\", \"ANTHROPIC_API_KEY\"]]):\n",
|
|
146
|
+
" rprint(\n",
|
|
147
|
+
"\"[bold red]❌ No API keys found.[/bold red]\\n\"\n",
|
|
148
|
+
"\"\\n\"\n",
|
|
149
|
+
"\"Please set one of the following environment variables:\\n\"\n",
|
|
150
|
+
"\" • [cyan]GEMINI_API_KEY[/cyan]\\n\"\n",
|
|
151
|
+
"\" • [cyan]OPENAI_API_KEY[/cyan]\\n\"\n",
|
|
152
|
+
"\" • [cyan]ANTHROPIC_API_KEY[/cyan]\\n\"\n",
|
|
153
|
+
"\"\\n\"\n",
|
|
154
|
+
"\"Setup your [cyan]GEMINI_API_KEY[/cyan] for free - [underline white]https://aistudio.google.com/apikey[/underline white] !\"\n",
|
|
155
|
+
" )\n",
|
|
156
|
+
"else:\n",
|
|
157
|
+
" rprint(\"[bold green]✅ API keys found.[/bold green]\\n\\nWe'll only be able to know if they are correct once the optimization starts.\")"
|
|
158
|
+
],
|
|
159
|
+
"metadata": {
|
|
160
|
+
"id": "b4XuOeNzYTdp"
|
|
161
|
+
},
|
|
162
|
+
"execution_count": null,
|
|
163
|
+
"outputs": []
|
|
164
|
+
},
|
|
165
|
+
{
|
|
166
|
+
"cell_type": "markdown",
|
|
167
|
+
"source": [
|
|
168
|
+
"## Let's Start Optimizing!"
|
|
169
|
+
],
|
|
170
|
+
"metadata": {
|
|
171
|
+
"id": "sbvA8oQceOt5"
|
|
172
|
+
}
|
|
173
|
+
},
|
|
174
|
+
{
|
|
175
|
+
"cell_type": "markdown",
|
|
176
|
+
"source": [
|
|
177
|
+
"Now that we've got our dependencies, GPU and LLM API key sorted out, let's take a look at what code we're optimizing!\n",
|
|
178
|
+
"\n",
|
|
179
|
+
"Earlier, we downloaded two files:\n",
|
|
180
|
+
"1. An evaluation script to help score *how good a solution is* (`evaluate.py`)\n",
|
|
181
|
+
"2. A snippet of code we'd like to optimize (`optimize.py`)\n",
|
|
182
|
+
"\n",
|
|
183
|
+
"Let's take a look at what the code we want to optimize looks like..."
|
|
184
|
+
],
|
|
185
|
+
"metadata": {
|
|
186
|
+
"id": "4OjXTBkjc4Id"
|
|
187
|
+
}
|
|
188
|
+
},
|
|
189
|
+
{
|
|
190
|
+
"cell_type": "code",
|
|
191
|
+
"source": [
|
|
192
|
+
"from IPython.display import display, HTML\n",
|
|
193
|
+
"from pygments import highlight\n",
|
|
194
|
+
"from pygments.lexers import PythonLexer\n",
|
|
195
|
+
"from pygments.formatters import HtmlFormatter\n",
|
|
196
|
+
"\n",
|
|
197
|
+
"def view_code_block(path: str):\n",
|
|
198
|
+
" with open(path) as f:\n",
|
|
199
|
+
" display(HTML(highlight(f.read(), PythonLexer(), HtmlFormatter(full=True, style=\"monokai\"))))\n",
|
|
200
|
+
"\n",
|
|
201
|
+
"view_code_block(\"optimize.py\")"
|
|
202
|
+
],
|
|
203
|
+
"metadata": {
|
|
204
|
+
"id": "rUTxqxWgcC34"
|
|
205
|
+
},
|
|
206
|
+
"execution_count": null,
|
|
207
|
+
"outputs": []
|
|
208
|
+
},
|
|
209
|
+
{
|
|
210
|
+
"cell_type": "markdown",
|
|
211
|
+
"source": [
|
|
212
|
+
"Real-world code is often more complex but this is a good place to start. You can find more advanced examples [here](https://docs.weco.ai/examples), however, we'd recommend starting with this notebook as the optimization setup is the exact same, no matter the complexity!"
|
|
213
|
+
],
|
|
214
|
+
"metadata": {
|
|
215
|
+
"id": "5C5dvasXdmNw"
|
|
216
|
+
}
|
|
217
|
+
},
|
|
218
|
+
{
|
|
219
|
+
"cell_type": "markdown",
|
|
220
|
+
"source": [
|
|
221
|
+
"It's simple to start optimizing any piece of code! You just need to set:\n",
|
|
222
|
+
"1. Path to source code - we can point this to our `optimize.py`\n",
|
|
223
|
+
"2. Command to run evaluation - notice how we are using the `DEVICE` we setup earlier\n",
|
|
224
|
+
"3. The metric we are optimizing for - in this case, the evaluation script (`evaluate.py`) prints the `'speedup'` achieved to the terminal\n",
|
|
225
|
+
"4. Whether you want to maximize or minimize the metric you mentioned above - in our case, we want to make this code faster!\n",
|
|
226
|
+
"5. Number of steps to optimize for - we'll keep it low to avoid any rate limits being hit on your free Gemini API key\n",
|
|
227
|
+
"6. Additional context - any information you think should guide the optimization process"
|
|
228
|
+
],
|
|
229
|
+
"metadata": {
|
|
230
|
+
"id": "YfDg-pP9fAdC"
|
|
231
|
+
}
|
|
232
|
+
},
|
|
233
|
+
{
|
|
234
|
+
"cell_type": "markdown",
|
|
235
|
+
"source": [
|
|
236
|
+
"Now let's get straight into it. Keep an eye on the `Best Solution` panel!\n",
|
|
237
|
+
"\n",
|
|
238
|
+
"Note that you can track the optimization in the logs directory (`.runs/`) and on our dashboard (links shown in the `Summary` panel)."
|
|
239
|
+
],
|
|
240
|
+
"metadata": {
|
|
241
|
+
"id": "TbG_3nwEhs5G"
|
|
242
|
+
}
|
|
243
|
+
},
|
|
244
|
+
{
|
|
245
|
+
"cell_type": "code",
|
|
246
|
+
"source": [
|
|
247
|
+
"import sys, weco.cli as weco_cli\n",
|
|
248
|
+
"\n",
|
|
249
|
+
"# When running in a terminal, you can use this instead:\n",
|
|
250
|
+
"# weco run --source optimize.py \\\n",
|
|
251
|
+
"# --eval-command f\"python evaluate.py --solution-path optimize.py --device {DEVICE}\" \\\n",
|
|
252
|
+
"# --metric speedup \\\n",
|
|
253
|
+
"# --goal maximize \\\n",
|
|
254
|
+
"# --steps 10 \\\n",
|
|
255
|
+
"# --additional-instructions \"Fuse operations in the forward method while ensuring the max float deviation remains small.\"\n",
|
|
256
|
+
"\n",
|
|
257
|
+
"sys.argv = [\n",
|
|
258
|
+
" \"weco\", \"run\",\n",
|
|
259
|
+
" \"--source\", \"optimize.py\",\n",
|
|
260
|
+
" \"--eval-command\", f\"python evaluate.py --solution-path optimize.py --device {DEVICE}\",\n",
|
|
261
|
+
" \"--metric\", \"speedup\",\n",
|
|
262
|
+
" \"--goal\", \"maximize\",\n",
|
|
263
|
+
" \"--steps\", \"10\",\n",
|
|
264
|
+
" \"--additional-instructions\", \"Fuse operations in the forward method while ensuring the max float deviation remains small.\"\n",
|
|
265
|
+
"]\n",
|
|
266
|
+
"\n",
|
|
267
|
+
"try: weco_cli.main()\n",
|
|
268
|
+
"except SystemExit: pass"
|
|
269
|
+
],
|
|
270
|
+
"metadata": {
|
|
271
|
+
"id": "17YZ2euZplDJ"
|
|
272
|
+
},
|
|
273
|
+
"execution_count": null,
|
|
274
|
+
"outputs": []
|
|
275
|
+
},
|
|
276
|
+
{
|
|
277
|
+
"cell_type": "markdown",
|
|
278
|
+
"source": [
|
|
279
|
+
"Let's take a look at what our optimized code looks like (`optimize.py`)!"
|
|
280
|
+
],
|
|
281
|
+
"metadata": {
|
|
282
|
+
"id": "990ueX_JsO_1"
|
|
283
|
+
}
|
|
284
|
+
},
|
|
285
|
+
{
|
|
286
|
+
"cell_type": "code",
|
|
287
|
+
"source": [
|
|
288
|
+
"view_code_block(\"optimize.py\")"
|
|
289
|
+
],
|
|
290
|
+
"metadata": {
|
|
291
|
+
"id": "9dqfzXkajQKs"
|
|
292
|
+
},
|
|
293
|
+
"execution_count": null,
|
|
294
|
+
"outputs": []
|
|
295
|
+
},
|
|
296
|
+
{
|
|
297
|
+
"cell_type": "markdown",
|
|
298
|
+
"source": [
|
|
299
|
+
"Happy Optimizing from the [Weco](https://www.weco.ai/) Team!\n",
|
|
300
|
+
"\n",
|
|
301
|
+
"If you'd like to learn more about what Weco can do, here are some spots to check out:\n",
|
|
302
|
+
"- 📖 [CLI Reference](https://docs.weco.ai/cli/cli-reference) - Explore our docs for an in-depth look at what the tool can do\n",
|
|
303
|
+
"- ✨ [Examples](https://docs.weco.ai/examples) - Explore automated R&D across kernel engineering, ML engineering and prompt engineering"
|
|
304
|
+
],
|
|
305
|
+
"metadata": {
|
|
306
|
+
"id": "17oICow9yjn8"
|
|
307
|
+
}
|
|
308
|
+
}
|
|
309
|
+
]
|
|
310
|
+
}
|
|
@@ -8,7 +8,7 @@ name = "weco"
|
|
|
8
8
|
authors = [{ name = "Weco AI Team", email = "contact@weco.ai" }]
|
|
9
9
|
description = "Documentation for `weco`, a CLI for using Weco AI's code optimizer."
|
|
10
10
|
readme = "README.md"
|
|
11
|
-
version = "0.2.
|
|
11
|
+
version = "0.2.20"
|
|
12
12
|
license = { text = "MIT" }
|
|
13
13
|
requires-python = ">=3.8"
|
|
14
14
|
dependencies = ["requests", "rich", "packaging"]
|
|
@@ -17,7 +17,7 @@ def handle_api_error(e: requests.exceptions.HTTPError, console: rich.console.Con
|
|
|
17
17
|
# sys.exit(1)
|
|
18
18
|
|
|
19
19
|
|
|
20
|
-
def
|
|
20
|
+
def start_optimization_run(
|
|
21
21
|
console: rich.console.Console,
|
|
22
22
|
source_code: str,
|
|
23
23
|
evaluation_command: str,
|
|
@@ -29,14 +29,14 @@ def start_optimization_session(
|
|
|
29
29
|
search_policy_config: Dict[str, Any],
|
|
30
30
|
additional_instructions: str = None,
|
|
31
31
|
api_keys: Dict[str, Any] = {},
|
|
32
|
-
auth_headers: dict = {},
|
|
32
|
+
auth_headers: dict = {},
|
|
33
33
|
timeout: int = 800,
|
|
34
34
|
) -> Dict[str, Any]:
|
|
35
|
-
"""Start the optimization
|
|
35
|
+
"""Start the optimization run."""
|
|
36
36
|
with console.status("[bold green]Starting Optimization..."):
|
|
37
37
|
try:
|
|
38
38
|
response = requests.post(
|
|
39
|
-
f"{__base_url__}/
|
|
39
|
+
f"{__base_url__}/runs",
|
|
40
40
|
json={
|
|
41
41
|
"source_code": source_code,
|
|
42
42
|
"additional_instructions": additional_instructions,
|
|
@@ -49,37 +49,37 @@ def start_optimization_session(
|
|
|
49
49
|
},
|
|
50
50
|
"metadata": {"client_name": "cli", "client_version": __pkg_version__, **api_keys},
|
|
51
51
|
},
|
|
52
|
-
headers=auth_headers,
|
|
52
|
+
headers=auth_headers,
|
|
53
53
|
timeout=timeout,
|
|
54
54
|
)
|
|
55
55
|
response.raise_for_status()
|
|
56
56
|
return response.json()
|
|
57
57
|
except requests.exceptions.HTTPError as e:
|
|
58
58
|
handle_api_error(e, console)
|
|
59
|
-
sys.exit(1)
|
|
59
|
+
sys.exit(1)
|
|
60
60
|
except requests.exceptions.RequestException as e:
|
|
61
|
-
console.print(f"[bold red]Network Error starting
|
|
61
|
+
console.print(f"[bold red]Network Error starting run: {e}[/]")
|
|
62
62
|
sys.exit(1)
|
|
63
63
|
|
|
64
64
|
|
|
65
65
|
def evaluate_feedback_then_suggest_next_solution(
|
|
66
|
-
|
|
66
|
+
run_id: str,
|
|
67
67
|
execution_output: str,
|
|
68
68
|
additional_instructions: str = None,
|
|
69
69
|
api_keys: Dict[str, Any] = {},
|
|
70
|
-
auth_headers: dict = {},
|
|
70
|
+
auth_headers: dict = {},
|
|
71
71
|
timeout: int = 800,
|
|
72
72
|
) -> Dict[str, Any]:
|
|
73
73
|
"""Evaluate the feedback and suggest the next solution."""
|
|
74
74
|
try:
|
|
75
75
|
response = requests.post(
|
|
76
|
-
f"{__base_url__}/
|
|
76
|
+
f"{__base_url__}/runs/{run_id}/suggest",
|
|
77
77
|
json={
|
|
78
78
|
"execution_output": execution_output,
|
|
79
79
|
"additional_instructions": additional_instructions,
|
|
80
80
|
"metadata": {**api_keys},
|
|
81
81
|
},
|
|
82
|
-
headers=auth_headers,
|
|
82
|
+
headers=auth_headers,
|
|
83
83
|
timeout=timeout,
|
|
84
84
|
)
|
|
85
85
|
response.raise_for_status()
|
|
@@ -93,16 +93,13 @@ def evaluate_feedback_then_suggest_next_solution(
|
|
|
93
93
|
raise # Re-raise the exception
|
|
94
94
|
|
|
95
95
|
|
|
96
|
-
def
|
|
97
|
-
|
|
96
|
+
def get_optimization_run_status(
|
|
97
|
+
run_id: str, include_history: bool = False, auth_headers: dict = {}, timeout: int = 800
|
|
98
98
|
) -> Dict[str, Any]:
|
|
99
|
-
"""Get the current status of the optimization
|
|
99
|
+
"""Get the current status of the optimization run."""
|
|
100
100
|
try:
|
|
101
101
|
response = requests.get(
|
|
102
|
-
f"{__base_url__}/
|
|
103
|
-
params={"include_history": include_history},
|
|
104
|
-
headers=auth_headers,
|
|
105
|
-
timeout=timeout,
|
|
102
|
+
f"{__base_url__}/runs/{run_id}", params={"include_history": include_history}, headers=auth_headers, timeout=timeout
|
|
106
103
|
)
|
|
107
104
|
response.raise_for_status()
|
|
108
105
|
return response.json()
|
|
@@ -114,42 +111,30 @@ def get_optimization_session_status(
|
|
|
114
111
|
raise # Re-raise
|
|
115
112
|
|
|
116
113
|
|
|
117
|
-
def send_heartbeat(
|
|
118
|
-
session_id: str,
|
|
119
|
-
auth_headers: dict = {},
|
|
120
|
-
timeout: int = 10, # Shorter timeout for non-critical heartbeat
|
|
121
|
-
) -> bool:
|
|
114
|
+
def send_heartbeat(run_id: str, auth_headers: dict = {}, timeout: int = 10) -> bool:
|
|
122
115
|
"""Send a heartbeat signal to the backend."""
|
|
123
116
|
try:
|
|
124
|
-
response = requests.put(f"{__base_url__}/
|
|
125
|
-
response.raise_for_status()
|
|
117
|
+
response = requests.put(f"{__base_url__}/runs/{run_id}/heartbeat", headers=auth_headers, timeout=timeout)
|
|
118
|
+
response.raise_for_status()
|
|
126
119
|
return True
|
|
127
120
|
except requests.exceptions.HTTPError as e:
|
|
128
|
-
# Log non-critical errors like 409 Conflict (session not running)
|
|
129
121
|
if e.response.status_code == 409:
|
|
130
|
-
print(f"Heartbeat ignored:
|
|
122
|
+
print(f"Heartbeat ignored: Run {run_id} is not running.", file=sys.stderr)
|
|
131
123
|
else:
|
|
132
|
-
print(f"Heartbeat failed for
|
|
133
|
-
# Don't exit, just report failure
|
|
124
|
+
print(f"Heartbeat failed for run {run_id}: HTTP {e.response.status_code}", file=sys.stderr)
|
|
134
125
|
return False
|
|
135
126
|
except requests.exceptions.RequestException as e:
|
|
136
|
-
|
|
137
|
-
print(f"Heartbeat network error for session {session_id}: {e}", file=sys.stderr)
|
|
127
|
+
print(f"Heartbeat network error for run {run_id}: {e}", file=sys.stderr)
|
|
138
128
|
return False
|
|
139
129
|
|
|
140
130
|
|
|
141
131
|
def report_termination(
|
|
142
|
-
|
|
143
|
-
status_update: str,
|
|
144
|
-
reason: str,
|
|
145
|
-
details: Optional[str] = None,
|
|
146
|
-
auth_headers: dict = {},
|
|
147
|
-
timeout: int = 30, # Reasonably longer timeout for important termination message
|
|
132
|
+
run_id: str, status_update: str, reason: str, details: Optional[str] = None, auth_headers: dict = {}, timeout: int = 30
|
|
148
133
|
) -> bool:
|
|
149
134
|
"""Report the termination reason to the backend."""
|
|
150
135
|
try:
|
|
151
136
|
response = requests.post(
|
|
152
|
-
f"{__base_url__}/
|
|
137
|
+
f"{__base_url__}/runs/{run_id}/terminate",
|
|
153
138
|
json={"status_update": status_update, "termination_reason": reason, "termination_details": details},
|
|
154
139
|
headers=auth_headers,
|
|
155
140
|
timeout=timeout,
|
|
@@ -157,6 +142,5 @@ def report_termination(
|
|
|
157
142
|
response.raise_for_status()
|
|
158
143
|
return True
|
|
159
144
|
except requests.exceptions.RequestException as e:
|
|
160
|
-
|
|
161
|
-
print(f"Warning: Failed to report termination to backend for session {session_id}: {e}", file=sys.stderr)
|
|
145
|
+
print(f"Warning: Failed to report termination to backend for run {run_id}: {e}", file=sys.stderr)
|
|
162
146
|
return False
|