weco 0.3.0__tar.gz → 0.3.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {weco-0.3.0 → weco-0.3.1}/PKG-INFO +1 -1
- {weco-0.3.0 → weco-0.3.1}/pyproject.toml +1 -1
- {weco-0.3.0 → weco-0.3.1}/weco/api.py +84 -46
- {weco-0.3.0 → weco-0.3.1}/weco/constants.py +0 -4
- {weco-0.3.0 → weco-0.3.1}/weco/optimizer.py +0 -2
- {weco-0.3.0 → weco-0.3.1}/weco.egg-info/PKG-INFO +1 -1
- {weco-0.3.0 → weco-0.3.1}/.github/workflows/lint.yml +0 -0
- {weco-0.3.0 → weco-0.3.1}/.github/workflows/release.yml +0 -0
- {weco-0.3.0 → weco-0.3.1}/.gitignore +0 -0
- {weco-0.3.0 → weco-0.3.1}/LICENSE +0 -0
- {weco-0.3.0 → weco-0.3.1}/README.md +0 -0
- {weco-0.3.0 → weco-0.3.1}/assets/example-optimization.gif +0 -0
- {weco-0.3.0 → weco-0.3.1}/assets/weco.svg +0 -0
- {weco-0.3.0 → weco-0.3.1}/contributing.md +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/README.md +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/cuda/README.md +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/cuda/evaluate.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/cuda/optimize.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/hello-kernel-world/README.md +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/hello-kernel-world/evaluate.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/hello-kernel-world/optimize.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/prompt/README.md +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/prompt/eval.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/prompt/optimize.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/prompt/prompt_guide.md +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/spaceship-titanic/README.md +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/spaceship-titanic/competition_description.md +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/spaceship-titanic/data/sample_submission.csv +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/spaceship-titanic/data/test.csv +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/spaceship-titanic/data/train.csv +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/spaceship-titanic/evaluate.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/spaceship-titanic/train.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/triton/README.md +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/triton/evaluate.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/examples/triton/optimize.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/setup.cfg +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco/__init__.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco/auth.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco/chatbot.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco/cli.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco/credits.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco/panels.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco/utils.py +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco.egg-info/SOURCES.txt +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco.egg-info/dependency_links.txt +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco.egg-info/entry_points.txt +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco.egg-info/requires.txt +0 -0
- {weco-0.3.0 → weco-0.3.1}/weco.egg-info/top_level.txt +0 -0
{weco-0.3.0 → weco-0.3.1}/pyproject.toml

```diff
@@ -8,7 +8,7 @@ name = "weco"
 authors = [{ name = "Weco AI Team", email = "contact@weco.ai" }]
 description = "Documentation for `weco`, a CLI for using Weco AI's code optimizer."
 readme = "README.md"
-version = "0.3.0"
+version = "0.3.1"
 license = { file = "LICENSE" }
 requires-python = ">=3.8"
 dependencies = [
```
{weco-0.3.0 → weco-0.3.1}/weco/api.py

```diff
@@ -4,7 +4,6 @@ import requests
 from rich.console import Console
 
 from weco import __pkg_version__, __base_url__
-from .constants import CODEGEN_API_TIMEOUT, STATUS_API_TIMEOUT
 from .utils import truncate_output, determine_model_for_onboarding
 
 
```
```diff
@@ -48,6 +47,51 @@ def handle_api_error(e: requests.exceptions.HTTPError, console: Console) -> None:
         _render(detail)
 
 
+def _recover_suggest_after_transport_error(
+    console: Console, run_id: str, step: int, auth_headers: dict
+) -> Optional[Dict[str, Any]]:
+    """
+    Try to reconstruct the /suggest response after a transport error (ReadTimeout/502/RemoteDisconnected)
+    by fetching run status and using the latest nodes.
+
+    Args:
+        console: The console object to use for logging.
+        run_id: The ID of the run to recover.
+        step: The step of the solution to recover.
+        auth_headers: The authentication headers to use for the request.
+
+    Returns:
+        The recovered response if the run is in a valid state, otherwise None.
+    """
+    run_status_recovery_response = get_optimization_run_status(
+        console=console, run_id=run_id, include_history=True, auth_headers=auth_headers
+    )
+    current_step = run_status_recovery_response.get("current_step")
+    current_status = run_status_recovery_response.get("status")
+    # The run should be "running" and the current step should correspond to the solution step we are attempting to generate
+    is_valid_run_state = current_status is not None and current_status == "running"
+    is_valid_step = current_step is not None and current_step == step
+    if is_valid_run_state and is_valid_step:
+        nodes = run_status_recovery_response.get("nodes") or []
+        # We need at least 2 nodes to reconstruct the expected response i.e., the last two nodes
+        if len(nodes) >= 2:
+            nodes_sorted_ascending = sorted(nodes, key=lambda n: n["step"])
+            latest_node = nodes_sorted_ascending[-1]
+            penultimate_node = nodes_sorted_ascending[-2]
+            # If the server finished generating the next candidate, it should be exactly this step
+            if latest_node and latest_node["step"] == step:
+                # Try to reconstruct the expected response from the /suggest endpoint using the run status info
+                return {
+                    "run_id": run_id,
+                    "previous_solution_metric_value": penultimate_node.get("metric_value"),
+                    "solution_id": latest_node.get("solution_id"),
+                    "code": latest_node.get("code"),
+                    "plan": latest_node.get("plan"),
+                    "is_done": False,
+                }
+    return None
+
+
 def start_optimization_run(
     console: Console,
     source_code: str,
```
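The helper's contract can be exercised in isolation. Below is a minimal sketch, assuming a run-status payload shaped like the fields the helper reads (`status`, `current_step`, and `nodes` entries carrying `step`, `solution_id`, `code`, `plan`, `metric_value`); the `reconstruct_suggest_response` name and the concrete values are invented for illustration.

```python
# Minimal sketch of the recovery logic above, run against a hypothetical
# /status payload. Field names come from the diff; the values are made up.
from typing import Any, Dict, Optional


def reconstruct_suggest_response(status: Dict[str, Any], run_id: str, step: int) -> Optional[Dict[str, Any]]:
    # The run must still be running, and on the step we were generating.
    if status.get("status") != "running" or status.get("current_step") != step:
        return None
    nodes = status.get("nodes") or []
    if len(nodes) < 2:
        return None
    nodes = sorted(nodes, key=lambda n: n["step"])
    latest, penultimate = nodes[-1], nodes[-2]
    # The newest node must be exactly the step whose response was lost.
    if latest["step"] != step:
        return None
    return {
        "run_id": run_id,
        "previous_solution_metric_value": penultimate.get("metric_value"),
        "solution_id": latest.get("solution_id"),
        "code": latest.get("code"),
        "plan": latest.get("plan"),
        "is_done": False,
    }


# Example: the server finished step 3 but the response was lost in transit.
payload = {
    "status": "running",
    "current_step": 3,
    "nodes": [
        {"step": 2, "solution_id": "s2", "code": "...", "plan": "...", "metric_value": 0.81},
        {"step": 3, "solution_id": "s3", "code": "...", "plan": "...", "metric_value": None},
    ],
}
print(reconstruct_suggest_response(payload, run_id="run-123", step=3))
```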
```diff
@@ -64,7 +108,7 @@ def start_optimization_run(
     save_logs: bool = False,
     log_dir: str = ".runs",
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Optional[Dict[str, Any]]:
     """Start the optimization run."""
     with console.status("[bold green]Starting Optimization..."):
```
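The `(10, 3650)` literal follows the `requests` convention of a `(connect, read)` timeout tuple in seconds: up to 10 s to establish the connection, then just over an hour for the server to finish responding. A minimal sketch of that behavior, with a placeholder URL:

```python
# The two-tuple timeout is requests' (connect, read) convention.
# The URL here is a placeholder, not the real Weco endpoint.
import requests

try:
    resp = requests.post(
        "https://api.example.com/suggest",  # placeholder URL
        json={"step": 1},
        timeout=(10, 3650),  # (connect timeout, read timeout) in seconds
    )
    resp.raise_for_status()
except requests.exceptions.ConnectTimeout:
    print("could not connect within 10 s")
except requests.exceptions.ReadTimeout:
    print("connected, but no response within 3650 s")
```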
```diff
@@ -107,7 +151,7 @@ def start_optimization_run(
 
 
 def resume_optimization_run(
-    console: Console, run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] =
+    console: Console, run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = (5, 10)
 ) -> Optional[Dict[str, Any]]:
     """Request the backend to resume an interrupted run."""
     with console.status("[bold green]Resuming run..."):
```
```diff
@@ -136,7 +180,7 @@ def evaluate_feedback_then_suggest_next_solution(
     execution_output: str,
     additional_instructions: str = None,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Dict[str, Any]:
     """Evaluate the feedback and suggest the next solution."""
     try:
```
```diff
@@ -158,44 +202,38 @@ def evaluate_feedback_then_suggest_next_solution(
             result["code"] = ""
         return result
     except requests.exceptions.ReadTimeout as e:
-        #
-        # 1
-        # 2
-        #
-
-
-        # client didn't receive the response and timed out
-        run_status_recovery_response = get_optimization_run_status(
-            console=console, run_id=run_id, include_history=True, auth_headers=auth_headers
+        # ReadTimeout can mean either:
+        # 1) the server truly didn't finish before the client's read timeout, or
+        # 2) the server finished but an intermediary (proxy/LB) dropped the response.
+        # We only try to recover case (2): fetch run status to confirm the step completed and reconstruct the response.
+        recovered = _recover_suggest_after_transport_error(
+            console=console, run_id=run_id, step=step, auth_headers=auth_headers
         )
-        current_step = run_status_recovery_response.get("current_step")
-        current_status = run_status_recovery_response.get("status")
-        # The run should be "running" and the current step should correspond to the solution step we are attempting to generate
-        is_valid_run_state = current_status is not None and current_status == "running"
-        is_valid_step = current_step is not None and current_step == step
-        if is_valid_run_state and is_valid_step:
-            nodes = run_status_recovery_response.get("nodes") or []
-            # We need at least 2 nodes to reconstruct the expected response i.e., the last two nodes
-            if len(nodes) >= 2:
-                nodes_sorted_ascending = sorted(nodes, key=lambda n: n["step"])
-                latest_node = nodes_sorted_ascending[-1]
-                penultimate_node = nodes_sorted_ascending[-2]
-                # If the server finished generating the next candidate, it should be exactly this step
-                if latest_node and latest_node["step"] == step:
-                    # Try to reconstruct the expected response from the /suggest endpoint using the run status info
-                    reconstructed_expected_response = {
-                        "run_id": run_id,
-                        "previous_solution_metric_value": penultimate_node.get("metric_value"),
-                        "solution_id": latest_node.get("solution_id"),
-                        "code": latest_node.get("code"),
-                        "plan": latest_node.get("plan"),
-                        "is_done": False,
-                    }
-                    return reconstructed_expected_response
-        # If we couldn't recover, raise the timeout error so the run can be resumed by the user
+        if recovered is not None:
+            return recovered
+        # If we cannot confirm completion, bubble up the timeout so the caller can resume later.
         raise requests.exceptions.ReadTimeout(e)
     except requests.exceptions.HTTPError as e:
-        #
+        # Treat only 502 Bad Gateway as a transient transport/gateway issue (akin to a dropped response).
+        # For 502, attempt the status-based recovery used for ReadTimeout; otherwise render the HTTP error normally.
+        if (resp := getattr(e, "response", None)) is not None and resp.status_code == 502:
+            recovered = _recover_suggest_after_transport_error(
+                console=console, run_id=run_id, step=step, auth_headers=auth_headers
+            )
+            if recovered is not None:
+                return recovered
+        # Surface non-502 HTTP errors to the user.
+        handle_api_error(e, console)
+        raise
+    except requests.exceptions.ConnectionError as e:
+        # Covers connection resets with no HTTP response (e.g., RemoteDisconnected).
+        # Treat as a potential "response lost after completion": try status-based recovery first, as with ReadTimeout.
+        recovered = _recover_suggest_after_transport_error(
+            console=console, run_id=run_id, step=step, auth_headers=auth_headers
+        )
+        if recovered is not None:
+            return recovered
+        # Surface the connection error to the user.
        handle_api_error(e, console)
         raise
     except Exception as e:
```
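The three `except` branches map onto how `requests` surfaces failures. The sketch below mirrors that dispatch; `classify` is a hypothetical helper for illustration, and the recovery it describes stands in for `_recover_suggest_after_transport_error`.

```python
# Sketch of how the three failure modes above arrive from requests.
# The branch order matters: ReadTimeout is a Timeout, not a ConnectionError,
# while RemoteDisconnected and connection resets arrive wrapped in ConnectionError.
import requests


def classify(exc: Exception) -> str:
    if isinstance(exc, requests.exceptions.ReadTimeout):
        return "read timeout: server may still have finished; try status-based recovery"
    if isinstance(exc, requests.exceptions.HTTPError):
        resp = getattr(exc, "response", None)
        if resp is not None and resp.status_code == 502:
            return "502 bad gateway: response likely dropped by a proxy; try recovery"
        return "other HTTP error: report to the user"
    if isinstance(exc, requests.exceptions.ConnectionError):
        return "connection dropped with no HTTP response; try recovery"
    return "unexpected error: re-raise"
```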
```diff
@@ -208,7 +246,7 @@ def get_optimization_run_status(
     run_id: str,
     include_history: bool = False,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (5, 10),
 ) -> Dict[str, Any]:
     """Get the current status of the optimization run."""
     try:
```
```diff
@@ -239,7 +277,7 @@ def get_optimization_run_status(
         raise
 
 
-def send_heartbeat(run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = (
+def send_heartbeat(run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = (5, 10)) -> bool:
     """Send a heartbeat signal to the backend."""
     try:
         response = requests.put(f"{__base_url__}/runs/{run_id}/heartbeat", headers=auth_headers, timeout=timeout)
```
```diff
@@ -262,7 +300,7 @@ def report_termination(
     reason: str,
     details: Optional[str] = None,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = (
+    timeout: Union[int, Tuple[int, int]] = (5, 10),
 ) -> bool:
     """Report the termination reason to the backend."""
     try:
```
```diff
@@ -285,7 +323,7 @@ def get_optimization_suggestions_from_codebase(
     gitingest_tree: str,
     gitingest_content_str: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Optional[List[Dict[str, Any]]]:
     """Analyze codebase and get optimization suggestions using the model-agnostic backend API."""
     try:
```
```diff
@@ -320,7 +358,7 @@ def generate_evaluation_script_and_metrics(
     description: str,
     gitingest_content_str: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
     """Generate evaluation script and determine metrics using the model-agnostic backend API."""
     try:
```
```diff
@@ -356,7 +394,7 @@ def analyze_evaluation_environment(
     gitingest_tree: str,
     gitingest_content_str: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Optional[Dict[str, Any]]:
     """Analyze existing evaluation scripts and environment using the model-agnostic backend API."""
     try:
```
```diff
@@ -392,7 +430,7 @@ def analyze_script_execution_requirements(
     script_path: str,
     target_file: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Optional[str]:
     """Analyze script to determine proper execution command using the model-agnostic backend API."""
     try:
```
{weco-0.3.0 → weco-0.3.1}/weco/constants.py

```diff
@@ -3,10 +3,6 @@
 Constants for the Weco CLI package.
 """
 
-# API timeout configuration (connect_timeout, read_timeout) in seconds
-CODEGEN_API_TIMEOUT = (30, 3650)
-STATUS_API_TIMEOUT = (10, 180)
-
 # Output truncation configuration
 TRUNCATION_THRESHOLD = 51000  # Maximum length before truncation
 TRUNCATION_KEEP_LENGTH = 25000  # Characters to keep from beginning and end
```
{weco-0.3.0 → weco-0.3.1}/weco/optimizer.py

```diff
@@ -168,7 +168,6 @@ def execute_optimization(
                 reason=f"user_terminated_{signal_name.lower()}",
                 details=f"Process terminated by signal {signal_name} ({signum}).",
                 auth_headers=current_auth_headers_for_heartbeat,
-                timeout=3,
             )
             console.print(f"\n[cyan]To resume this run, use:[/] [bold cyan]weco resume {current_run_id_for_heartbeat}[/]")
 
```
```diff
@@ -547,7 +546,6 @@ def resume_optimization(run_id: str, console: Optional[Console] = None) -> bool:
                 reason=f"user_terminated_{signal_name.lower()}",
                 details=f"Process terminated by signal {signal_name} ({signum}).",
                 auth_headers=current_auth_headers_for_heartbeat,
-                timeout=3,
             )
             console.print(f"\n[cyan]To resume this run, use:[/] [bold cyan]weco resume {current_run_id_for_heartbeat}[/]")
             sys.exit(0)
```
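With the `timeout=3` override gone, these termination reports fall back to `report_termination`'s new default of `(5, 10)`. A minimal sketch of the signal-handler pattern, with a stubbed `report_termination` (its exact signature in `weco/api.py` is assumed) and invented wiring:

```python
import signal
import sys


def report_termination(run_id, reason, details, auth_headers, timeout=(5, 10)) -> bool:
    """Stub standing in for weco.api.report_termination; signature assumed."""
    print(f"reporting {reason} for {run_id} with timeout={timeout}")
    return True


def install_termination_handler(run_id: str, auth_headers: dict) -> None:
    def handler(signum, frame):
        signal_name = signal.Signals(signum).name  # e.g. "SIGINT"
        # No per-call timeout override any more; the default (5, 10) applies.
        report_termination(
            run_id=run_id,
            reason=f"user_terminated_{signal_name.lower()}",
            details=f"Process terminated by signal {signal_name} ({signum}).",
            auth_headers=auth_headers,
        )
        print(f"To resume this run, use: weco resume {run_id}")
        sys.exit(0)

    signal.signal(signal.SIGINT, handler)   # Ctrl-C
    signal.signal(signal.SIGTERM, handler)  # kill
```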