weco 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- weco/api.py +84 -46
- weco/constants.py +0 -4
- weco/optimizer.py +0 -2
- weco/utils.py +41 -11
- {weco-0.3.0.dist-info → weco-0.3.2.dist-info}/METADATA +2 -1
- weco-0.3.2.dist-info/RECORD +16 -0
- weco-0.3.0.dist-info/RECORD +0 -16
- {weco-0.3.0.dist-info → weco-0.3.2.dist-info}/WHEEL +0 -0
- {weco-0.3.0.dist-info → weco-0.3.2.dist-info}/entry_points.txt +0 -0
- {weco-0.3.0.dist-info → weco-0.3.2.dist-info}/licenses/LICENSE +0 -0
- {weco-0.3.0.dist-info → weco-0.3.2.dist-info}/top_level.txt +0 -0
weco/api.py
CHANGED
@@ -4,7 +4,6 @@ import requests
 from rich.console import Console
 
 from weco import __pkg_version__, __base_url__
-from .constants import CODEGEN_API_TIMEOUT, STATUS_API_TIMEOUT
 from .utils import truncate_output, determine_model_for_onboarding
 
 
@@ -48,6 +47,51 @@ def handle_api_error(e: requests.exceptions.HTTPError, console: Console) -> None
         _render(detail)
 
 
+def _recover_suggest_after_transport_error(
+    console: Console, run_id: str, step: int, auth_headers: dict
+) -> Optional[Dict[str, Any]]:
+    """
+    Try to reconstruct the /suggest response after a transport error (ReadTimeout/502/RemoteDisconnected)
+    by fetching run status and using the latest nodes.
+
+    Args:
+        console: The console object to use for logging.
+        run_id: The ID of the run to recover.
+        step: The step of the solution to recover.
+        auth_headers: The authentication headers to use for the request.
+
+    Returns:
+        The recovered response if the run is in a valid state, otherwise None.
+    """
+    run_status_recovery_response = get_optimization_run_status(
+        console=console, run_id=run_id, include_history=True, auth_headers=auth_headers
+    )
+    current_step = run_status_recovery_response.get("current_step")
+    current_status = run_status_recovery_response.get("status")
+    # The run should be "running" and the current step should correspond to the solution step we are attempting to generate
+    is_valid_run_state = current_status is not None and current_status == "running"
+    is_valid_step = current_step is not None and current_step == step
+    if is_valid_run_state and is_valid_step:
+        nodes = run_status_recovery_response.get("nodes") or []
+        # We need at least 2 nodes to reconstruct the expected response i.e., the last two nodes
+        if len(nodes) >= 2:
+            nodes_sorted_ascending = sorted(nodes, key=lambda n: n["step"])
+            latest_node = nodes_sorted_ascending[-1]
+            penultimate_node = nodes_sorted_ascending[-2]
+            # If the server finished generating the next candidate, it should be exactly this step
+            if latest_node and latest_node["step"] == step:
+                # Try to reconstruct the expected response from the /suggest endpoint using the run status info
+                return {
+                    "run_id": run_id,
+                    "previous_solution_metric_value": penultimate_node.get("metric_value"),
+                    "solution_id": latest_node.get("solution_id"),
+                    "code": latest_node.get("code"),
+                    "plan": latest_node.get("plan"),
+                    "is_done": False,
+                }
+    return None
+
+
 def start_optimization_run(
     console: Console,
     source_code: str,
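For orientation, here is a sketch of the run-status payload this helper consumes and the /suggest-shaped dictionary it rebuilds. The keys are the ones the code above reads; the values are invented for illustration.

    # Hypothetical run-status payload (only the keys the helper reads; values made up):
    status = {
        "status": "running",
        "current_step": 7,
        "nodes": [
            {"step": 6, "solution_id": "sol-6", "code": "...", "plan": "...", "metric_value": 0.81},
            {"step": 7, "solution_id": "sol-7", "code": "...", "plan": "...", "metric_value": None},
        ],
    }
    # With step == 7, the helper would return the reconstructed /suggest response:
    # {
    #     "run_id": run_id,
    #     "previous_solution_metric_value": 0.81,  # metric of the penultimate (step 6) node
    #     "solution_id": "sol-7",
    #     "code": "...",
    #     "plan": "...",
    #     "is_done": False,
    # }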
@@ -64,7 +108,7 @@ def start_optimization_run(
     save_logs: bool = False,
     log_dir: str = ".runs",
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Optional[Dict[str, Any]]:
     """Start the optimization run."""
     with console.status("[bold green]Starting Optimization..."):
@@ -107,7 +151,7 @@ def start_optimization_run(
 
 
 def resume_optimization_run(
-    console: Console, run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] =
+    console: Console, run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = (5, 10)
 ) -> Optional[Dict[str, Any]]:
     """Request the backend to resume an interrupted run."""
     with console.status("[bold green]Resuming run..."):
@@ -136,7 +180,7 @@ def evaluate_feedback_then_suggest_next_solution(
     execution_output: str,
     additional_instructions: str = None,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Dict[str, Any]:
     """Evaluate the feedback and suggest the next solution."""
     try:
@@ -158,44 +202,38 @@ def evaluate_feedback_then_suggest_next_solution(
         result["code"] = ""
         return result
     except requests.exceptions.ReadTimeout as e:
-        #
-        # 1
-        # 2
-        #
-
-
-        # client didn't receive the response and timed out
-        run_status_recovery_response = get_optimization_run_status(
-            console=console, run_id=run_id, include_history=True, auth_headers=auth_headers
+        # ReadTimeout can mean either:
+        # 1) the server truly didn't finish before the client's read timeout, or
+        # 2) the server finished but an intermediary (proxy/LB) dropped the response.
+        # We only try to recover case (2): fetch run status to confirm the step completed and reconstruct the response.
+        recovered = _recover_suggest_after_transport_error(
+            console=console, run_id=run_id, step=step, auth_headers=auth_headers
         )
-
-
-        #
-        is_valid_run_state = current_status is not None and current_status == "running"
-        is_valid_step = current_step is not None and current_step == step
-        if is_valid_run_state and is_valid_step:
-            nodes = run_status_recovery_response.get("nodes") or []
-            # We need at least 2 nodes to reconstruct the expected response i.e., the last two nodes
-            if len(nodes) >= 2:
-                nodes_sorted_ascending = sorted(nodes, key=lambda n: n["step"])
-                latest_node = nodes_sorted_ascending[-1]
-                penultimate_node = nodes_sorted_ascending[-2]
-                # If the server finished generating the next candidate, it should be exactly this step
-                if latest_node and latest_node["step"] == step:
-                    # Try to reconstruct the expected response from the /suggest endpoint using the run status info
-                    reconstructed_expected_response = {
-                        "run_id": run_id,
-                        "previous_solution_metric_value": penultimate_node.get("metric_value"),
-                        "solution_id": latest_node.get("solution_id"),
-                        "code": latest_node.get("code"),
-                        "plan": latest_node.get("plan"),
-                        "is_done": False,
-                    }
-                    return reconstructed_expected_response
-        # If we couldn't recover, raise the timeout error so the run can be resumed by the user
+        if recovered is not None:
+            return recovered
+        # If we cannot confirm completion, bubble up the timeout so the caller can resume later.
         raise requests.exceptions.ReadTimeout(e)
     except requests.exceptions.HTTPError as e:
-        #
+        # Treat only 502 Bad Gateway as a transient transport/gateway issue (akin to a dropped response).
+        # For 502, attempt the status-based recovery method used for ReadTimeout errors; otherwise render the HTTP error normally.
+        if (resp := getattr(e, "response", None)) is not None and resp.status_code == 502:
+            recovered = _recover_suggest_after_transport_error(
+                console=console, run_id=run_id, step=step, auth_headers=auth_headers
+            )
+            if recovered is not None:
+                return recovered
+        # Surface non-502 HTTP errors to the user.
+        handle_api_error(e, console)
+        raise
+    except requests.exceptions.ConnectionError as e:
+        # Covers connection resets with no HTTP response (e.g., RemoteDisconnected).
+        # Treat as a potential "response lost after completion": try status-based recovery first similar to how ReadTimeout errors are handled.
+        recovered = _recover_suggest_after_transport_error(
+            console=console, run_id=run_id, step=step, auth_headers=auth_headers
+        )
+        if recovered is not None:
+            return recovered
+        # Surface the connection error to the user.
         handle_api_error(e, console)
         raise
     except Exception as e:
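The three new except branches correspond to three distinct requests failure modes. Below is a minimal, self-contained sketch of the same recovery pattern; post_with_recovery and recover are hypothetical names used for illustration, not part of weco:

    import requests

    def post_with_recovery(url: str, payload: dict, recover):
        """Sketch: try the request; on a transport-level failure, ask recover()
        whether the server actually finished the work before the response was lost."""
        try:
            # requests reads a tuple timeout as (connect seconds, read seconds)
            resp = requests.post(url, json=payload, timeout=(10, 3650))
            resp.raise_for_status()
            return resp.json()
        except requests.exceptions.ReadTimeout:
            # No response before the read timeout; the server may still have finished.
            recovered = recover()
            if recovered is not None:
                return recovered
            raise
        except requests.exceptions.HTTPError as exc:
            # Only a 502 from a gateway is treated as "the work may have completed anyway".
            if exc.response is not None and exc.response.status_code == 502:
                recovered = recover()
                if recovered is not None:
                    return recovered
            raise
        except requests.exceptions.ConnectionError:
            # Connection reset with no HTTP response at all (e.g. RemoteDisconnected).
            recovered = recover()
            if recovered is not None:
                return recovered
            raise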
@@ -208,7 +246,7 @@ def get_optimization_run_status(
     run_id: str,
     include_history: bool = False,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (5, 10),
 ) -> Dict[str, Any]:
     """Get the current status of the optimization run."""
     try:
@@ -239,7 +277,7 @@ def get_optimization_run_status(
         raise
 
 
-def send_heartbeat(run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = (
+def send_heartbeat(run_id: str, auth_headers: dict = {}, timeout: Union[int, Tuple[int, int]] = (5, 10)) -> bool:
     """Send a heartbeat signal to the backend."""
     try:
         response = requests.put(f"{__base_url__}/runs/{run_id}/heartbeat", headers=auth_headers, timeout=timeout)
@@ -262,7 +300,7 @@ def report_termination(
     reason: str,
     details: Optional[str] = None,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] = (
+    timeout: Union[int, Tuple[int, int]] = (5, 10),
 ) -> bool:
     """Report the termination reason to the backend."""
     try:
@@ -285,7 +323,7 @@ def get_optimization_suggestions_from_codebase(
     gitingest_tree: str,
     gitingest_content_str: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Optional[List[Dict[str, Any]]]:
     """Analyze codebase and get optimization suggestions using the model-agnostic backend API."""
     try:
@@ -320,7 +358,7 @@ def generate_evaluation_script_and_metrics(
     description: str,
     gitingest_content_str: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
     """Generate evaluation script and determine metrics using the model-agnostic backend API."""
     try:
@@ -356,7 +394,7 @@ def analyze_evaluation_environment(
     gitingest_tree: str,
     gitingest_content_str: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Optional[Dict[str, Any]]:
     """Analyze existing evaluation scripts and environment using the model-agnostic backend API."""
     try:
@@ -392,7 +430,7 @@ def analyze_script_execution_requirements(
     script_path: str,
     target_file: str,
     auth_headers: dict = {},
-    timeout: Union[int, Tuple[int, int]] =
+    timeout: Union[int, Tuple[int, int]] = (10, 3650),
 ) -> Optional[str]:
     """Analyze script to determine proper execution command using the model-agnostic backend API."""
     try:
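Each of these signatures types timeout as Union[int, Tuple[int, int]], so callers can pass either a single number or a (connect, read) pair, matching how requests interprets the argument. An illustrative call is sketched below; the run id and header value are placeholders, not real values:

    from rich.console import Console
    from weco.api import get_optimization_run_status

    console = Console()
    status = get_optimization_run_status(
        console=console,
        run_id="run_abc123",                               # hypothetical run id
        include_history=True,
        auth_headers={"Authorization": "Bearer <token>"},  # hypothetical header
        timeout=(5, 30),                                   # or a single int applied to both phases
    )
    print(status.get("status"), status.get("current_step"))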
weco/constants.py
CHANGED
@@ -3,10 +3,6 @@
 Constants for the Weco CLI package.
 """
 
-# API timeout configuration (connect_timeout, read_timeout) in seconds
-CODEGEN_API_TIMEOUT = (30, 3650)
-STATUS_API_TIMEOUT = (10, 180)
-
 # Output truncation configuration
 TRUNCATION_THRESHOLD = 51000  # Maximum length before truncation
 TRUNCATION_KEEP_LENGTH = 25000  # Characters to keep from beginning and end
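The removed constants encoded requests-style (connect_timeout, read_timeout) tuples; after this release the same semantics live in the per-function defaults shown in api.py above. As a reminder of what the tuple means to requests (the URL here is illustrative):

    import requests

    # Fail fast (5 s) if the TCP/TLS handshake stalls, but tolerate up to
    # 10 s of silence while waiting for the response to start arriving.
    requests.get("https://api.example.com/health", timeout=(5, 10))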
weco/optimizer.py
CHANGED
@@ -168,7 +168,6 @@ def execute_optimization(
             reason=f"user_terminated_{signal_name.lower()}",
             details=f"Process terminated by signal {signal_name} ({signum}).",
             auth_headers=current_auth_headers_for_heartbeat,
-            timeout=3,
         )
         console.print(f"\n[cyan]To resume this run, use:[/] [bold cyan]weco resume {current_run_id_for_heartbeat}[/]")
 
@@ -547,7 +546,6 @@ def resume_optimization(run_id: str, console: Optional[Console] = None) -> bool:
             reason=f"user_terminated_{signal_name.lower()}",
             details=f"Process terminated by signal {signal_name} ({signum}).",
             auth_headers=current_auth_headers_for_heartbeat,
-            timeout=3,
         )
         console.print(f"\n[cyan]To resume this run, use:[/] [bold cyan]weco resume {current_run_id_for_heartbeat}[/]")
         sys.exit(0)
weco/utils.py
CHANGED
@@ -2,13 +2,13 @@ from typing import Any, Dict, List, Tuple, Union
 import json
 import time
 import subprocess
+import psutil
 from rich.layout import Layout
 from rich.live import Live
 from rich.panel import Panel
 import pathlib
 import requests
 from packaging.version import parse as parse_version
-
 from .constants import TRUNCATION_THRESHOLD, TRUNCATION_KEEP_LENGTH, DEFAULT_MODEL, SUPPORTED_FILE_EXTENSIONS
 
 
@@ -108,22 +108,52 @@ def truncate_output(output: str) -> str:
 
 def run_evaluation(eval_command: str, timeout: int | None = None) -> str:
     """Run the evaluation command on the code and return the output."""
+    process = subprocess.Popen(
+        eval_command, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
+    )
 
-    # Run the eval command as is
     try:
-
-
-        output
-
-        if len(output) > 0:
-            output += "\n"
-        output += result.stdout
-        return output  # Return full output, no truncation
+        # NOTE: Process tree cleanup only happens on timeout. Normal completion relies on the OS/shell to clean up child processes, which works for typical evaluation scripts.
+        output, _ = process.communicate(timeout=timeout)
+        return output
+
     except subprocess.TimeoutExpired:
+        # Kill process tree
+        try:
+            parent = psutil.Process(process.pid)
+            children = parent.children(recursive=True)
+
+            # Terminate gracefully
+            for child in children:
+                try:
+                    child.terminate()
+                except psutil.NoSuchProcess:
+                    pass
+            try:
+                parent.terminate()
+            except psutil.NoSuchProcess:
+                pass
+
+            # Wait, then force kill survivors
+            _, alive = psutil.wait_procs(children + [parent], timeout=1)
+            for proc in alive:
+                try:
+                    proc.kill()
+                except psutil.NoSuchProcess:
+                    pass
+
+        except psutil.NoSuchProcess:
+            pass
+
+        # Drain pipes
+        try:
+            process.communicate(timeout=1)
+        except (subprocess.TimeoutExpired, ValueError, OSError):
+            pass
+
        return f"Evaluation timed out after {'an unspecified duration' if timeout is None else f'{timeout} seconds'}."
 
 
-# Update Check Function
 def check_for_cli_updates():
     """Checks PyPI for a newer version of the weco package and notifies the user."""
     try:
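A minimal usage sketch of the reworked run_evaluation; the command and timeout are illustrative. communicate() blocks until the shell pipeline exits or the timeout fires, and on timeout the psutil walk above tears down the whole process tree rather than just the shell:

    from weco.utils import run_evaluation

    # Hypothetical evaluation command; stdout and stderr come back merged as one string.
    output = run_evaluation("python evaluate.py --trials 5", timeout=600)
    print(output)  # full output, or "Evaluation timed out after 600 seconds."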
{weco-0.3.0.dist-info → weco-0.3.2.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: weco
-Version: 0.3.0
+Version: 0.3.2
 Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
 Author-email: Weco AI Team <contact@weco.ai>
 License:
@@ -219,6 +219,7 @@ Requires-Dist: packaging
 Requires-Dist: gitingest
 Requires-Dist: fastapi
 Requires-Dist: slowapi
+Requires-Dist: psutil
 Provides-Extra: dev
 Requires-Dist: ruff; extra == "dev"
 Requires-Dist: build; extra == "dev"
weco-0.3.2.dist-info/RECORD
ADDED

@@ -0,0 +1,16 @@
+weco/__init__.py,sha256=ClO0uT6GKOA0iSptvP0xbtdycf0VpoPTq37jHtvlhtw,303
+weco/api.py,sha256=dUjzuOKKvayzZ_1B4j40eK9Ofk264jsc6vOR1afsszY,18523
+weco/auth.py,sha256=O31Hoj-Loi8DWJJG2LfeWgUMuNqAUeGDpd2ZGjA9Ah0,9997
+weco/chatbot.py,sha256=EIK2WaOul9gn_yHLThjsZV7RnE8t3XQPwgRkO5tybSU,38415
+weco/cli.py,sha256=579f6jf-ZWuFAmNXDisRY7zWr7vw2YZQuC_QX8-qxx0,11460
+weco/constants.py,sha256=V6yFugTznKm5EC2_jr4I_whd7sqI80HiPggRn0az580,406
+weco/credits.py,sha256=C08x-TRcLg3ccfKqMGNRY7zBn7t3r7LZ119bxgfztaI,7629
+weco/optimizer.py,sha256=mJU8_0bo_6dS2PEj1E3dQHvNH9V4e8NSLNE55tmvspw,42291
+weco/panels.py,sha256=fnGPtmvxpx21AuBCtCFu1f_BpSxybNr2lhjIIKIutrY,16133
+weco/utils.py,sha256=erDDrA_g3KSlel6YEAGALlV_k8ftT-VQnPT1BrmzK8k,7021
+weco-0.3.2.dist-info/licenses/LICENSE,sha256=9LUfoGHjLPtak2zps2kL2tm65HAZIICx_FbLaRuS4KU,11337
+weco-0.3.2.dist-info/METADATA,sha256=TDJIvT1vw3VFrjEj9o8VkLuxis2MWWAL0pnDYqpFfak,31878
+weco-0.3.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+weco-0.3.2.dist-info/entry_points.txt,sha256=ixJ2uClALbCpBvnIR6BXMNck8SHAab8eVkM9pIUowcs,39
+weco-0.3.2.dist-info/top_level.txt,sha256=F0N7v6e2zBSlsorFv-arAq2yDxQbzX3KVO8GxYhPUeE,5
+weco-0.3.2.dist-info/RECORD,,
weco-0.3.0.dist-info/RECORD
DELETED
@@ -1,16 +0,0 @@
-weco/__init__.py,sha256=ClO0uT6GKOA0iSptvP0xbtdycf0VpoPTq37jHtvlhtw,303
-weco/api.py,sha256=JQ-zqRUMVuWP1eq9ojihMCHlX7ziQjrDCyJjgbqT5e4,16909
-weco/auth.py,sha256=O31Hoj-Loi8DWJJG2LfeWgUMuNqAUeGDpd2ZGjA9Ah0,9997
-weco/chatbot.py,sha256=EIK2WaOul9gn_yHLThjsZV7RnE8t3XQPwgRkO5tybSU,38415
-weco/cli.py,sha256=579f6jf-ZWuFAmNXDisRY7zWr7vw2YZQuC_QX8-qxx0,11460
-weco/constants.py,sha256=_0a50O4nRdkEwMhNIOi04bTrGhc06ZJeotjGae6t-l4,542
-weco/credits.py,sha256=C08x-TRcLg3ccfKqMGNRY7zBn7t3r7LZ119bxgfztaI,7629
-weco/optimizer.py,sha256=Tjnnx3Xq5ZcMggW8IMHhjsb3RwVrtdh7Z4rbJV0VjAs,42345
-weco/panels.py,sha256=fnGPtmvxpx21AuBCtCFu1f_BpSxybNr2lhjIIKIutrY,16133
-weco/utils.py,sha256=TT57S0YGMuMWPFNsn0tcexNHZd-kBEjDeiOLWxANiQU,6117
-weco-0.3.0.dist-info/licenses/LICENSE,sha256=9LUfoGHjLPtak2zps2kL2tm65HAZIICx_FbLaRuS4KU,11337
-weco-0.3.0.dist-info/METADATA,sha256=OOML7kA7wFKR6HWNt7D7xXa6TBySS8iMa_K6GwA7TNw,31856
-weco-0.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-weco-0.3.0.dist-info/entry_points.txt,sha256=ixJ2uClALbCpBvnIR6BXMNck8SHAab8eVkM9pIUowcs,39
-weco-0.3.0.dist-info/top_level.txt,sha256=F0N7v6e2zBSlsorFv-arAq2yDxQbzX3KVO8GxYhPUeE,5
-weco-0.3.0.dist-info/RECORD,,
{weco-0.3.0.dist-info → weco-0.3.2.dist-info}/WHEEL
UNCHANGED

{weco-0.3.0.dist-info → weco-0.3.2.dist-info}/entry_points.txt
UNCHANGED

{weco-0.3.0.dist-info → weco-0.3.2.dist-info}/licenses/LICENSE
UNCHANGED

{weco-0.3.0.dist-info → weco-0.3.2.dist-info}/top_level.txt
UNCHANGED