weco 0.3.1__py3-none-any.whl → 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
weco/api.py CHANGED
@@ -178,7 +178,6 @@ def evaluate_feedback_then_suggest_next_solution(
178
178
  run_id: str,
179
179
  step: int,
180
180
  execution_output: str,
181
- additional_instructions: str = None,
182
181
  auth_headers: dict = {},
183
182
  timeout: Union[int, Tuple[int, int]] = (10, 3650),
184
183
  ) -> Dict[str, Any]:
@@ -189,7 +188,7 @@ def evaluate_feedback_then_suggest_next_solution(
189
188
 
190
189
  response = requests.post(
191
190
  f"{__base_url__}/runs/{run_id}/suggest",
192
- json={"execution_output": truncated_output, "additional_instructions": additional_instructions, "metadata": {}},
191
+ json={"execution_output": truncated_output, "metadata": {}},
193
192
  headers=auth_headers,
194
193
  timeout=timeout,
195
194
  )
weco/cli.py CHANGED
@@ -49,7 +49,7 @@ def configure_run_parser(run_parser: argparse.ArgumentParser) -> None:
49
49
  "--model",
50
50
  type=str,
51
51
  default=None,
52
- help="Model to use for optimization. Defaults to `o4-mini` when `OPENAI_API_KEY` is set, `claude-sonnet-4-0` when `ANTHROPIC_API_KEY` is set, and `gemini-2.5-pro` when `GEMINI_API_KEY` is set. When multiple keys are set, the priority is `OPENAI_API_KEY` > `ANTHROPIC_API_KEY` > `GEMINI_API_KEY`.",
52
+ help="Model to use for optimization. Defaults to `o4-mini`. See full list at https://docs.weco.ai/cli/supported-models",
53
53
  )
54
54
  run_parser.add_argument(
55
55
  "-l", "--log-dir", type=str, default=".runs", help="Directory to store logs and results. Defaults to `.runs`."
@@ -164,7 +164,7 @@ def main() -> None:
164
164
  "--model",
165
165
  type=str,
166
166
  default=None,
167
- help="Model to use for optimization. Defaults to `o4-mini` when `OPENAI_API_KEY` is set, `claude-sonnet-4-0` when `ANTHROPIC_API_KEY` is set, and `gemini-2.5-pro` when `GEMINI_API_KEY` is set. When multiple keys are set, the priority is `OPENAI_API_KEY` > `ANTHROPIC_API_KEY` > `GEMINI_API_KEY`.",
167
+ help="Model to use for optimization. Defaults to `o4-mini`. See full list at https://docs.weco.ai/cli/supported-models",
168
168
  )
169
169
 
170
170
  subparsers = parser.add_subparsers(
weco/optimizer.py CHANGED
@@ -337,8 +337,6 @@ def execute_optimization(
337
337
 
338
338
  # Starting from step 1 to steps (inclusive) because the baseline solution is step 0, so we optimize for `steps` additional iterations
339
339
  for step in range(1, steps + 1):
340
- # Re-read instructions from the original source (file path or string) BEFORE each suggest call
341
- current_additional_instructions = read_additional_instructions(additional_instructions=additional_instructions)
342
340
  if run_id:
343
341
  try:
344
342
  current_status_response = get_optimization_run_status(
@@ -356,12 +354,7 @@ def execute_optimization(
356
354
 
357
355
  # Send feedback and get next suggestion
358
356
  eval_and_next_solution_response = evaluate_feedback_then_suggest_next_solution(
359
- console=console,
360
- step=step,
361
- run_id=run_id,
362
- execution_output=term_out,
363
- additional_instructions=current_additional_instructions,
364
- auth_headers=auth_headers,
357
+ console=console, step=step, run_id=run_id, execution_output=term_out, auth_headers=auth_headers
365
358
  )
366
359
  # Save next solution (.runs/<run-id>/step_<step>.<extension>)
367
360
  write_to_path(fp=runs_dir / f"step_{step}{source_fp.suffix}", content=eval_and_next_solution_response["code"])
@@ -415,16 +408,9 @@ def execute_optimization(
415
408
  )
416
409
 
417
410
  if not user_stop_requested_flag:
418
- # Re-read instructions from the original source (file path or string) BEFORE each suggest call
419
- current_additional_instructions = read_additional_instructions(additional_instructions=additional_instructions)
420
411
  # Evaluate the final solution that's been generated
421
412
  eval_and_next_solution_response = evaluate_feedback_then_suggest_next_solution(
422
- console=console,
423
- step=steps,
424
- run_id=run_id,
425
- execution_output=term_out,
426
- additional_instructions=current_additional_instructions,
427
- auth_headers=auth_headers,
413
+ console=console, step=steps, run_id=run_id, execution_output=term_out, auth_headers=auth_headers
428
414
  )
429
415
  summary_panel.set_step(step=steps)
430
416
  status_response = get_optimization_run_status(
@@ -632,7 +618,6 @@ def resume_optimization(run_id: str, console: Optional[Console] = None) -> bool:
632
618
  log_dir = resume_resp.get("log_dir", ".runs")
633
619
  save_logs = bool(resume_resp.get("save_logs", False))
634
620
  eval_timeout = resume_resp.get("eval_timeout")
635
- additional_instructions = resume_resp.get("additional_instructions")
636
621
 
637
622
  # Write last solution code to source path
638
623
  source_fp = pathlib.Path(source_path)
@@ -739,7 +724,6 @@ def resume_optimization(run_id: str, console: Optional[Console] = None) -> bool:
739
724
  step=step,
740
725
  run_id=resume_resp["run_id"],
741
726
  execution_output=term_out,
742
- additional_instructions=additional_instructions,
743
727
  auth_headers=auth_headers,
744
728
  )
745
729
 
@@ -795,7 +779,6 @@ def resume_optimization(run_id: str, console: Optional[Console] = None) -> bool:
795
779
  step=total_steps,
796
780
  run_id=resume_resp["run_id"],
797
781
  execution_output=term_out,
798
- additional_instructions=additional_instructions,
799
782
  auth_headers=auth_headers,
800
783
  )
801
784
  summary_panel.set_step(step=total_steps)
weco/utils.py CHANGED
@@ -2,13 +2,13 @@ from typing import Any, Dict, List, Tuple, Union
2
2
  import json
3
3
  import time
4
4
  import subprocess
5
+ import psutil
5
6
  from rich.layout import Layout
6
7
  from rich.live import Live
7
8
  from rich.panel import Panel
8
9
  import pathlib
9
10
  import requests
10
11
  from packaging.version import parse as parse_version
11
-
12
12
  from .constants import TRUNCATION_THRESHOLD, TRUNCATION_KEEP_LENGTH, DEFAULT_MODEL, SUPPORTED_FILE_EXTENSIONS
13
13
 
14
14
 
@@ -108,22 +108,52 @@ def truncate_output(output: str) -> str:
108
108
 
109
109
  def run_evaluation(eval_command: str, timeout: int | None = None) -> str:
110
110
  """Run the evaluation command on the code and return the output."""
111
+ process = subprocess.Popen(
112
+ eval_command, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
113
+ )
111
114
 
112
- # Run the eval command as is
113
115
  try:
114
- result = subprocess.run(eval_command, shell=True, capture_output=True, text=True, check=False, timeout=timeout)
115
- # Combine stdout and stderr for complete output
116
- output = result.stderr if result.stderr else ""
117
- if result.stdout:
118
- if len(output) > 0:
119
- output += "\n"
120
- output += result.stdout
121
- return output # Return full output, no truncation
116
+ # NOTE: Process tree cleanup only happens on timeout. Normal completion relies on the OS/shell to clean up child processes, which works for typical evaluation scripts.
117
+ output, _ = process.communicate(timeout=timeout)
118
+ return output
119
+
122
120
  except subprocess.TimeoutExpired:
121
+ # Kill process tree
122
+ try:
123
+ parent = psutil.Process(process.pid)
124
+ children = parent.children(recursive=True)
125
+
126
+ # Terminate gracefully
127
+ for child in children:
128
+ try:
129
+ child.terminate()
130
+ except psutil.NoSuchProcess:
131
+ pass
132
+ try:
133
+ parent.terminate()
134
+ except psutil.NoSuchProcess:
135
+ pass
136
+
137
+ # Wait, then force kill survivors
138
+ _, alive = psutil.wait_procs(children + [parent], timeout=1)
139
+ for proc in alive:
140
+ try:
141
+ proc.kill()
142
+ except psutil.NoSuchProcess:
143
+ pass
144
+
145
+ except psutil.NoSuchProcess:
146
+ pass
147
+
148
+ # Drain pipes
149
+ try:
150
+ process.communicate(timeout=1)
151
+ except (subprocess.TimeoutExpired, ValueError, OSError):
152
+ pass
153
+
123
154
  return f"Evaluation timed out after {'an unspecified duration' if timeout is None else f'{timeout} seconds'}."
124
155
 
125
156
 
126
- # Update Check Function
127
157
  def check_for_cli_updates():
128
158
  """Checks PyPI for a newer version of the weco package and notifies the user."""
129
159
  try:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: weco
3
- Version: 0.3.1
3
+ Version: 0.3.3
4
4
  Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
5
5
  Author-email: Weco AI Team <contact@weco.ai>
6
6
  License:
@@ -219,6 +219,7 @@ Requires-Dist: packaging
219
219
  Requires-Dist: gitingest
220
220
  Requires-Dist: fastapi
221
221
  Requires-Dist: slowapi
222
+ Requires-Dist: psutil
222
223
  Provides-Extra: dev
223
224
  Requires-Dist: ruff; extra == "dev"
224
225
  Requires-Dist: build; extra == "dev"
@@ -229,7 +230,7 @@ Dynamic: license-file
229
230
 
230
231
  <div align="center">
231
232
  <img src="assets/weco.svg" alt="Weco Logo" width="120" height="120" style="margin-bottom: 20px;">
232
- <h1>Weco: The Platform for Self-Improving Code</h1>
233
+ <h1>Weco: The Code Optimization Agent</h1>
233
234
  </div>
234
235
 
235
236
  [![Python](https://img.shields.io/badge/Python-3.8.0+-blue)](https://www.python.org)
@@ -237,7 +238,7 @@ Dynamic: license-file
237
238
  [![docs](https://img.shields.io/website?url=https://docs.weco.ai/&label=docs)](https://docs.weco.ai/)
238
239
  [![PyPI Downloads](https://static.pepy.tech/badge/weco?color=4c1)](https://pepy.tech/projects/weco)
239
240
  [![arXiv on AIDE](https://img.shields.io/badge/arXiv-AIDE-b31b1b?logo=arxiv&logoColor=white)](https://arxiv.org/abs/2502.13138)
240
- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg?labelColor=ffffff&color=F17E01)](https://colab.research.google.com/github/WecoAI/weco-cli/blob/main/examples/hello-kernel-world/colab_notebook_walkthrough.ipynb)
241
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg?labelColor=ffffff&color=F17E01)](https://colab.research.google.com/github/WecoAI/weco-cli/blob/main/examples/hello-world/colab_notebook_walkthrough.ipynb)
241
242
 
242
243
  `pip install weco`
243
244
 
@@ -261,77 +262,32 @@ Example applications include:
261
262
 
262
263
  The `weco` CLI leverages a tree search approach guided by LLMs to iteratively explore and refine your code. It automatically applies changes, runs your evaluation script, parses the results, and proposes further improvements based on the specified goal.
263
264
 
264
- ![image](https://github.com/user-attachments/assets/a6ed63fa-9c40-498e-aa98-a873e5786509)
265
265
 
266
- ---
267
-
268
- ## Setup
269
-
270
- 1. **Install the Package:**
271
-
272
- ```bash
273
- pip install weco
274
- ```
275
-
276
- 2. **Authenticate (Required):**
277
-
278
- `weco` now uses a **credit-based billing system** with centralized LLM access. You need to authenticate to use the service:
279
-
280
- - **Run the CLI**: `weco` will prompt you to authenticate via your web browser
281
- - **Free Credits**: New users receive **free credits** upon signup
282
- - **Centralized Keys**: All LLM provider API keys are managed by Weco (no BYOK required)
283
- - **Credit Top-ups**: Purchase additional credits through the dashboard at [dashboard.weco.ai](https://dashboard.weco.ai)
284
-
285
- ---
286
-
287
- ## Get Started
288
-
289
- ### Quick Start (Recommended for New Users)
290
-
291
- The easiest way to get started with Weco is to use the **interactive copilot**. Simply navigate to your project directory and run:
292
-
293
- ```bash
294
- weco
295
- ```
296
-
297
- Or specify a project path:
266
+ ## Install the Package
298
267
 
299
268
  ```bash
300
- weco /path/to/your/project
269
+ pip install weco
301
270
  ```
302
271
 
303
- This launches Weco's interactive copilot that will:
272
+ ## Getting Started
304
273
 
305
- 1. **Analyze your codebase** using AI to understand your project structure and identify optimization opportunities
306
- 2. **Suggest specific optimizations** tailored to your code (e.g., GPU kernel optimization, model improvements, prompt engineering)
307
- 3. **Generate evaluation scripts** automatically or help you configure existing ones
308
- 4. **Set up the complete optimization pipeline** with appropriate metrics and commands
309
- 5. **Run the optimization** or provide you with the exact command to execute
310
-
311
- <div style="background-color: #fff3cd; border: 1px solid #ffeeba; padding: 15px; border-radius: 4px; margin-bottom: 15px;">
312
- <strong>⚠️ Warning: Code Modification</strong><br>
313
- <code>weco</code> directly modifies the file specified by <code>--source</code> during the optimization process. It is <strong>strongly recommended</strong> to use version control (like Git) to track changes and revert if needed. Alternatively, ensure you have a backup of your original file before running the command. Upon completion, the file will contain the best-performing version of the code found during the run.
314
- </div>
315
-
316
- ### Manual Setup
274
+ ### Quickstart with an example project
317
275
 
318
276
  **Configure optimization parameters yourself** - If you need precise control over the optimization parameters, you can use the direct `weco run` command:
319
277
 
320
278
  **Example: Optimizing Simple PyTorch Operations**
321
279
 
322
280
  ```bash
323
- # Navigate to the example directory
324
- cd examples/hello-kernel-world
325
-
326
- # Install dependencies
327
- pip install torch
281
+ git clone https://github.com/WecoAI/weco-cli.git
282
+ cd weco-cli/examples/hello-world/
283
+ pip install -r requirements.txt
328
284
 
329
- # Run Weco with manual configuration
330
- weco run --source optimize.py \
331
- --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
285
+ # Run Weco with configuration
286
+ weco run --source module.py \
287
+ --eval-command "python evaluate.py --path module.py" \
332
288
  --metric speedup \
333
289
  --goal maximize \
334
- --steps 15 \
290
+ --steps 10 \
335
291
  --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
336
292
  ```
337
293
 
@@ -361,7 +317,7 @@ For more advanced examples, including [Triton](/examples/triton/README.md), [CUD
361
317
  | Argument | Description | Default | Example |
362
318
  | :----------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------ | :------------------ |
363
319
  | `-n, --steps` | Number of optimization steps (LLM iterations) to run. | 100 | `-n 50` |
364
- | `-M, --model` | Model identifier for the LLM to use (e.g., `o4-mini`, `claude-sonnet-4-0`). | `o4-mini` | `-M o4-mini` |
320
+ | `-M, --model` | Model identifier for the LLM to use (e.g., `o4-mini`, `claude-sonnet-4-5`, `gpt-5`). | `o4-mini` | `-M o4-mini` |
365
321
  | `-i, --additional-instructions`| Natural language description of specific instructions **or** path to a file containing detailed instructions to guide the LLM. Supported file formats include - `.txt`, `.md`, and `.rst`. | `None` | `-i instructions.md` or `-i "Optimize the model for faster inference"`|
366
322
  | `-l, --log-dir` | Path to the directory to log intermediate steps and final optimization result. | `.runs/` | `-l ./logs/` |
367
323
  | `--eval-timeout` | Timeout in seconds for each step in evaluation. | No timeout (unlimited) | `--eval-timeout 3600` |
@@ -369,37 +325,12 @@ For more advanced examples, including [Triton](/examples/triton/README.md), [CUD
369
325
 
370
326
  ---
371
327
 
372
- ### Authentication & Dashboard
373
-
374
- The CLI requires a Weco account for authentication and billing.
375
-
376
- #### Credit-Based Authentication (Required)
377
- Weco now requires authentication for all operations. This enables our credit-based billing system and provides access to powerful optimizations:
378
-
379
- 1. **During onboarding**: When you run `weco` for the first time, you'll be prompted to log in
380
- 2. **Manual login**: Use `weco logout` to clear credentials, then run `weco` again to re-authenticate
381
- 3. **Device flow**: Weco will open your browser automatically and guide you through a secure OAuth-style authentication
382
-
383
- ![image (16)](https://github.com/user-attachments/assets/8a0a285b-4894-46fa-b6a2-4990017ca0c6)
384
-
385
- **Benefits:**
386
- - **No API Key Management**: All LLM provider keys are managed centrally
387
- - **Cost Transparency**: See exactly how many credits each optimization consumes
388
- - **Free Trial**: Free credits to get started with optimization projects
389
- - **Run History**: View all your optimization runs on the Weco dashboard
390
- - **Progress Tracking**: Monitor long-running optimizations remotely
391
- - **Budget Control**: Set spending limits and auto top-up preferences
392
-
393
- ---
394
-
395
328
  ## Command Reference
396
329
 
397
330
  ### Basic Usage Patterns
398
331
 
399
332
  | Command | Description | When to Use |
400
333
  |---------|-------------|-------------|
401
- | `weco` | Launch interactive onboarding | **Recommended for beginners** - Analyzes your codebase and guides you through setup |
402
- | `weco /path/to/project` | Launch onboarding for specific project | When working with a project in a different directory |
403
334
  | `weco run [options]` | Direct optimization execution | **For advanced users** - When you know exactly what to optimize and how |
404
335
  | `weco resume <run-id>` | Resume an interrupted run | Continue from the last completed step |
405
336
  | `weco logout` | Clear authentication credentials | To switch accounts or troubleshoot authentication issues |
@@ -409,19 +340,23 @@ Weco now requires authentication for all operations. This enables our credit-bas
409
340
  You can specify which LLM model to use with the `-M` or `--model` flag:
410
341
 
411
342
  ```bash
412
- # Use with onboarding
413
- weco --model gpt-4o
414
-
415
- # Use with direct execution
416
- weco run --model claude-3.5-sonnet --source optimize.py [other options...]
343
+ weco run --model gpt-5 --source optimize.py [other options...]
417
344
  ```
418
345
 
419
- **Available models:**
420
- - `o4-mini`, `o3-mini`, `gpt-4o` (OpenAI models)
421
- - `claude-sonnet-4-0`, `claude-opus-4-0` (Anthropic models)
422
- - `gemini-2.5-pro`, `gemini-2.5-flash` (Google models)
346
+ **Available models (30 total):**
347
+
348
+ **OpenAI Models:**
349
+ - GPT-5 Series: `gpt-5.1`, `gpt-5.1-codex`, `gpt-5.1-codex-mini`, `gpt-5-codex`, `gpt-5-pro`, `gpt-5`, `gpt-5-mini`, `gpt-5-nano`
350
+ - O-Series Reasoning: `o3-pro`, `o3`, `o3-mini`, `o4-mini`, `o1-pro`, `o1`, `codex-mini-latest`
351
+ - GPT-4 Series: `gpt-4.1`, `gpt-4.1-mini`, `gpt-4.1-nano`, `gpt-4o`, `gpt-4o-mini`
352
+
353
+ **Anthropic Claude (via Vertex AI):**
354
+ - `claude-opus-4-5`, `claude-opus-4-1`, `claude-opus-4`, `claude-sonnet-4-5`, `claude-sonnet-4`, `claude-haiku-4-5`
355
+
356
+ **Google Gemini:**
357
+ - `gemini-3-pro-preview`, `gemini-2.5-pro`, `gemini-2.5-flash`, `gemini-2.5-flash-lite`
423
358
 
424
- All models are available through Weco's centralized system. If no model is specified, Weco automatically selects the best model for your optimization task.
359
+ All models are available through Weco. If no model is specified, Weco automatically selects the best model for your optimization task.
425
360
 
426
361
  ---
427
362
 
@@ -0,0 +1,16 @@
1
+ weco/__init__.py,sha256=ClO0uT6GKOA0iSptvP0xbtdycf0VpoPTq37jHtvlhtw,303
2
+ weco/api.py,sha256=zKcI4riwruK6CjV_vcL8RlsJGRXO40iP0WxeETtzPIY,18430
3
+ weco/auth.py,sha256=O31Hoj-Loi8DWJJG2LfeWgUMuNqAUeGDpd2ZGjA9Ah0,9997
4
+ weco/chatbot.py,sha256=EIK2WaOul9gn_yHLThjsZV7RnE8t3XQPwgRkO5tybSU,38415
5
+ weco/cli.py,sha256=5TusCKQ8o4CUdqxOSkvWyjMzt86O9sj21SJoeU0Ni5w,11098
6
+ weco/constants.py,sha256=V6yFugTznKm5EC2_jr4I_whd7sqI80HiPggRn0az580,406
7
+ weco/credits.py,sha256=C08x-TRcLg3ccfKqMGNRY7zBn7t3r7LZ119bxgfztaI,7629
8
+ weco/optimizer.py,sha256=nOKFmwPdFLcQ7RF4ielsD7iRfPMxvGr07pS9ocbW9C8,41282
9
+ weco/panels.py,sha256=fnGPtmvxpx21AuBCtCFu1f_BpSxybNr2lhjIIKIutrY,16133
10
+ weco/utils.py,sha256=erDDrA_g3KSlel6YEAGALlV_k8ftT-VQnPT1BrmzK8k,7021
11
+ weco-0.3.3.dist-info/licenses/LICENSE,sha256=9LUfoGHjLPtak2zps2kL2tm65HAZIICx_FbLaRuS4KU,11337
12
+ weco-0.3.3.dist-info/METADATA,sha256=aAE9oMp_SKJiQSG189k7fsb5ovvidj2t1DTuqNfvc68,28700
13
+ weco-0.3.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
14
+ weco-0.3.3.dist-info/entry_points.txt,sha256=ixJ2uClALbCpBvnIR6BXMNck8SHAab8eVkM9pIUowcs,39
15
+ weco-0.3.3.dist-info/top_level.txt,sha256=F0N7v6e2zBSlsorFv-arAq2yDxQbzX3KVO8GxYhPUeE,5
16
+ weco-0.3.3.dist-info/RECORD,,
@@ -1,16 +0,0 @@
1
- weco/__init__.py,sha256=ClO0uT6GKOA0iSptvP0xbtdycf0VpoPTq37jHtvlhtw,303
2
- weco/api.py,sha256=dUjzuOKKvayzZ_1B4j40eK9Ofk264jsc6vOR1afsszY,18523
3
- weco/auth.py,sha256=O31Hoj-Loi8DWJJG2LfeWgUMuNqAUeGDpd2ZGjA9Ah0,9997
4
- weco/chatbot.py,sha256=EIK2WaOul9gn_yHLThjsZV7RnE8t3XQPwgRkO5tybSU,38415
5
- weco/cli.py,sha256=579f6jf-ZWuFAmNXDisRY7zWr7vw2YZQuC_QX8-qxx0,11460
6
- weco/constants.py,sha256=V6yFugTznKm5EC2_jr4I_whd7sqI80HiPggRn0az580,406
7
- weco/credits.py,sha256=C08x-TRcLg3ccfKqMGNRY7zBn7t3r7LZ119bxgfztaI,7629
8
- weco/optimizer.py,sha256=mJU8_0bo_6dS2PEj1E3dQHvNH9V4e8NSLNE55tmvspw,42291
9
- weco/panels.py,sha256=fnGPtmvxpx21AuBCtCFu1f_BpSxybNr2lhjIIKIutrY,16133
10
- weco/utils.py,sha256=TT57S0YGMuMWPFNsn0tcexNHZd-kBEjDeiOLWxANiQU,6117
11
- weco-0.3.1.dist-info/licenses/LICENSE,sha256=9LUfoGHjLPtak2zps2kL2tm65HAZIICx_FbLaRuS4KU,11337
12
- weco-0.3.1.dist-info/METADATA,sha256=e5xozCmFPB7ih2ntFNYQAMXAU_O8Kw3NDSiRhaNEu4c,31856
13
- weco-0.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
14
- weco-0.3.1.dist-info/entry_points.txt,sha256=ixJ2uClALbCpBvnIR6BXMNck8SHAab8eVkM9pIUowcs,39
15
- weco-0.3.1.dist-info/top_level.txt,sha256=F0N7v6e2zBSlsorFv-arAq2yDxQbzX3KVO8GxYhPUeE,5
16
- weco-0.3.1.dist-info/RECORD,,
File without changes