weco 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
weco/__init__.py CHANGED
@@ -1,4 +1,4 @@
  # DO NOT EDIT
- __pkg_version__ = "0.2.6"
+ __pkg_version__ = "0.2.8"
  __api_version__ = "v1"
  __base_url__ = f"https://api.aide.weco.ai/{__api_version__}"
weco/api.py CHANGED
@@ -6,14 +6,9 @@ import sys
 
 
  def handle_api_error(e: requests.exceptions.HTTPError, console: rich.console.Console) -> None:
-     """Extract and display error messages from API responses."""
-     try:
-         error_data = e.response.json()
-         error_message = error_data.get("detail", str(e))
-         console.print(f"[bold red]Server Error:[/] {error_message}")
-     except Exception:
-         # If we can't parse the JSON, just show the original error
-         console.print(f"[bold red]Server Error:[/] {str(e)}")
+     """Extract and display error messages from API responses in a structured format."""
+     error_message = str(e)  # Default message
+     console.print(f"[bold red]Error:[/] {error_message}")
      sys.exit(1)
 
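For context, a minimal sketch of how a handler like this is typically invoked (the endpoint and payload below are hypothetical, not taken from the package):

```python
import requests
import rich.console

from weco.api import handle_api_error

console = rich.console.Console()
try:
    # Hypothetical call against the package's v1 base URL
    response = requests.post("https://api.aide.weco.ai/v1/example", json={}, timeout=800)
    response.raise_for_status()  # raises requests.exceptions.HTTPError on 4xx/5xx
except requests.exceptions.HTTPError as e:
    handle_api_error(e, console)  # prints the error and exits with status 1
```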
weco/cli.py CHANGED
@@ -36,7 +36,7 @@ def main() -> None:
      parser = argparse.ArgumentParser(
          description="[bold cyan]Weco CLI[/]", formatter_class=argparse.RawDescriptionHelpFormatter
      )
-     parser.add_argument("--source", type=str, required=True, help="Path to the Python source code (e.g. optimize.py)")
+     parser.add_argument("--source", type=str, required=True, help="Path to the source code (e.g. optimize.py)")
      parser.add_argument(
          "--eval-command", type=str, required=True, help="Command to run for evaluation (e.g. 'python eval.py --arg1=val1')"
      )
@@ -50,6 +50,7 @@ def main() -> None:
      )
      parser.add_argument("--steps", type=int, required=True, help="Number of steps to run")
      parser.add_argument("--model", type=str, required=True, help="Model to use for optimization")
+     parser.add_argument("--log-dir", type=str, default=".runs", help="Directory to store logs and results")
      parser.add_argument(
          "--additional-instructions",
          default=None,
@@ -83,9 +84,11 @@ def main() -> None:
      timeout = 800
 
      # Initialize panels
-     summary_panel = SummaryPanel(maximize=maximize, metric_name=metric_name, total_steps=steps, model=args.model)
+     summary_panel = SummaryPanel(
+         maximize=maximize, metric_name=metric_name, total_steps=steps, model=args.model, runs_dir=args.log_dir
+     )
      plan_panel = PlanPanel()
-     solution_panels = SolutionPanels(metric_name=metric_name)
+     solution_panels = SolutionPanels(metric_name=metric_name, source_fp=source_fp)
      eval_output_panel = EvaluationOutputPanel()
      tree_panel = MetricTreePanel(maximize=maximize)
      layout = create_optimization_layout()
@@ -112,11 +115,11 @@ def main() -> None:
      with Live(layout, refresh_per_second=refresh_rate, screen=True) as live:
          # Define the runs directory (.runs/<session-id>)
          session_id = session_response["session_id"]
-         runs_dir = pathlib.Path(".runs") / session_id
+         runs_dir = pathlib.Path(args.log_dir) / session_id
          runs_dir.mkdir(parents=True, exist_ok=True)
 
-         # Save the original code (.runs/<session-id>/original.py)
-         runs_copy_source_fp = runs_dir / "original.py"
+         # Save the original code (.runs/<session-id>/original.<extension>)
+         runs_copy_source_fp = runs_dir / f"original.{source_fp.suffix}"
          write_to_path(fp=runs_copy_source_fp, content=source_code)
 
          # Write the code string to the source file path
@@ -197,8 +200,8 @@ def main() -> None:
              api_keys=api_keys,
              timeout=timeout,
          )
-         # Save next solution (.runs/<session-id>/step_<step>.py)
-         write_to_path(fp=runs_dir / f"step_{step}.py", content=eval_and_next_solution_response["code"])
+         # Save next solution (.runs/<session-id>/step_<step>.<extension>)
+         write_to_path(fp=runs_dir / f"step_{step}.{source_fp.suffix}", content=eval_and_next_solution_response["code"])
 
          # Write the next solution to the source file
          write_to_path(fp=source_fp, content=eval_and_next_solution_response["code"])
@@ -348,8 +351,8 @@ def main() -> None:
          )
          best_solution_content = f"# Best solution from Weco with a score of {best_score_str}\n\n{best_solution_code}"
 
-         # Save best solution to .runs/<session-id>/best.py
-         write_to_path(fp=runs_dir / "best.py", content=best_solution_content)
+         # Save best solution to .runs/<session-id>/best.<extension>
+         write_to_path(fp=runs_dir / f"best.{source_fp.suffix}", content=best_solution_content)
 
          # write the best solution to the source file
          write_to_path(fp=source_fp, content=best_solution_content)
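A quick sketch of the `pathlib` behavior these artifact paths rely on; note that `Path.suffix` includes the leading dot (the values here are illustrative):

```python
import pathlib

source_fp = pathlib.Path("optimize.py")  # hypothetical --source value
print(source_fp.suffix)                  # ".py" -- the leading dot is included

runs_dir = pathlib.Path(".runs") / "my-session-id"  # args.log_dir defaults to ".runs"
print(runs_dir / f"original.{source_fp.suffix}")    # .runs/my-session-id/original..py
```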
weco/panels.py CHANGED
@@ -6,12 +6,13 @@ from rich.panel import Panel
  from rich.syntax import Syntax
  from typing import Dict, List, Optional, Union, Tuple
  from .utils import format_number
+ import pathlib
 
 
  class SummaryPanel:
      """Holds a summary of the optimization session."""
 
-     def __init__(self, maximize: bool, metric_name: str, total_steps: int, model: str, session_id: str = None):
+     def __init__(self, maximize: bool, metric_name: str, total_steps: int, model: str, runs_dir: str, session_id: str = None):
          self.maximize = maximize
          self.metric_name = metric_name
          self.goal = ("Maximizing" if self.maximize else "Minimizing") + f" {self.metric_name}..."
@@ -19,7 +20,8 @@ class SummaryPanel:
          self.total_output_tokens = 0
          self.total_steps = total_steps
          self.model = model
-         self.session_id = session_id or "N/A"
+         self.runs_dir = runs_dir
+         self.session_id = session_id if session_id is not None else "N/A"
          self.progress = Progress(
              TextColumn("[progress.description]{task.description}"),
              BarColumn(bar_width=20),
@@ -45,6 +47,8 @@ class SummaryPanel:
          """Create a summary panel with the relevant information."""
          layout = Layout(name="summary")
          summary_table = Table(show_header=False, box=None, padding=(0, 1))
+ 
+         summary_table.add_row("")
          # Goal
          if final_message is not None:
              summary_table.add_row(f"[bold cyan]Result:[/] {final_message}")
@@ -55,8 +59,7 @@ class SummaryPanel:
          summary_table.add_row(f"[bold cyan]Model:[/] {self.model}")
          summary_table.add_row("")
          # Log directory
-         runs_dir = f".runs/{self.session_id}"
-         summary_table.add_row(f"[bold cyan]Logs:[/] [blue underline]{runs_dir}[/]")
+         summary_table.add_row(f"[bold cyan]Logs:[/] [blue underline]{self.runs_dir}/{self.session_id}[/]")
          summary_table.add_row("")
          # Token counts
          summary_table.add_row(
@@ -256,13 +259,19 @@ class EvaluationOutputPanel:
  class SolutionPanels:
      """Displays the current and best solutions side by side."""
 
-     def __init__(self, metric_name: str):
+     def __init__(self, metric_name: str, source_fp: pathlib.Path):
          # Current solution
          self.current_node = None
          # Best solution
          self.best_node = None
          # Metric name
          self.metric_name = metric_name.capitalize()
+         # Determine the lexer for the source file
+         self.lexer = self._determine_lexer(source_fp)
+ 
+     def _determine_lexer(self, source_fp: pathlib.Path) -> str:
+         """Determine the lexer for the source file."""
+         return Syntax.from_path(source_fp).lexer
 
      def update(self, current_node: Union[Node, None], best_node: Union[Node, None]):
          """Update the current and best solutions."""
@@ -280,7 +289,7 @@ class SolutionPanels:
          # Current solution (without score)
          current_title = f"[bold]💡 Current Solution (Step {current_step})"
          current_panel = Panel(
-             Syntax(str(current_code), "python", theme="monokai", line_numbers=True, word_wrap=False),
+             Syntax(str(current_code), self.lexer, theme="monokai", line_numbers=True, word_wrap=False),
              title=current_title,
              border_style="yellow",
              expand=True,
@@ -290,7 +299,7 @@ class SolutionPanels:
          # Best solution
          best_title = f"[bold]🏆 Best Solution ([green]{self.metric_name}: {f'{best_score:.4f}' if best_score is not None else 'N/A'}[/])"
          best_panel = Panel(
-             Syntax(str(best_code), "python", theme="monokai", line_numbers=True, word_wrap=False),
+             Syntax(str(best_code), self.lexer, theme="monokai", line_numbers=True, word_wrap=False),
              title=best_title,
              border_style="green",
              expand=True,
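The lexer detection above builds on rich's `Syntax` helpers. A minimal sketch of the same idea using the public `Syntax.guess_lexer` classmethod (the file name and snippet are hypothetical):

```python
from rich.console import Console
from rich.syntax import Syntax

code = "__global__ void scale(float *x) { x[0] *= 2.0f; }"
# Infer a lexer name from the file path (and, optionally, its contents),
# so non-Python sources are highlighted correctly instead of always using "python".
lexer = Syntax.guess_lexer("kernel.cu", code=code)
Console().print(Syntax(code, lexer, theme="monokai", line_numbers=True, word_wrap=False))
```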
weco-0.2.6.dist-info/METADATA → weco-0.2.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: weco
- Version: 0.2.6
+ Version: 0.2.8
  Summary: Documentation for `weco`, a CLI for using Weco AI's code optimizer.
  Author-email: Weco AI Team <contact@weco.ai>
  License: MIT
@@ -9,7 +9,7 @@ Keywords: AI,Code Optimization,Code Generation
  Classifier: Programming Language :: Python :: 3
  Classifier: Operating System :: OS Independent
  Classifier: License :: OSI Approved :: MIT License
- Requires-Python: >=3.12
+ Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: requests
@@ -20,13 +20,19 @@ Requires-Dist: build; extra == "dev"
  Requires-Dist: setuptools_scm; extra == "dev"
  Dynamic: license-file
 
- # Weco CLI Code Optimizer for Machine Learning Engineers
+ # Weco: The Evaluation-Driven AI Code Optimizer
 
  [![Python](https://img.shields.io/badge/Python-3.12.0-blue)](https://www.python.org)
- [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE)
  [![PyPI version](https://badge.fury.io/py/weco.svg)](https://badge.fury.io/py/weco)
+ [![AIDE](https://img.shields.io/badge/AI--Driven_Exploration-arXiv-orange?style=flat-square&logo=arxiv)](https://arxiv.org/abs/2502.13138)
 
- `weco` is a command-line interface for interacting with Weco AI's code optimizer, powered by [AI-Driven Exploration](https://arxiv.org/abs/2502.13138). It helps you automate the improvement of your code for tasks like GPU kernel optimization, feature engineering, model development, and prompt engineering.
+ Weco systematically optimizes your code, guided directly by your evaluation metrics.
+ 
+ Example applications include:
+ 
+ - **GPU Kernel Optimization**: Reimplement PyTorch functions using CUDA, Triton, or Metal, optimizing for `latency`, `throughput`, or `memory_bandwidth`.
+ - **Model Development**: Tune feature transformations or architectures, optimizing for `validation_accuracy`, `AUC`, or `Sharpe Ratio`.
+ - **Prompt Engineering**: Refine prompts for LLMs, optimizing for `win_rate`, `relevance`, or `format_adherence`.
 
  https://github.com/user-attachments/assets/cb724ef1-bff6-4757-b457-d3b2201ede81
 
@@ -40,37 +40,6 @@ The `weco` CLI leverages a tree search approach guided by Large Language Models
 
  ---
 
- ## Example Use Cases
- 
- Here's how `weco` can be applied to common ML engineering tasks:
- 
- * **GPU Kernel Optimization:**
-   * **Goal:** Improve the speed or efficiency of low-level GPU code.
-   * **How:** `weco` iteratively refines CUDA, Triton, Metal, or other kernel code specified in your `--source` file.
-   * **`--eval-command`:** Typically runs a script that compiles the kernel, executes it, and benchmarks performance (e.g., latency, throughput).
-   * **`--metric`:** Examples include `latency`, `throughput`, `TFLOPS`, `memory_bandwidth`. Optimize to `minimize` latency or `maximize` throughput.
- 
- * **Feature Engineering:**
-   * **Goal:** Discover better data transformations or feature combinations for your machine learning models.
-   * **How:** `weco` explores different processing steps or parameters within your feature transformation code (`--source`).
-   * **`--eval-command`:** Executes a script that applies the features, trains/validates a model using those features, and prints a performance score.
-   * **`--metric`:** Examples include `accuracy`, `AUC`, `F1-score`, `validation_loss`. Usually optimized to `maximize` accuracy/AUC/F1 or `minimize` loss.
- 
- * **Model Development:**
-   * **Goal:** Tune hyperparameters or experiment with small architectural changes directly within your model's code.
-   * **How:** `weco` modifies hyperparameter values (like learning rate, layer sizes if defined in the code) or structural elements in your model definition (`--source`).
-   * **`--eval-command`:** Runs your model training and evaluation script, printing the key performance indicator.
-   * **`--metric`:** Examples include `validation_accuracy`, `test_loss`, `inference_time`, `perplexity`. Optimize according to the metric's nature (e.g., `maximize` accuracy, `minimize` loss).
- 
- * **Prompt Engineering:**
-   * **Goal:** Refine prompts used within larger systems (e.g., for LLM interactions) to achieve better or more consistent outputs.
-   * **How:** `weco` modifies prompt templates, examples, or instructions stored in the `--source` file.
-   * **`--eval-command`:** Executes a script that uses the prompt, generates an output, evaluates that output against desired criteria (e.g., using another LLM, checking for keywords, format validation), and prints a score.
-   * **`--metric`:** Examples include `quality_score`, `relevance`, `task_success_rate`, `format_adherence`. Usually optimized to `maximize`.
- 
- ---
- 
- 
  ## Setup
 
  1. **Install the Package:**
@@ -97,70 +72,30 @@ Here's how `weco` can be applied to common ML engineering tasks:
 
  ---
 
- ### Examples
+ ### Example: Optimizing Simple PyTorch Operations
+ 
+ This basic example shows how to optimize a simple PyTorch function for speedup.
 
- **Example 1: Optimizing PyTorch simple operations**
+ For more advanced examples, including **[Metal/MLX](/examples/metal/README.md), [Triton](/examples/triton/README.md), [CUDA kernel optimization](/examples/cuda/README.md)**, and **[ML model optimization](/examples/spaceship-titanic/README.md)**, please see the `README.md` files within the corresponding subdirectories under the [`examples/`](./examples/) folder.
 
  ```bash
+ # Navigate to the example directory
  cd examples/hello-kernel-world
- pip install torch
+ 
+ # Install dependencies
+ pip install torch
+ 
+ # Run Weco
  weco --source optimize.py \
      --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
      --metric speedup \
      --maximize true \
      --steps 15 \
-     --model claude-3-7-sonnet-20250219 \
+     --model gemini-2.5-pro-exp-03-25 \
      --additional-instructions "Fuse operations in the forward method while ensuring the max float deviation remains small. Maintain the same format of the code."
  ```
 
- Note that if you have an NVIDIA gpu, change the device to `cuda`. If you are running this on Apple Silicon, set it to `mps`.
- 
- **Example 2: Optimizing MLX operations with instructions from a file**
- 
- Lets optimize a 2D convolution operation in [`mlx`](https://github.com/ml-explore/mlx) using [Metal](https://developer.apple.com/documentation/metal/). Sometimes, additional context or instructions are too complex for a single command-line string. You can provide a path to a file containing these instructions.
- 
- ```bash
- cd examples/metal
- pip install mlx
- weco --source optimize.py \
-     --eval-command "python evaluate.py --solution-path optimize.py" \
-     --metric speedup \
-     --maximize true \
-     --steps 30 \
-     --model o3-mini \
-     --additional-instructions examples.rst
- ```
- 
- **Example 3: Level Agnostic Optimization: Causal Self Attention with Triton & CUDA**
- 
- Given how useful causal multihead self attention is to transformers, we've seen its wide adoption across ML engineering and AI research. Its great to keep things at a high-level (in PyTorch) when doing research, but when moving to production you often need to write highly customized low-level kernels to make things run as fast as they can. The `weco` CLI can optimize kernels across a variety of different abstraction levels and frameworks. Example 2 uses Metal but lets explore two more frameworks:
- 
- 1. [Triton](https://github.com/triton-lang/triton)
- ```bash
- cd examples/triton
- pip install torch triton
- weco --source optimize.py \
-     --eval-command "python evaluate.py --solution-path optimize.py" \
-     --metric speedup \
-     --maximize true \
-     --steps 30 \
-     --model gemini-2.5-pro-preview-03-25 \
-     --additional-instructions "Use triton to optimize the code while ensuring a small max float diff. Maintain the same code format."
- ```
- 
- 2. [CUDA](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html)
- ```bash
- cd examples/cuda
- pip install torch
- weco --source optimize.py \
-     --eval-command "python evaluate.py --solution-path optimize.py" \
-     --metric speedup \
-     --maximize true \
-     --steps 30 \
-     --model gemini-2.5-pro-preview-03-25 \
-     --additional-instructions guide.md
- ```
- 
+ **Note:** If you have an NVIDIA GPU, change the device in the `--eval-command` to `cuda`. If you are running this on Apple Silicon, set it to `mps`.
 
  ---
 
@@ -169,16 +104,28 @@ Given how useful causal multihead self attention is to transformers, we've seen
  | Argument | Description | Required |
  | :-------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------ | :------- |
  | `--source` | Path to the source code file that will be optimized (e.g., `optimize.py`). | Yes |
- | `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
- | `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
+ | `--eval-command` | Command to run for evaluating the code in `--source`. This command should print the target `--metric` and its value to the terminal (stdout/stderr). See note below. | Yes |
+ | `--metric` | The name of the metric you want to optimize (e.g., 'accuracy', 'speedup', 'loss'). This metric name should match what's printed by your `--eval-command`. | Yes |
  | `--maximize` | Whether to maximize (`true`) or minimize (`false`) the metric. | Yes |
  | `--steps` | Number of optimization steps (LLM iterations) to run. | Yes |
- | `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
- | `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+ | `--model` | Model identifier for the LLM to use (e.g., `gpt-4o`, `claude-3.5-sonnet`). Recommended models to try include `o3-mini`, `claude-3-haiku`, and `gemini-2.5-pro-exp-03-25`. | Yes |
+ | `--additional-instructions` | (Optional) Natural language description of specific instructions OR path to a file containing detailed instructions to guide the LLM. | No |
+ | `--log-dir` | (Optional) Path to the directory to log intermediate steps and final optimization result. Defaults to `.runs/`. | No |
 
  ---
 
+ ### Performance & Expectations
+ 
+ Weco, powered by the AIDE algorithm, optimizes code iteratively based on your evaluation results. Achieving significant improvements, especially on complex research-level tasks, often requires substantial exploration time.
 
+ The following plot from the independent [Research Engineering Benchmark (RE-Bench)](https://metr.org/AI_R_D_Evaluation_Report.pdf) report shows the performance of AIDE (the algorithm behind Weco) on challenging ML research engineering tasks over different time budgets.
+ <p align="center">
+   <img src="https://github.com/user-attachments/assets/ff0e471d-2f50-4e2d-b718-874862f533df" alt="RE-Bench Performance Across Time" width="60%"/>
+ </p>
+ 
+ As shown, AIDE demonstrates strong performance gains over time, surpassing lower human expert percentiles within hours and continuing to improve. This highlights the potential of evaluation-driven optimization but also indicates that reaching high levels of performance comparable to human experts on difficult benchmarks can take considerable time (tens of hours in this specific benchmark, corresponding to many `--steps` in the Weco CLI). Factor this into your planning when setting the number of `--steps` for your optimization runs.
+ 
+ ---
 
  ### Important Note on Evaluation
 
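As a usage sketch of the newly documented `--log-dir` flag (the other values are illustrative, borrowed from the example above):

```bash
# Store run artifacts (original.<ext>, step_<n>.<ext>, best.<ext>) outside the default .runs/
weco --source optimize.py \
     --eval-command "python evaluate.py --solution-path optimize.py --device cpu" \
     --metric speedup \
     --maximize true \
     --steps 15 \
     --model o3-mini \
     --log-dir ./experiment-logs
```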
weco-0.2.6.dist-info/RECORD → weco-0.2.8.dist-info/RECORD RENAMED
@@ -0,0 +1,11 @@
+ weco/__init__.py,sha256=VUGnYC55MzHzmhg_IT0RRJ3QJDE3D2QFmvwGhhJ8hwA,124
+ weco/api.py,sha256=89lB2572jApAxkA0DDppDnJKBwvZTa3kH9jFpC0LFDQ,3313
+ weco/cli.py,sha256=8WotHdSRGP5GwLucEQltiMRH8zVrGB84n8o2mhUaCco,17408
+ weco/panels.py,sha256=R_df-VAbWyLoqCA9A6UzbIGZ9sm2IgJO4idnyjmrHQk,12701
+ weco/utils.py,sha256=hhIebUPnetFMfNSFfcsKVw1TSpeu_Zw3rBPPnxDie0U,3911
+ weco-0.2.8.dist-info/licenses/LICENSE,sha256=p_GQqJBvuZgkLNboYKyH-5dhpTDlKs2wq2TVM55WrWE,1065
+ weco-0.2.8.dist-info/METADATA,sha256=5KLO8utu8ItVkPmJ6uLy3NHCUpLsRit_OvyT70Sp7XI,9179
+ weco-0.2.8.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ weco-0.2.8.dist-info/entry_points.txt,sha256=ixJ2uClALbCpBvnIR6BXMNck8SHAab8eVkM9pIUowcs,39
+ weco-0.2.8.dist-info/top_level.txt,sha256=F0N7v6e2zBSlsorFv-arAq2yDxQbzX3KVO8GxYhPUeE,5
+ weco-0.2.8.dist-info/RECORD,,
@@ -1,11 +0,0 @@
- weco/__init__.py,sha256=a3RxrwZhsuinalG_NtT0maKLFXFbgmgXFahpFgcEtZQ,124
- weco/api.py,sha256=8rIf2Fy3tN6GW7BG1CaggtfE9pW56I1erzwLCgawcVE,3511
- weco/cli.py,sha256=6rGEm_L-WSkJIT-jgfFmf2i_DXkQn6ILhqYQlptLFew,17159
- weco/panels.py,sha256=9gq5C43hgUmQgl6tW-f2dBbDjlsBKBatSaUVKeGm4Zw,12296
- weco/utils.py,sha256=hhIebUPnetFMfNSFfcsKVw1TSpeu_Zw3rBPPnxDie0U,3911
- weco-0.2.6.dist-info/licenses/LICENSE,sha256=p_GQqJBvuZgkLNboYKyH-5dhpTDlKs2wq2TVM55WrWE,1065
- weco-0.2.6.dist-info/METADATA,sha256=Ih-nkHxq_SJKccYXoVHmxytElqG75BSU1DGAAC9ipkk,11581
- weco-0.2.6.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- weco-0.2.6.dist-info/entry_points.txt,sha256=ixJ2uClALbCpBvnIR6BXMNck8SHAab8eVkM9pIUowcs,39
- weco-0.2.6.dist-info/top_level.txt,sha256=F0N7v6e2zBSlsorFv-arAq2yDxQbzX3KVO8GxYhPUeE,5
- weco-0.2.6.dist-info/RECORD,,