coiled 1.127.1.dev3__py3-none-any.whl → 1.127.1.dev5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of coiled might be problematic. Click here for more details.

coiled/batch.py CHANGED
@@ -17,6 +17,7 @@ def run(
17
17
  workspace: str | None = None,
18
18
  software: str | None = None,
19
19
  container: str | None = None,
20
+ run_on_host: bool | None = None,
20
21
  env: list | dict | None = None,
21
22
  secret_env: list | dict | None = None,
22
23
  tag: list | dict | None = None,
@@ -46,7 +47,11 @@ def run(
46
47
  package_sync_strict: bool = False,
47
48
  package_sync_conda_extras: list | None = None,
48
49
  package_sync_ignore: list[str] | None = None,
50
+ local_upload_path: str | None = None,
51
+ buffers_to_upload: list[dict] | None = None,
49
52
  host_setup_script: str | None = None,
53
+ host_setup_script_content: str | None = None,
54
+ command_as_script: bool | None = None,
50
55
  ignore_container_entrypoint: bool | None = None,
51
56
  job_timeout: str | None = None,
52
57
  logger=None,
@@ -61,8 +66,12 @@ def run(
61
66
  takes a list of dictionaries, so you can specify multiple environment variables for each task.
62
67
  For example, ``[{"FOO": 1, "BAR": 2}, {"FOO": 3, "BAR": 4}]`` will pass ``FOO=1 BAR=2`` to one task and
63
68
  ``FOO=3 BAR=4`` to another.
69
+ buffers_to_upload
70
+ takes a list of dictionaries; each should have the path where the file should be written on the VM(s),
71
+ relative to the working directory, and an ``io.BytesIO`` buffer which provides the content of the file,
72
+ for example ``[{"relative_path": "hello.txt", "buffer": io.BytesIO(b"hello")}]``.
64
73
  """
65
- if isinstance(command, str):
74
+ if isinstance(command, str) and not command.startswith("#!") and not command_as_script:
66
75
  command = shlex.split(command)
67
76
 
68
77
  env = dict_to_key_val_list(env)
@@ -76,6 +85,7 @@ def run(
76
85
  workspace=workspace,
77
86
  software=software,
78
87
  container=container,
88
+ run_on_host=run_on_host,
79
89
  env=env,
80
90
  secret_env=secret_env,
81
91
  tag=tag,
@@ -106,7 +116,11 @@ def run(
106
116
  package_sync_strict=package_sync_strict,
107
117
  package_sync_conda_extras=package_sync_conda_extras,
108
118
  package_sync_ignore=package_sync_ignore,
119
+ local_upload_path=local_upload_path,
120
+ buffers_to_upload=buffers_to_upload,
109
121
  host_setup_script=host_setup_script,
122
+ host_setup_script_content=host_setup_script_content,
123
+ command_as_script=command_as_script,
110
124
  ignore_container_entrypoint=ignore_container_entrypoint,
111
125
  job_timeout=job_timeout,
112
126
  logger=logger,
coiled/cli/batch/run.py CHANGED
@@ -503,6 +503,14 @@ def batch_run_cli(ctx, **kwargs):
503
503
 
504
504
  def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
505
505
  command = kwargs["command"]
506
+ user_files = []
507
+
508
+ if isinstance(command, str) and (command.startswith("#!") or kwargs.get("command_as_script")):
509
+ user_files.append({
510
+ "path": "script",
511
+ "content": command,
512
+ })
513
+ command = ["script"]
506
514
 
507
515
  # Handle command as string case (e.g. `coiled batch run "python myscript.py"`)
508
516
  if len(command) == 1:
@@ -524,7 +532,6 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
524
532
  # unescape escaped COILED env vars in command
525
533
  command = [part.replace("\\$COILED", "$COILED") for part in command]
526
534
 
527
- user_files = []
528
535
  kwargs_from_header = None
529
536
 
530
537
  # identify implicit files referenced in commands like "python foo.py" or "foo.sh"
@@ -715,8 +722,8 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
715
722
  if user_files_from_content:
716
723
  user_files.extend(user_files_from_content)
717
724
 
718
- host_setup_content = None
719
- if kwargs["host_setup_script"]:
725
+ host_setup_content = kwargs.get("host_setup_script_content")
726
+ if not host_setup_content and kwargs["host_setup_script"]:
720
727
  with open(kwargs["host_setup_script"]) as f:
721
728
  host_setup_content = f.read()
722
729
 
@@ -870,6 +877,7 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
870
877
  or kwargs.get("pipe_to_files")
871
878
  or kwargs.get("input_filestore")
872
879
  or kwargs.get("output_filestore")
880
+ or kwargs.get("buffers_to_upload")
873
881
  ):
874
882
  fs_base_name = kwargs["name"] or f"batch-job-{job_id}"
875
883
 
@@ -890,9 +898,11 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
890
898
  {"id": out_fs["id"], "output": True, "path": "/scratch/batch/", "primary": True},
891
899
  ])
892
900
 
893
- if kwargs.get("local_upload_path") or kwargs.get("local_sync_path"):
901
+ if kwargs.get("local_upload_path") or kwargs.get("local_sync_path") or kwargs.get("buffers_to_upload"):
894
902
  upload_to_filestore_with_ui(
895
- fs=in_fs, local_dir=kwargs.get("local_upload_path") or kwargs.get("local_sync_path")
903
+ fs=in_fs,
904
+ local_dir=kwargs.get("local_upload_path") or kwargs.get("local_sync_path"),
905
+ file_buffers=kwargs.get("buffers_to_upload"),
896
906
  )
897
907
 
898
908
  # Run the job on a cluster
coiled/filestore.py CHANGED
@@ -111,7 +111,7 @@ def download_from_filestore_with_ui(fs, into=".", name_includes=None):
111
111
  )
112
112
 
113
113
 
114
- def upload_to_filestore_with_ui(fs, local_dir):
114
+ def upload_to_filestore_with_ui(fs, local_dir, file_buffers=None):
115
115
  # TODO (future enhancement) send write status
116
116
  # this is tricky because status is stored on the "attachment" object, which might not exist yet
117
117
  # because we want to be able to upload files before cluster has been created
@@ -123,16 +123,24 @@ def upload_to_filestore_with_ui(fs, local_dir):
123
123
  Align.left(f"Currently uploading: [blue]{f or ''}[/blue]"),
124
124
  )
125
125
 
126
- if fs and local_dir:
127
- files, total_bytes = FilestoreManager.get_files_for_upload(local_dir)
126
+ files = []
127
+ total_bytes = None
128
128
 
129
+ if fs:
130
+ if local_dir:
131
+ files_from_path, total_bytes = FilestoreManager.get_files_for_upload(local_dir)
132
+ files.extend(files_from_path)
133
+ if file_buffers:
134
+ files.extend(file_buffers)
135
+
136
+ if files:
129
137
  size_label = "Bytes"
130
138
  size_scale = 1
131
139
 
132
- if total_bytes > 10_000_000:
140
+ if total_bytes and total_bytes > 10_000_000:
133
141
  size_label = "Mb"
134
142
  size_scale = 1_000_000
135
- elif total_bytes > 10_000:
143
+ elif total_bytes and total_bytes > 10_000:
136
144
  size_label = "Kb"
137
145
  size_scale = 1_000
138
146
 
@@ -146,7 +154,75 @@ def upload_to_filestore_with_ui(fs, local_dir):
146
154
  "label": size_label,
147
155
  "total": total_bytes / size_scale if size_scale > 1 else total_bytes,
148
156
  "completed": done_bytes / size_scale if size_scale > 1 else done_bytes,
149
- },
157
+ }
158
+ if total_bytes
159
+ else {},
160
+ ])
161
+
162
+ # files_for_upload is type list[dict] where each dict has "relative_path" key
163
+ upload_info = FilestoreManager.get_signed_upload_urls(fs["id"], files_for_upload=files)
164
+
165
+ upload_urls = upload_info.get("urls")
166
+ existing_blobs = upload_info.get("existing")
167
+
168
+ for file in files:
169
+ relative_path = file.get("relative_path")
170
+ local_path = file.get("local_path")
171
+ buffer = file.get("buffer")
172
+ if local_path:
173
+ size = file.get("size")
174
+ skip_upload = False
175
+ existing_blob_info = existing_blobs.get(relative_path)
176
+ if existing_blob_info:
177
+ modified = os.path.getmtime(local_path)
178
+ if size == existing_blob_info["size"] and modified < existing_blob_info["modified"]:
179
+ skip_upload = True
180
+
181
+ if not skip_upload:
182
+ progress.batch_title = progress_title(local_path)
183
+ progress.refresh()
184
+
185
+ FilestoreManager.upload_to_signed_url(local_path, upload_urls[relative_path])
186
+
187
+ done_bytes += size
188
+
189
+ elif buffer:
190
+ FilestoreManager.upload_bytes_to_signed_url(buffer, upload_urls[relative_path])
191
+
192
+ done_files += 1
193
+
194
+ progress.update_progress([
195
+ {"label": "Files", "total": len(files), "completed": done_files},
196
+ {
197
+ "label": size_label,
198
+ "total": total_bytes / size_scale if size_scale > 1 else total_bytes,
199
+ "completed": done_bytes / size_scale if size_scale > 1 else done_bytes,
200
+ }
201
+ if total_bytes
202
+ else {},
203
+ ])
204
+
205
+ progress.update_title(Align.left(f"Uploaded to cloud storage: [green]{fs['name']}[green]"))
206
+
207
+ # TODO (future enhancement) send write status
208
+ # FilestoreManager.post_fs_write_status(fs["id"], "finish", {"complete": True, "file_count": len(files)})
209
+
210
+ return len(files)
211
+
212
+
213
+ def upload_bytes_to_fs(fs, files):
214
+ def progress_title(f=None):
215
+ return Group(
216
+ Align.left(Status(f"Uploading to cloud storage: [green]{fs['name']}[green]", spinner="dots")),
217
+ Align.left(f"Currently uploading: [blue]{f or ''}[/blue]"),
218
+ )
219
+
220
+ if fs and files:
221
+ with coiled.utils.SimpleRichProgressPanel.from_defaults(title=progress_title()) as progress:
222
+ done_files = 0
223
+
224
+ progress.update_progress([
225
+ {"label": "Files", "total": len(files), "completed": done_files},
150
226
  ])
151
227
 
152
228
  upload_info = FilestoreManager.get_signed_upload_urls(fs["id"], files_for_upload=files)
@@ -154,7 +230,10 @@ def upload_to_filestore_with_ui(fs, local_dir):
154
230
  upload_urls = upload_info.get("urls")
155
231
  existing_blobs = upload_info.get("existing")
156
232
 
157
- for local_path, relative_path, size in files:
233
+ for file in files:
234
+ local_path = file.get("local_path")
235
+ relative_path = file.get("relative_path")
236
+ size = file.get("size")
158
237
  skip_upload = False
159
238
  existing_blob_info = existing_blobs.get(relative_path)
160
239
  if existing_blob_info:
@@ -169,15 +248,9 @@ def upload_to_filestore_with_ui(fs, local_dir):
169
248
  FilestoreManager.upload_to_signed_url(local_path, upload_urls[relative_path])
170
249
 
171
250
  done_files += 1
172
- done_bytes += size
173
251
 
174
252
  progress.update_progress([
175
253
  {"label": "Files", "total": len(files), "completed": done_files},
176
- {
177
- "label": size_label,
178
- "total": total_bytes / size_scale if size_scale > 1 else total_bytes,
179
- "completed": done_bytes / size_scale if size_scale > 1 else done_bytes,
180
- },
181
254
  ])
182
255
 
183
256
  progress.update_title(Align.left(f"Uploaded to cloud storage: [green]{fs['name']}[green]"))
@@ -230,7 +303,7 @@ class FilestoreManagerWithoutHttp:
230
303
 
231
304
  @classmethod
232
305
  def get_signed_upload_urls(cls, fs_id, files_for_upload):
233
- paths = [p for _, p, _ in files_for_upload] # relative paths
306
+ paths = [f["relative_path"] for f in files_for_upload] # relative paths
234
307
  return cls.make_req(f"/api/v2/filestore/fs/{fs_id}/signed-urls/upload", post=True, data={"paths": paths})
235
308
 
236
309
  @classmethod
@@ -269,7 +342,7 @@ class FilestoreManagerWithoutHttp:
269
342
  relative_path = Path(os.path.relpath(local_path, local_dir)).as_posix()
270
343
  size = os.path.getsize(local_path)
271
344
 
272
- files.append((local_path, relative_path, size))
345
+ files.append({"local_path": local_path, "relative_path": relative_path, "size": size})
273
346
  total_bytes += size
274
347
 
275
348
  return files, total_bytes
@@ -301,32 +374,36 @@ class FilestoreManagerWithoutHttp:
301
374
  relative_path = Path(os.path.relpath(local_path, local_dir)).as_posix()
302
375
  size = os.path.getsize(local_path)
303
376
 
304
- files.append((local_path, relative_path, size))
377
+ files.append({"local_path": local_path, "relative_path": relative_path, "size": size})
305
378
  total_bytes += size
306
379
  return files, total_bytes
307
380
 
308
381
  @classmethod
309
- def upload_to_signed_url(cls, local_path, url):
382
+ def upload_to_signed_url(cls, local_path: str, url: str):
310
383
  with open(local_path, "rb") as f:
311
384
  buffer = io.BytesIO(f.read())
312
- buffer.seek(0)
313
- num_bytes = len(buffer.getvalue())
314
- with httpx.Client(http2=cls.http2) as client:
315
- headers = {"Content-Type": "binary/octet-stream", "Content-Length": str(num_bytes)}
316
- if "blob.core.windows.net" in url:
317
- headers["x-ms-blob-type"] = "BlockBlob"
318
- # TODO error handling
319
- client.put(
320
- url,
321
- # content must be set to an iterable of bytes, rather than a
322
- # bytes object (like file.read()) because files >2GB need
323
- # to be sent in chunks to avoid an OverflowError in the
324
- # Python stdlib ssl module, and httpx will not chunk up a
325
- # bytes object automatically.
326
- content=buffer,
327
- timeout=60,
328
- headers=headers,
329
- )
385
+ cls.upload_bytes_to_signed_url(buffer=buffer, url=url)
386
+
387
+ @classmethod
388
+ def upload_bytes_to_signed_url(cls, buffer: io.BytesIO, url: str):
389
+ buffer.seek(0)
390
+ num_bytes = len(buffer.getvalue())
391
+ with httpx.Client(http2=cls.http2) as client:
392
+ headers = {"Content-Type": "binary/octet-stream", "Content-Length": str(num_bytes)}
393
+ if "blob.core.windows.net" in url:
394
+ headers["x-ms-blob-type"] = "BlockBlob"
395
+ # TODO error handling
396
+ client.put(
397
+ url,
398
+ # content must be set to an iterable of bytes, rather than a
399
+ # bytes object (like file.read()) because files >2GB need
400
+ # to be sent in chunks to avoid an OverflowError in the
401
+ # Python stdlib ssl module, and httpx will not chunk up a
402
+ # bytes object automatically.
403
+ content=buffer,
404
+ timeout=60,
405
+ headers=headers,
406
+ )
330
407
 
331
408
  @classmethod
332
409
  def download_from_signed_url(cls, local_path, url, max_retries=3, verbose=False):
coiled/types.py CHANGED
@@ -435,10 +435,20 @@ class AWSOptions(BackendOptions, total=False):
435
435
  If possible, this will attempt to put workers in the same cluster placement group (in theory this can
436
436
  result in better network between workers, since they'd be physically close to each other in datacenter,
437
437
  though we haven't seen this to have much benefit in practice).
438
+ use_worker_placement_group
439
+ Cluster placement group for only the workers, not the scheduler.
440
+ use_efa
441
+ Attach Elastic Fabric Adapter for faster interconnect between instances.
442
+ Only some instance types are supported.
443
+ use_worker_efa
444
+ Attach Elastic Fabric Adapter only on cluster workers, not the scheduler.
438
445
  """
439
446
 
440
447
  keypair_name: Optional[str]
441
448
  use_placement_group: Optional[bool]
449
+ use_worker_placement_group: Optional[bool]
450
+ use_efa: Optional[bool]
451
+ use_worker_efa: Optional[bool]
442
452
 
443
453
 
444
454
  class GCPOptions(BackendOptions, total=False):
coiled/utils.py CHANGED
@@ -2176,6 +2176,8 @@ class SimpleRichProgressPanel(Progress):
2176
2176
 
2177
2177
  def update_progress(self, tasks: list[dict]):
2178
2178
  for task in tasks:
2179
+ if not task:
2180
+ continue
2179
2181
  if task["label"] not in self._tasks_from_dicts:
2180
2182
  self._tasks_from_dicts[task["label"]] = self.add_task(task["label"])
2181
2183
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: coiled
3
- Version: 1.127.1.dev3
3
+ Version: 1.127.1.dev5
4
4
  Summary: Python client for coiled.io dask clusters
5
5
  Project-URL: Homepage, https://coiled.io
6
6
  Maintainer-email: Coiled <info@coiled.io>
@@ -2,7 +2,7 @@ coiled/__init__.py,sha256=SslrfRlKfunoTJMCfopqezFePDDKS8LG_FhLkMMo_TE,2274
2
2
  coiled/__main__.py,sha256=4XILBmm4ChZYo7h3JzgslFU0tjQVzdX0XtYcQLhCv0w,171
3
3
  coiled/analytics.py,sha256=96CeL8KVnm3-76lvT4fNkgML0lHebaLea-YP3wW-KqM,7486
4
4
  coiled/auth.py,sha256=go7vWtCwBbwtWyNrNBxg28xBrdjrETbE-mn3KaN5Yl8,1867
5
- coiled/batch.py,sha256=1gsUboonjwOQjFa55VFL3pdBWhZ-Vh-fk8N8qkWNqa4,6214
5
+ coiled/batch.py,sha256=LLrkq910sXNtrssPEqK-KU6-aWRS9XuQEaRQw8wPUP4,7023
6
6
  coiled/capture_environment.py,sha256=K5mNwUe8KM_l203h3oZvcZUJTrGozT-CH1GrtuPYv8U,18458
7
7
  coiled/cluster.py,sha256=wwK9-SefbFBUEHJjYHXlWN3YvPcvR6XD2J-RdPCGhgc,5049
8
8
  coiled/coiled.yaml,sha256=z70xzNUy0E8b8Yt12tYYmjJDDmp-U63oUD61ccuu5N0,1037
@@ -12,7 +12,7 @@ coiled/context.py,sha256=MXWsW0swdYU-x32U7NiM0xt-t65maiEO8rvsGGeScFw,4754
12
12
  coiled/core.py,sha256=Cu6hKBXRWSztbpF8huAyU_1glnt1gacnO9vExvG-Cwo,110796
13
13
  coiled/errors.py,sha256=5aXhNXgidMm0VgPYT3MZMwlHhRE57MeSmqAJFHYaa8Y,305
14
14
  coiled/exceptions.py,sha256=jUXgmfO0LitGe8ztSmAlzb9eQV3X5c0kNO2BwtEDTYg,3099
15
- coiled/filestore.py,sha256=GkRLsGAPR2l6_GPwysvfB0rC3Gt8HIkQaP87fTKN5kM,14912
15
+ coiled/filestore.py,sha256=Tc594sTm2e_TGsuxYQNFO8jCyT2fxjRMVzEmNRAJdCM,17952
16
16
  coiled/function.py,sha256=pONtcTUDRr0dykhVV73AWXqU_pb-4-lvOA0tR3i_PlA,21654
17
17
  coiled/plugins.py,sha256=w03H2Sck54QmwrVOM1BVscNiVeQsHyGm1yWNzPPPWKs,3424
18
18
  coiled/prefect.py,sha256=j1EOg7Xuw82TNRonAGEoZ3ANlwN8GM5aDXRYSjC0lnA,1497
@@ -22,8 +22,8 @@ coiled/software.py,sha256=eh3kZ8QBuIt_SPvTy_x6TXEv87SGqOJkO4HW-LCSsas,8701
22
22
  coiled/software_utils.py,sha256=JqGO8nstm0Hi-UCIBhHa25reNeVO-XOnv5eLoIyRcBo,40367
23
23
  coiled/spans.py,sha256=Aq2MOX6JXaJ72XiEmymPcsefs-kID85MEw6t-kOdPWI,2078
24
24
  coiled/spark.py,sha256=kooZCZT4dLMG_AQEOlaf6gj86G3UdowDfbw-Eiq94MU,9059
25
- coiled/types.py,sha256=abXo79w_FraFFCqC-6kX6S9D6yEPJzCsWXwYbpo1W-c,13984
26
- coiled/utils.py,sha256=Urochb9rYx_lKu54Sp_CPyXxtJ7iyfPZeRgf6ld8uEk,78464
25
+ coiled/types.py,sha256=mpYmhX9FGoe_pE_GU8Nx2nZXdwbn-DBSHYXl87ferjM,14442
26
+ coiled/utils.py,sha256=WalMzNUbjVUJvAMgXaTTyDC0HPSM_zsHiYRHK7lmkkk,78514
27
27
  coiled/websockets.py,sha256=BaCNiOgPVtm55R_rf1TK78tzoFSKLp4z2UCW7S57iNA,5956
28
28
  coiled/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
29
29
  coiled/cli/config.py,sha256=WKZkDpPYywYS665krQLqid2RgSchDOddZqBkkwBtyVk,2267
@@ -42,7 +42,7 @@ coiled/cli/utils.py,sha256=cp7ToFGRpUKi6iNL6BbLjzgrgeTYSX_C55lYhaKWHHA,3479
42
42
  coiled/cli/batch/__init__.py,sha256=539CnfnqqcW7ndSufTS-Ie5FGZiElMYxE0Ptu70wo8M,660
43
43
  coiled/cli/batch/list.py,sha256=lU3mXeKUHltzpdbo7Txcd64T1-XpHE-wxTFIBoTR28w,3300
44
44
  coiled/cli/batch/logs.py,sha256=CbGK5GRjtu0UKGetKY_yTcjB-3PdHy5Xf4MLksYTE8g,1947
45
- coiled/cli/batch/run.py,sha256=zdIVlX3nVNzqeVxaS3lrvZPd3vZaKSS6RauQKE0B_aw,36616
45
+ coiled/cli/batch/run.py,sha256=_hFcSETJMhXCqcDXe06wb2Kly4FJanTNjkIPdxO65bg,37077
46
46
  coiled/cli/batch/status.py,sha256=cSpMRC0YHFQ18y-XEXFjTOza_OTd7mtT-NX7sRSLMhk,4193
47
47
  coiled/cli/batch/util.py,sha256=ztisJzDHpsYswrdk_hI7USUkfponI8oLhcKAa6DXJo4,1026
48
48
  coiled/cli/batch/wait.py,sha256=dEP1OH0IYteqaYU2UdrGm_vU7IDE7h3l3Cb3KBoaCCY,3879
@@ -95,8 +95,8 @@ coiled/v2/widgets/__init__.py,sha256=Bt3GHTTyri-kFUaqGRVydDM-sCg5NdNujDg2RyvgV8U
95
95
  coiled/v2/widgets/interface.py,sha256=YeMQ5qdRbbpM04x9qIg2LE1xwxyRxFbdDYnkrwHazPk,301
96
96
  coiled/v2/widgets/rich.py,sha256=3rU5-yso92NdeEh3uSvEE-GwPNyp6i0Nb5PE5czXCik,28974
97
97
  coiled/v2/widgets/util.py,sha256=Y8qpGqwNzqfCzgyRFRy7vcscBoXqop-Upi4HLPpXLgg,3120
98
- coiled-1.127.1.dev3.dist-info/METADATA,sha256=kiRJC5gmRLQevypb0ekf9beDPcXbsNnQQjSC_dh2rMI,2181
99
- coiled-1.127.1.dev3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
100
- coiled-1.127.1.dev3.dist-info/entry_points.txt,sha256=C8dz1ST_bTlTO-kNvuHBJQma9PyJPotg0S4xpPt5aHY,47
101
- coiled-1.127.1.dev3.dist-info/licenses/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
102
- coiled-1.127.1.dev3.dist-info/RECORD,,
98
+ coiled-1.127.1.dev5.dist-info/METADATA,sha256=n6zw1SHWBo_ZlfyKYVUTTjKxDnlnDFzg64BZpTtsW5Y,2181
99
+ coiled-1.127.1.dev5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
100
+ coiled-1.127.1.dev5.dist-info/entry_points.txt,sha256=C8dz1ST_bTlTO-kNvuHBJQma9PyJPotg0S4xpPt5aHY,47
101
+ coiled-1.127.1.dev5.dist-info/licenses/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
102
+ coiled-1.127.1.dev5.dist-info/RECORD,,