zarrify-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zarrify/__about__.py ADDED
@@ -0,0 +1,3 @@
+ # SPDX-FileCopyrightText: 2025-present Yurii Zubov <zubov452@gmail.com>
+
+ __version__ = "0.0.1"
zarrify/__init__.py ADDED
@@ -0,0 +1,8 @@
+ # SPDX-FileCopyrightText: 2025-present Yurii Zubov <zubov452@gmail.com>
+ #
+ # SPDX-License-Identifier: MIT
+
+
+ from zarrify.to_zarr import to_zarr
+
+ __all__ = ["to_zarr"]
zarrify/formats/mrc.py ADDED
@@ -0,0 +1,73 @@
+ import zarr
+ import mrcfile
+ import os
+ from typing import Tuple
+ from dask.array.core import slices_from_chunks, normalize_chunks
+ from dask.distributed import Client, wait
+ from toolz import partition_all
+ import time
+ from zarrify.utils.volume import Volume
+
+
+ class Mrc3D(Volume):
+
+     def __init__(
+         self,
+         src_path: str,
+         axes: list[str],
+         scale: list[float],
+         translation: list[float],
+         units: list[str],
+     ):
+         """Construct all the necessary attributes for the proper conversion of MRC to OME-NGFF Zarr.
+
+         Args:
+             src_path (str): path to the source MRC file.
+         """
+         super().__init__(src_path, axes, scale, translation, units)
+
+         self.memmap = mrcfile.mmap(self.src_path, mode="r")
+         self.ndim = self.memmap.data.ndim
+         self.shape = self.memmap.data.shape
+         self.dtype = self.memmap.data.dtype
+
+     def save_chunk(self, z_arr: zarr.Array, chunk_slice: Tuple[slice, ...]):
+         """Copy data from a particular part of the input MRC array into a specific chunk of the output zarr array.
+
+         Args:
+             z_arr (zarr.core.Array): output zarr array object
+             chunk_slice (Tuple[slice, ...]): slice of the MRC array to copy.
+         """
+         mrc_file = mrcfile.mmap(self.src_path, mode="r")
+
+         if not (mrc_file.data[chunk_slice] == 0).all():
+             z_arr[chunk_slice] = mrc_file.data[chunk_slice]
+
+     def write_to_zarr(
+         self,
+         z_arr: zarr.Array,
+         client: Client,
+     ):
+         """Use an mrcfile memmap to access small parts of the MRC file and write them into zarr chunks.
+
+         Args:
+             z_arr (zarr.Array): output zarr array to write the dataset into.
+             client (Client): instance of a dask client
+         """
+
+         out_slices = slices_from_chunks(
+             normalize_chunks(z_arr.chunks, shape=z_arr.shape)
+         )
+         out_slices_partitioned = tuple(partition_all(100000, out_slices))
+
+         for idx, part in enumerate(out_slices_partitioned):
+
+             print(f"{idx + 1} / {len(out_slices_partitioned)}")
+             start = time.time()
+             fut = client.map(lambda v: self.save_chunk(z_arr, v), part)
+             print(
+                 f"Submitted {len(part)} tasks to the scheduler in {time.time() - start}s"
+             )
+             # wait for all the futures to complete
+             result = wait(fut)
+             print(f"Completed {len(part)} tasks in {time.time() - start}s")
zarrify/formats/n5.py ADDED
@@ -0,0 +1,46 @@
+ import zarr
+ import os
+ from zarrify.utils.volume import Volume
+
+ class N53D(Volume):
+     def __init__(
+         self,
+         src_path: str,
+         axes: list[str],
+         scale: list[float],
+         translation: list[float],
+         units: list[str],
+     ):
+         """Construct all the necessary attributes for the proper conversion of N5 to OME-NGFF Zarr.
+
+         Args:
+             src_path (str): path to the source N5 array.
+         """
+         super().__init__(src_path, axes, scale, translation, units)
+         self.store_path, self.arr_path = self.separate_store_path(src_path, '')
+         self.n5_store = zarr.N5Store(self.store_path)
+         self.n5_arr = zarr.open(store=self.n5_store, path=self.arr_path, mode='r')
+
+         self.shape = self.n5_arr.shape
+         self.dtype = self.n5_arr.dtype
+         self.chunks = self.n5_arr.chunks
+
+     @staticmethod
+     def separate_store_path(store, path):
+         """
+         Sometimes a full OS path is passed to a node, leading to an
+         empty ('') node.path attribute. The correct way is to separate
+         the path to the container (.n5, .zarr) from the path to the
+         array within the container.
+
+         Args:
+             store (str): path to the store
+             path (str): path to the array/group within the container (.n5 or .zarr)
+
+         Returns:
+             (str, str): regularized store path and group/array path
+         """
+         new_store, path_prefix = os.path.split(store)
+         if ".n5" in path_prefix:
+             return store, path
+         return N53D.separate_store_path(new_store, os.path.join(path_prefix, path))
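For illustration, here is how the recursive split behaves on hypothetical paths (these calls are not part of the package; the store path is peeled off one directory at a time until the .n5 container is found):

    # Hypothetical example of separating a container path from an array path.
    store, arr_path = N53D.separate_store_path("/data/volume.n5", "raw")
    # -> ("/data/volume.n5", "raw"): the last store component already ends in .n5.

    store, arr_path = N53D.separate_store_path("/data/volume.n5/raw", "")
    # -> ("/data/volume.n5", "raw/"): "raw" is moved from the store path to the
    #    array path (os.path.join with an empty string leaves a trailing separator).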
zarrify/formats/tiff.py ADDED
@@ -0,0 +1,70 @@
+ from tifffile import imread
+ import numpy as np
+ import zarr
+ import os
+ from dask.distributed import Client, wait
+ import time
+ import dask.array as da
+ import copy
+ from zarrify.utils.volume import Volume
+
+
+ class Tiff3D(Volume):
+
+     def __init__(
+         self,
+         src_path: str,
+         axes: list[str],
+         scale: list[float],
+         translation: list[float],
+         units: list[str],
+     ):
+         """Construct all the necessary attributes for the proper conversion of tiff to OME-NGFF Zarr.
+
+         Args:
+             src_path (str): path to the source tiff file.
+         """
+         super().__init__(src_path, axes, scale, translation, units)
+
+         self.zarr_store = imread(src_path, aszarr=True)
+         self.zarr_arr = zarr.open(self.zarr_store)
+
+         self.shape = self.zarr_arr.shape
+         self.dtype = self.zarr_arr.dtype
+
+     def write_to_zarr(self, zarray: zarr.Array, client: Client):
+         chunks_list = np.arange(0, zarray.shape[0], zarray.chunks[0])
+
+         src_path = copy.copy(self.src_path)
+
+         start = time.time()
+         fut = client.map(
+             lambda v: write_volume_slab_to_zarr(v, zarray, src_path), chunks_list
+         )
+         print(
+             f"Submitted {len(chunks_list)} tasks to the scheduler in {time.time() - start}s"
+         )
+
+         # wait for all the futures to complete
+         result = wait(fut)
+         print(f"Completed {len(chunks_list)} tasks in {time.time() - start}s")
+
+         return 0
+
+
+ def write_volume_slab_to_zarr(chunk_num: int, zarray: zarr.Array, src_path: str):
+
+     # check if the slab is at the array boundary or not
+     if chunk_num + zarray.chunks[0] > zarray.shape[0]:
+         slab_thickness = zarray.shape[0] - chunk_num
+     else:
+         slab_thickness = zarray.chunks[0]
+
+     slab_shape = [slab_thickness] + list(zarray.shape[-2:])
+     np_slab = np.empty(slab_shape, zarray.dtype)
+
+     tiff_slab = imread(src_path, key=range(chunk_num, chunk_num + slab_thickness, 1))
+     np_slab[0:slab_thickness, :, :] = tiff_slab
+
+     # write a tiff stack slab into zarr array
+     zarray[chunk_num : chunk_num + slab_thickness, :, :] = np_slab
zarrify/formats/tiff_stack.py ADDED
@@ -0,0 +1,81 @@
+ from tifffile import imread
+ import numpy as np
+ import zarr
+ import os
+ from dask.distributed import Client, wait
+ import time
+ import dask.array as da
+ from natsort import natsorted
+ from glob import glob
+ from zarrify.utils.volume import Volume
+
+
+ class TiffStack(Volume):
+
+     def __init__(
+         self,
+         src_path: str,
+         axes: list[str],
+         scale: list[float],
+         translation: list[float],
+         units: list[str],
+     ):
+         """Construct all the necessary attributes for the proper conversion of a tiff stack to OME-NGFF Zarr.
+
+         Args:
+             src_path (str): path to the directory containing the source tiff stack.
+         """
+         super().__init__(src_path, axes, scale, translation, units)
+
+         self.stack_list = natsorted(glob(os.path.join(src_path, "*.tif*")))
+         probe_image_store = imread(self.stack_list[0], aszarr=True)
+         probe_image_arr = da.from_zarr(probe_image_store)
+
+         self.dtype = probe_image_arr.dtype
+         self.shape = [len(self.stack_list)] + list(probe_image_arr.shape)
+
+     def write_tile_slab_to_zarr(
+         self, chunk_num: int, zarray: zarr.Array, src_volume: list
+     ):
+
+         # check if the slab is at the array boundary or not
+         if chunk_num + zarray.chunks[0] > zarray.shape[0]:
+             slab_thickness = zarray.shape[0] - chunk_num
+         else:
+             slab_thickness = zarray.chunks[0]
+
+         slab_shape = [slab_thickness] + list(zarray.shape[-2:])
+         np_slab = np.empty(slab_shape, zarray.dtype)
+
+         # combine tiles into a slab with thickness equal to the chunk size in z direction
+         for slab_index in np.arange(chunk_num, chunk_num + slab_thickness, 1):
+             try:
+                 image_tile = imread(src_volume[slab_index])
+             except Exception:
+                 print(
+                     f"Tiff tile with index {slab_index} is not present in tiff stack."
+                 )
+                 continue
+             np_slab[slab_index - chunk_num, :, :] = image_tile
+
+         # write a tiff stack slab into a zarr array
+         zarray[chunk_num : chunk_num + slab_thickness, :, :] = np_slab
+
+     # parallel writing of tiff stack into zarr array
+     def write_to_zarr(self, zarray: zarr.Array, client: Client):
+         chunks_list = np.arange(0, zarray.shape[0], zarray.chunks[0])
+
+         start = time.time()
+         fut = client.map(
+             lambda v: self.write_tile_slab_to_zarr(v, zarray, self.stack_list),
+             chunks_list,
+         )
+         print(
+             f"Submitted {len(chunks_list)} tasks to the scheduler in {time.time() - start}s"
+         )
+
+         # wait for all the futures to complete
+         result = wait(fut)
+         print(f"Completed {len(chunks_list)} tasks in {time.time() - start}s")
+
+         return 0
zarrify/to_zarr.py ADDED
@@ -0,0 +1,115 @@
+ import zarr
+ from numcodecs import Zstd
+ import os
+ import click
+ import sys
+ from dask.distributed import Client
+ import time
+ from zarrify.formats.tiff_stack import TiffStack
+ from zarrify.formats.tiff import Tiff3D
+ from zarrify.formats.mrc import Mrc3D
+ from zarrify.formats.n5 import N53D
+ from zarrify.utils.dask_utils import initialize_dask_client
+
+
+ # @click.command("zarrify")
+ # @click.option(
+ #     "--src",
+ #     "-s",
+ #     type=click.Path(exists=True),
+ #     help="Input file/directory location",
+ # )
+ # @click.option("--dest", "-d", type=click.STRING, help="Output .zarr file path.")
+ # @click.option(
+ #     "--num_workers", "-w", default=100, type=click.INT, help="Number of dask workers"
+ # )
+ # @click.option(
+ #     "--cluster",
+ #     "-c",
+ #     default="",
+ #     type=click.STRING,
+ #     help="Which instance of dask client to use. Local client - 'local', cluster 'lsf'",
+ # )
+ # @click.option(
+ #     "--zarr_chunks",
+ #     "-zc",
+ #     nargs=3,
+ #     default=(64, 128, 128),
+ #     type=click.INT,
+ #     help="Chunk size for (z, y, x) axis order. z-axis is normal to the tiff stack plane. Default (64, 128, 128)",
+ # )
+ # @click.option(
+ #     "--axes",
+ #     "-a",
+ #     nargs=3,
+ #     default=("z", "y", "x"),
+ #     type=str,
+ #     help="Metadata axis names. Order matters. \n Example: -a z y x",
+ # )
+ # @click.option(
+ #     "--translation",
+ #     "-t",
+ #     nargs=3,
+ #     default=(0.0, 0.0, 0.0),
+ #     type=float,
+ #     help="Metadata translation(offset) value. Order matters. \n Example: -t 1.0 2.0 3.0",
+ # )
+ # @click.option(
+ #     "--scale",
+ #     "-s",
+ #     nargs=3,
+ #     default=(1.0, 1.0, 1.0),
+ #     type=float,
+ #     help="Metadata scale value. Order matters. \n Example: -s 1.0 2.0 3.0",
+ # )
+ # @click.option(
+ #     "--units",
+ #     "-u",
+ #     nargs=3,
+ #     default=("nanometer", "nanometer", "nanometer"),
+ #     type=str,
+ #     help="Metadata unit names. Order matters. \n Example: -u nanometer nanometer nanometer",
+ # )
+ # def cli(src, dest, num_workers, cluster, zarr_chunks, axes, translation, scale, units):
+
+ # create a dask client to submit tasks
+ # client = initialize_dask_client(cluster)
+
+ def to_zarr(src: str,
+             dest: str,
+             client: Client,
+             num_workers: int = 20,
+             zarr_chunks: list[int] = [128] * 3,
+             axes: list[str] = ("z", "y", "x"),
+             scale: list[float] = [1.0] * 3,
+             translation: list[float] = [0.0] * 3,
+             units: list[str] = ["nanometer"] * 3):
+     if ".n5" in src:
+         dataset = N53D(src, axes, scale, translation, units)
+     elif src.endswith(".mrc"):
+         dataset = Mrc3D(src, axes, scale, translation, units)
+     elif src.endswith(".tif") or src.endswith(".tiff"):
+         dataset = Tiff3D(src, axes, scale, translation, units)
+     elif os.path.isdir(src):
+         dataset = TiffStack(src, axes, scale, translation, units)
+
+     z_store = zarr.NestedDirectoryStore(dest)
+     z_root = zarr.open(store=z_store, mode="a")
+     z_arr = z_root.require_dataset(
+         name="s0",
+         shape=dataset.shape,
+         dtype=dataset.dtype,
+         chunks=zarr_chunks,
+         compressor=Zstd(level=6),
+     )
+
+     # write in parallel to zarr using dask
+     client.cluster.scale(num_workers)
+     dataset.write_to_zarr(z_arr, client)
+     client.cluster.scale(0)
+     # populate zarr metadata
+     dataset.add_ome_metadata(z_root)
+
+
+ # if __name__ == "__main__":
+ #     cli()
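For orientation, a minimal usage sketch of the to_zarr entry point, assuming a local dask cluster and hypothetical input/output paths (not part of the package):

    from dask.distributed import Client, LocalCluster
    from zarrify.to_zarr import to_zarr

    # Hypothetical paths; .mrc, .tif/.tiff, an .n5 array, or a directory of tiffs
    # is dispatched to the matching format class defined above.
    client = Client(LocalCluster())
    to_zarr(
        src="/data/volume.mrc",
        dest="/data/volume.zarr",
        client=client,
        num_workers=4,
        zarr_chunks=[64, 128, 128],
        axes=["z", "y", "x"],
        scale=[4.0, 4.0, 4.0],
        translation=[0.0, 0.0, 0.0],
        units=["nanometer"] * 3,
    )

The converted data lands in a single "s0" array inside the destination zarr group, and the group attributes are populated by add_ome_metadata (see zarrify/utils/volume.py below).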
zarrify/utils/dask_utils.py ADDED
@@ -0,0 +1,41 @@
+ from dask_jobqueue import LSFCluster
+ from dask.distributed import Client, LocalCluster
+ import os
+ import sys
+
+
+ def initialize_dask_client(cluster_type: str) -> Client:
+     """Initialize dask client.
+
+     Args:
+         cluster_type (str): type of the cluster, either 'local' or 'lsf'
+
+     Returns:
+         (Client): instance of a dask client
+     """
+     if cluster_type == "":
+         print("Did not specify which instance of the dask client to use!")
+         sys.exit(0)
+     elif cluster_type == "lsf":
+         num_cores = 1
+         cluster = LSFCluster(
+             cores=num_cores,
+             processes=num_cores,
+             memory=f"{15 * num_cores}GB",
+             ncpus=num_cores,
+             mem=15 * num_cores,
+             walltime="48:00",
+             local_directory="/scratch/$USER/",
+         )
+     elif cluster_type == "local":
+         cluster = LocalCluster()
+     else:
+         raise ValueError(f"Unsupported cluster type: {cluster_type}")
+
+     client = Client(cluster)
+     with open(
+         os.path.join(os.getcwd(), "dask_dashboard_link" + ".txt"), "w"
+     ) as text_file:
+         text_file.write(str(client.dashboard_link))
+     print(client.dashboard_link)
+     return client
zarrify/utils/volume.py ADDED
@@ -0,0 +1,54 @@
+ import zarr
+
+
+ class Volume:
+
+     def __init__(
+         self,
+         src_path: str,
+         axes: list[str],
+         scale: list[float],
+         translation: list[float],
+         units: list[str],
+     ):
+         self.src_path = src_path
+         self.metadata = {
+             "axes": axes,
+             "translation": translation,
+             "scale": scale,
+             "units": units,
+         }
+
+     def add_ome_metadata(self, root: zarr.Group):
+         """Add OME-NGFF multiscale metadata to the zarr attributes file (.zattrs).
+
+         Args:
+             root (zarr.Group): root group of the output zarr array
+         """
+         # json template for a multiscale structure
+         z_attrs: dict = {"multiscales": [{}]}
+         z_attrs["multiscales"][0]["axes"] = [
+             {"name": axis, "type": "space", "unit": unit}
+             for axis, unit in zip(self.metadata["axes"], self.metadata["units"])
+         ]
+         z_attrs["multiscales"][0]["coordinateTransformations"] = [
+             {"scale": [1.0, 1.0, 1.0], "type": "scale"}
+         ]
+         z_attrs["multiscales"][0]["datasets"] = [
+             {
+                 "coordinateTransformations": [
+                     {"scale": self.metadata["scale"], "type": "scale"},
+                     {
+                         "translation": self.metadata["translation"],
+                         "type": "translation",
+                     },
+                 ],
+                 "path": list(root.array_keys())[0],
+             }
+         ]
+
+         z_attrs["multiscales"][0]["name"] = "/" if root.path == "" else root.path
+         z_attrs["multiscales"][0]["version"] = "0.4"
+
+         # add multiscale template to .attrs
+         root.attrs["multiscales"] = z_attrs["multiscales"]
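For reference, with a hypothetical scale of [4.0, 4.0, 4.0], zero translation, and the default axes/units, add_ome_metadata writes a multiscales attribute roughly like the following (the dataset path comes from the first array key of the group, e.g. the "s0" array created by to_zarr):

    # Approximate .zattrs content produced by add_ome_metadata (hypothetical values).
    expected_zattrs = {
        "multiscales": [
            {
                "axes": [
                    {"name": "z", "type": "space", "unit": "nanometer"},
                    {"name": "y", "type": "space", "unit": "nanometer"},
                    {"name": "x", "type": "space", "unit": "nanometer"},
                ],
                "coordinateTransformations": [{"scale": [1.0, 1.0, 1.0], "type": "scale"}],
                "datasets": [
                    {
                        "coordinateTransformations": [
                            {"scale": [4.0, 4.0, 4.0], "type": "scale"},
                            {"translation": [0.0, 0.0, 0.0], "type": "translation"},
                        ],
                        "path": "s0",
                    }
                ],
                "name": "/",
                "version": "0.4",
            }
        ]
    }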
zarrify-0.0.1.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,28 @@
+ BSD 3-Clause License
+
+ Copyright (c) 2024, Howard Hughes Medical Institute
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice, this
+    list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
zarrify-0.0.1.dist-info/METADATA ADDED
@@ -0,0 +1,23 @@
+ Metadata-Version: 2.3
+ Name: zarrify
+ Version: 0.0.1
+ Summary:
+ Author: Yurii Zubov
+ Author-email: zubov452@gmail.com
+ Requires-Python: >=3.11
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Dist: click (>=8.1.8,<9.0.0)
+ Requires-Dist: colorama (>=0.4.6,<0.5.0)
+ Requires-Dist: dask (>=2024.12.1,<2025.0.0)
+ Requires-Dist: dask-jobqueue (==0.8.2)
+ Requires-Dist: imagecodecs (>=2024.12.30,<2025.0.0)
+ Requires-Dist: mrcfile (>=1.5.3,<2.0.0)
+ Requires-Dist: natsort (>=8.4.0,<9.0.0)
+ Requires-Dist: tifffile (>=2025.1.10,<2026.0.0)
+ Requires-Dist: zarr (==2.16.1)
+ Description-Content-Type: text/markdown
+
+
zarrify-0.0.1.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
+ zarrify/__about__.py,sha256=HPktDE8PNXBOjJb_lSg6ji4H2nkvEFYUlj4x6RAUgvE,95
+ zarrify/__init__.py,sha256=qQat7kir84KIAzcPbg_YDcmVt8AX2M1jhi6EYM7MTG0,157
+ zarrify/formats/mrc.py,sha256=-_jpEd9cz86ydZJPY8RPrN0RYcJl4GjeGKd006tYJBo,2495
+ zarrify/formats/n5.py,sha256=90HLdCC3IqgY78WEf-AMClqDKwzfXjpn512BYhXl0Zs,1521
+ zarrify/formats/tiff.py,sha256=QuNcdtcOsg1iGdEMMHl1uNkY8lcx-Xv25QKolZmxkOU,2144
+ zarrify/formats/tiff_stack.py,sha256=NtHn-2XIzNyRBH6MrxtXCdfi1C3wV_7JKdn_2cOb6eI,2761
+ zarrify/to_zarr.py,sha256=1fZyU6d1hPMS_g4V3XhN08b9daR7uFn5G9iIRwR9wgs,3385
+ zarrify/utils/dask_utils.py,sha256=BC3M5Fd0tOEoSaanHL0WCGpwxNkboxQDUTr9ROBiq_o,1108
+ zarrify/utils/volume.py,sha256=dTSkodL2utp_zVDBxcS4HfidE80_ISl32x7Vp4M0s3g,1717
+ zarrify-0.0.1.dist-info/LICENSE.txt,sha256=CGTDUwAd3nAjOp8LJaPGfTzWEzTnqfLYCulsMyw8B_s,1517
+ zarrify-0.0.1.dist-info/METADATA,sha256=H7Gadh1uFyFIywlfAQCGv962i1Bhqp245qZ377Tp1iE,754
+ zarrify-0.0.1.dist-info/WHEEL,sha256=RaoafKOydTQ7I_I3JTrPCg6kUmTgtm4BornzOqyEfJ8,88
+ zarrify-0.0.1.dist-info/entry_points.txt,sha256=bLnFtFGlpKYvDMeVDTU3LSM3SEEPhWyuU2sx8-k5vKg,47
+ zarrify-0.0.1.dist-info/RECORD,,
zarrify-0.0.1.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: poetry-core 2.0.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
zarrify-0.0.1.dist-info/entry_points.txt ADDED
@@ -0,0 +1,3 @@
+ [console_scripts]
+ zarrify=zarrify.to_zarr:cli
+