fsspec 2026.1.0__py3-none-any.whl → 2026.2.0__py3-none-any.whl
This diff shows the contents of publicly released package versions as published to their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.
- fsspec/_version.py +2 -2
- fsspec/caching.py +10 -17
- fsspec/compression.py +14 -0
- fsspec/implementations/arrow.py +3 -1
- fsspec/implementations/cached.py +81 -63
- fsspec/implementations/webhdfs.py +18 -0
- fsspec/implementations/zip.py +15 -11
- fsspec/parquet.py +73 -81
- fsspec/utils.py +11 -1
- {fsspec-2026.1.0.dist-info → fsspec-2026.2.0.dist-info}/METADATA +5 -4
- {fsspec-2026.1.0.dist-info → fsspec-2026.2.0.dist-info}/RECORD +13 -13
- {fsspec-2026.1.0.dist-info → fsspec-2026.2.0.dist-info}/WHEEL +0 -0
- {fsspec-2026.1.0.dist-info → fsspec-2026.2.0.dist-info}/licenses/LICENSE +0 -0
fsspec/_version.py
CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID

-__version__ = version = '2026.1.0'
-__version_tuple__ = version_tuple = (2026, 1, 0)
+__version__ = version = '2026.2.0'
+__version_tuple__ = version_tuple = (2026, 2, 0)

 __commit_id__ = commit_id = None
fsspec/caching.py
CHANGED
@@ -25,7 +25,7 @@ else:
 T = TypeVar("T")


-logger = logging.getLogger("fsspec")
+logger = logging.getLogger("fsspec.caching")

 Fetcher = Callable[[int, int], bytes]  # Maps (start, end) to bytes
 MultiFetcher = Callable[[list[int, int]], bytes]  # Maps [(start, end)] to bytes

@@ -214,7 +214,7 @@ class MMapCache(BaseCache):
         if self.multi_fetcher:
             logger.debug(f"MMap get blocks {ranges}")
             for idx, r in enumerate(self.multi_fetcher(ranges)):
-
+                sstart, send = ranges[idx]
                 logger.debug(f"MMap copy block ({sstart}-{send}")
                 self.cache[sstart:send] = r
         else:

@@ -391,19 +391,8 @@ class BlockCache(BaseCache):
         if start >= self.size or start >= end:
             return b""

-        # byte position -> block numbers
-        start_block_number = start // self.blocksize
-        end_block_number = end // self.blocksize
-
-        # these are cached, so safe to do multiple calls for the same start and end.
-        for block_number in range(start_block_number, end_block_number + 1):
-            self._fetch_block_cached(block_number)
-
         return self._read_cache(
-            start,
-            end,
-            start_block_number=start_block_number,
-            end_block_number=end_block_number,
+            start, end, start // self.blocksize, (end - 1) // self.blocksize
         )

     def _fetch_block(self, block_number: int) -> bytes:

@@ -439,6 +428,8 @@ class BlockCache(BaseCache):
         """
         start_pos = start % self.blocksize
         end_pos = end % self.blocksize
+        if end_pos == 0:
+            end_pos = self.blocksize

         self.hit_count += 1
         if start_block_number == end_block_number:

@@ -662,12 +653,12 @@ class KnownPartsOfAFile(BaseCache):
         pass

     def _fetch(self, start: int | None, stop: int | None) -> bytes:
+        logger.debug("Known parts request %s %s", start, stop)
         if start is None:
             start = 0
         if stop is None:
             stop = self.size
         self.total_requested_bytes += stop - start
-
         out = b""
         started = False
         loc_old = 0

@@ -698,11 +689,13 @@ class KnownPartsOfAFile(BaseCache):
             elif loc0 <= stop <= loc1:
                 # end block
                 self.hit_count += 1
-
+                out = out + self.data[(loc0, loc1)][: stop - loc0]
+                return out
             loc_old = loc1
         self.miss_count += 1
         if started and not self.strict:
-
+            out = out + b"\x00" * (stop - loc_old)
+            return out
         raise ValueError
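For context on the BlockCache change above: the new end_pos handling matters when a read ends exactly on a block boundary, where end % blocksize is 0 and the last block index has to come from end - 1. A minimal standalone sketch of that arithmetic (illustrative block size and byte range, not fsspec code):

blocksize = 5

def block_bounds(start: int, end: int) -> tuple[int, int, int, int]:
    # Block indices covering [start, end): the end block comes from end - 1,
    # and an end position of 0 wraps around to a full block.
    start_block = start // blocksize
    end_block = (end - 1) // blocksize
    start_pos = start % blocksize
    end_pos = end % blocksize or blocksize
    return start_block, end_block, start_pos, end_pos

# Reading bytes 3..10 of a file split into 5-byte blocks touches blocks 0 and 1,
# and the read ends at offset 5 of block 1, not at offset 0.
assert block_bounds(3, 10) == (0, 1, 3, 5)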
fsspec/compression.py
CHANGED
@@ -163,6 +163,20 @@ try:

     register_compression("zstd", zstd.ZstdFile, "zst")
 except ImportError:
+    try:
+        import zstandard as zstd
+
+        def zstandard_file(infile, mode="rb"):
+            if "r" in mode:
+                cctx = zstd.ZstdDecompressor()
+                return cctx.stream_reader(infile)
+            else:
+                cctx = zstd.ZstdCompressor(level=10)
+                return cctx.stream_writer(infile)
+
+        register_compression("zstd", zstandard_file, "zst")
+    except ImportError:
+        pass
     pass

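With either backend registered under the name "zstd", compressed files go through fsspec's normal compression machinery. A small usage sketch, assuming either the zstd or the zstandard package is installed; the file name is illustrative:

import fsspec

# Round-trip a file through the registered "zstd" codec; compression can also be
# inferred from the ".zst" suffix with compression="infer".
with fsspec.open("example.zst", mode="wb", compression="zstd") as f:
    f.write(b"hello fsspec")

with fsspec.open("example.zst", mode="rb", compression="zstd") as f:
    assert f.read() == b"hello fsspec"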
fsspec/implementations/arrow.py
CHANGED
fsspec/implementations/cached.py
CHANGED
@@ -427,6 +427,7 @@ class CachingFileSystem(ChainedFileSystem):
     def __getattribute__(self, item):
         if item in {
             "load_cache",
+            "_get_cached_file_before_open",
             "_open",
             "save_cache",
             "close_and_update",

@@ -678,46 +679,12 @@ class WholeFileCacheFileSystem(CachingFileSystem):
         out = out[paths[0]]
         return out

-    def _open(self, path, mode="rb", **kwargs):
-
-        if "r" not in mode:
-            hash = self._mapper(path)
-            fn = os.path.join(self.storage[-1], hash)
-            user_specified_kwargs = {
-                k: v
-                for k, v in kwargs.items()
-                # those kwargs were added by open(), we don't want them
-                if k not in ["autocommit", "block_size", "cache_options"]
-            }
-            return LocalTempFile(self, path, mode=mode, fn=fn, **user_specified_kwargs)
-        detail = self._check_file(path)
-        if detail:
-            detail, fn = detail
-            _, blocks = detail["fn"], detail["blocks"]
-            if blocks is True:
-                logger.debug("Opening local copy of %s", path)
-
-                # In order to support downstream filesystems to be able to
-                # infer the compression from the original filename, like
-                # the `TarFileSystem`, let's extend the `io.BufferedReader`
-                # fileobject protocol by adding a dedicated attribute
-                # `original`.
-                f = open(fn, mode)
-                f.original = detail.get("original")
-                return f
-            else:
-                raise ValueError(
-                    f"Attempt to open partially cached file {path}"
-                    f" as a wholly cached file"
-                )
-        else:
-            fn = self._make_local_details(path)
-            kwargs["mode"] = mode
-
+    def _get_cached_file_before_open(self, path, **kwargs):
+        fn = self._make_local_details(path)
         # call target filesystems open
         self._mkcache()
         if self.compression:
-            with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
+            with self.fs._open(path, mode="rb", **kwargs) as f, open(fn, "wb") as f2:
                 if isinstance(f, AbstractBufferedFile):
                     # want no type of caching if just downloading whole thing
                     f.cache = BaseCache(0, f.cache.fetcher, f.size)

@@ -735,7 +702,47 @@ class WholeFileCacheFileSystem(CachingFileSystem):
             else:
                 self.fs.get_file(path, fn)
             self.save_cache()
-
+
+    def _open(self, path, mode="rb", **kwargs):
+        path = self._strip_protocol(path)
+        # For read (or append), (try) download from remote
+        if "r" in mode or "a" in mode:
+            if not self._check_file(path):
+                if self.fs.exists(path):
+                    self._get_cached_file_before_open(path, **kwargs)
+                elif "r" in mode:
+                    raise FileNotFoundError(path)
+
+            detail, fn = self._check_file(path)
+            _, blocks = detail["fn"], detail["blocks"]
+            if blocks is True:
+                logger.debug("Opening local copy of %s", path)
+            else:
+                raise ValueError(
+                    f"Attempt to open partially cached file {path}"
+                    f" as a wholly cached file"
+                )
+
+            # Just reading does not need special file handling
+            if "r" in mode and "+" not in mode:
+                # In order to support downstream filesystems to be able to
+                # infer the compression from the original filename, like
+                # the `TarFileSystem`, let's extend the `io.BufferedReader`
+                # fileobject protocol by adding a dedicated attribute
+                # `original`.
+                f = open(fn, mode)
+                f.original = detail.get("original")
+                return f
+
+        hash = self._mapper(path)
+        fn = os.path.join(self.storage[-1], hash)
+        user_specified_kwargs = {
+            k: v
+            for k, v in kwargs.items()
+            # those kwargs were added by open(), we don't want them
+            if k not in ["autocommit", "block_size", "cache_options"]
+        }
+        return LocalTempFile(self, path, mode=mode, fn=fn, **user_specified_kwargs)


 class SimpleCacheFileSystem(WholeFileCacheFileSystem):

@@ -894,37 +901,16 @@ class SimpleCacheFileSystem(WholeFileCacheFileSystem):
             paths, starts, ends, max_gap=max_gap, on_error=on_error, **kwargs
         )

-    def _open(self, path, mode="rb", **kwargs):
-        path = self._strip_protocol(path)
+    def _get_cached_file_before_open(self, path, **kwargs):
         sha = self._mapper(path)
-
-        if "r" not in mode:
-            fn = os.path.join(self.storage[-1], sha)
-            user_specified_kwargs = {
-                k: v
-                for k, v in kwargs.items()
-                if k not in ["autocommit", "block_size", "cache_options"]
-            }  # those were added by open()
-            return LocalTempFile(
-                self,
-                path,
-                mode=mode,
-                autocommit=not self._intrans,
-                fn=fn,
-                **user_specified_kwargs,
-            )
-        fn = self._check_file(path)
-        if fn:
-            return open(fn, mode)
-
         fn = os.path.join(self.storage[-1], sha)
         logger.debug("Copying %s to local cache", path)
-        kwargs["mode"] = mode

         self._mkcache()
         self._cache_size = None
+
         if self.compression:
-            with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
+            with self.fs._open(path, mode="rb", **kwargs) as f, open(fn, "wb") as f2:
                 if isinstance(f, AbstractBufferedFile):
                     # want no type of caching if just downloading whole thing
                     f.cache = BaseCache(0, f.cache.fetcher, f.size)

@@ -941,7 +927,39 @@ class SimpleCacheFileSystem(WholeFileCacheFileSystem):
                     f2.write(data)
         else:
             self.fs.get_file(path, fn)
-
+
+    def _open(self, path, mode="rb", **kwargs):
+        path = self._strip_protocol(path)
+        sha = self._mapper(path)
+
+        # For read (or append), (try) download from remote
+        if "r" in mode or "a" in mode:
+            if not self._check_file(path):
+                # append does not require an existing file but read does
+                if self.fs.exists(path):
+                    self._get_cached_file_before_open(path, **kwargs)
+                elif "r" in mode:
+                    raise FileNotFoundError(path)
+
+            fn = self._check_file(path)
+            # Just reading does not need special file handling
+            if "r" in mode and "+" not in mode:
+                return open(fn, mode)
+
+        fn = os.path.join(self.storage[-1], sha)
+        user_specified_kwargs = {
+            k: v
+            for k, v in kwargs.items()
+            if k not in ["autocommit", "block_size", "cache_options"]
+        }  # those were added by open()
+        return LocalTempFile(
+            self,
+            path,
+            mode=mode,
+            autocommit=not self._intrans,
+            fn=fn,
+            **user_specified_kwargs,
+        )


 class LocalTempFile:
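For orientation, a usage sketch of the whole-file caching layer that this refactor splits into _get_cached_file_before_open and _open; the URL and cache directory are illustrative:

import fsspec

# "filecache::" layers WholeFileCacheFileSystem over the target protocol. The first
# open downloads the whole file into cache_storage; later opens reuse the local copy.
of = fsspec.open(
    "filecache::https://example.com/data/archive.csv",
    mode="rb",
    filecache={"cache_storage": "/tmp/fsspec-cache"},
)
with of as f:
    header = f.read(1024)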
fsspec/implementations/webhdfs.py
CHANGED
@@ -7,6 +7,7 @@ import shutil
 import tempfile
 import uuid
 from contextlib import suppress
+from datetime import datetime
 from urllib.parse import quote

 import requests

@@ -268,6 +269,23 @@ class WebHDFS(AbstractFileSystem):
         info["name"] = path
         return self._process_info(info)

+    def created(self, path):
+        """Return the created timestamp of a file as a datetime.datetime"""
+        # The API does not provide creation time, so we use modification time
+        info = self.info(path)
+        mtime = info.get("modificationTime", None)
+        if mtime is not None:
+            return datetime.fromtimestamp(mtime / 1000)
+        raise RuntimeError("Could not retrieve creation time (modification time).")
+
+    def modified(self, path):
+        """Return the modified timestamp of a file as a datetime.datetime"""
+        info = self.info(path)
+        mtime = info.get("modificationTime", None)
+        if mtime is not None:
+            return datetime.fromtimestamp(mtime / 1000)
+        raise RuntimeError("Could not retrieve modification time.")
+
     def ls(self, path, detail=False, **kwargs):
         out = self._call("LISTSTATUS", path=path)
         infos = out.json()["FileStatuses"]["FileStatus"]
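A short usage sketch for the new created/modified methods; the host, port and path are illustrative and a reachable WebHDFS endpoint is assumed:

import fsspec

fs = fsspec.filesystem("webhdfs", host="namenode.example.com", port=9870)

# Both return datetime.datetime; created() falls back to the modification time
# because the WebHDFS API does not expose a creation timestamp.
print(fs.modified("/user/alice/data.csv"))
print(fs.created("/user/alice/data.csv"))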
fsspec/implementations/zip.py
CHANGED
@@ -138,14 +138,17 @@ class ZipFileSystem(AbstractArchiveFileSystem):
         if maxdepth is not None and maxdepth < 1:
             raise ValueError("maxdepth must be at least 1")

+        def to_parts(_path: str):
+            return list(filter(None, _path.replace("\\", "/").split("/")))
+
+        if not isinstance(path, str):
+            path = str(path)
+
         # Remove the leading slash, as the zip file paths are always
         # given without a leading slash
         path = path.lstrip("/")
-        path_parts = list(filter(lambda s: bool(s), path.split("/")))
-
-        def _matching_starts(file_path):
-            file_parts = filter(lambda s: bool(s), file_path.split("/"))
-            return all(a == b for a, b in zip(path_parts, file_parts))
+        path_parts = to_parts(path)
+        path_depth = len(path_parts)

         self._get_dirs()

@@ -157,21 +160,22 @@ class ZipFileSystem(AbstractArchiveFileSystem):
             return result if detail else [path]

         for file_path, file_info in self.dir_cache.items():
-            if not _matching_starts(file_path):
+            if len(file_parts := to_parts(file_path)) < path_depth or any(
+                a != b for a, b in zip(path_parts, file_parts)
+            ):
+                # skip parent folders and mismatching paths
                 continue

             if file_info["type"] == "directory":
-                if withdirs:
-                    if file_path not in result:
-                        result[file_path.strip("/")] = file_info
+                if withdirs and file_path not in result:
+                    result[file_path.strip("/")] = file_info
                 continue

             if file_path not in result:
                 result[file_path] = file_info if detail else None

         if maxdepth:
-            path_depth = path.count("/")
             result = {
-                k: v for k, v in result.items() if k.count("/")
+                k: v for k, v in result.items() if k.count("/") < maxdepth + path_depth
             }
         return result if detail else sorted(result)
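A usage sketch for the reworked find(); the archive name and member paths are illustrative:

import fsspec

fs = fsspec.filesystem("zip", fo="bundle.zip")

# Files under "data", at most one level deep; withdirs=True also includes the
# matching directory entries.
print(fs.find("data", maxdepth=1, withdirs=True))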
fsspec/parquet.py
CHANGED
@@ -1,7 +1,6 @@
 import io
 import json
 import warnings
-from typing import Literal

 import fsspec

@@ -25,7 +24,6 @@ class AlreadyBufferedFile(AbstractBufferedFile):

 def open_parquet_files(
     path: list[str],
-    mode: Literal["rb"] = "rb",
     fs: None | fsspec.AbstractFileSystem = None,
     metadata=None,
     columns: None | list[str] = None,

@@ -54,8 +52,6 @@ def open_parquet_files(
     ----------
     path: str
         Target file path.
-    mode: str, optional
-        Mode option to be passed through to `fs.open`. Default is "rb".
     metadata: Any, optional
         Parquet metadata object. Object type must be supported
         by the backend parquet engine. For now, only the "fastparquet"

@@ -150,16 +146,16 @@ def open_parquet_files(
         AlreadyBufferedFile(
             fs=None,
             path=fn,
-            mode=mode,
+            mode="rb",
             cache_type="parts",
             cache_options={
                 **options,
-                "data": data[fn],
+                "data": ranges,
             },
-            size=max(_[1] for _ in data[fn]),
+            size=max(_[1] for _ in ranges),
             **kwargs,
         )
-        for fn in data
+        for fn, ranges in data.items()
     ]

@@ -197,7 +193,7 @@ def _get_parquet_byte_ranges(
     if isinstance(engine, str):
         engine = _set_engine(engine)

-    # Pass to specialized function if metadata is defined
+    # Pass to a specialized function if metadata is defined
     if metadata is not None:
         # Use the provided parquet metadata object
         # to avoid transferring/parsing footer metadata

@@ -212,63 +208,54 @@ def _get_parquet_byte_ranges(
             filters=filters,
         )

-    # Get file sizes asynchronously
-    file_sizes = fs.sizes(paths)
-
     # Populate global paths, starts, & ends
-    result = {}
-    data_paths = []
-    data_starts = []
-    data_ends = []
-    add_header_magic = True
     if columns is None and row_groups is None and filters is None:
         # We are NOT selecting specific columns or row-groups.
         #
         # We can avoid sampling the footers, and just transfer
         # all file data with cat_ranges
-
-        result[path] = {}
-        data_paths.append(path)
-        data_starts.append(0)
-        data_ends.append(file_sizes[i])
-        add_header_magic = False  # "Magic" should already be included
+        result = {path: {(0, len(data)): data} for path, data in fs.cat(paths).items()}
     else:
         # We ARE selecting specific columns or row-groups.
         #
+        # Get file sizes asynchronously
+        file_sizes = fs.sizes(paths)
+        data_paths = []
+        data_starts = []
+        data_ends = []
         # Gather file footers.
         # We just take the last `footer_sample_size` bytes of each
         # file (or the entire file if it is smaller than that)
-        footer_starts = [
-
-
-
-        sample_size = max(0, file_sizes[i] - footer_sample_size)
-        footer_starts.append(sample_size)
-        footer_samples = fs.cat_ranges(paths, footer_starts, footer_ends)
+        footer_starts = [
+            max(0, file_size - footer_sample_size) for file_size in file_sizes
+        ]
+        footer_samples = fs.cat_ranges(paths, footer_starts, file_sizes)

         # Check our footer samples and re-sample if necessary.
-
-        large_footer = 0
+        large_footer = []
        for i, path in enumerate(paths):
            footer_size = int.from_bytes(footer_samples[i][-8:-4], "little")
            real_footer_start = file_sizes[i] - (footer_size + 8)
            if real_footer_start < footer_starts[i]:
-
-                large_footer = max(large_footer, (footer_size + 8))
+                large_footer.append((i, real_footer_start))
        if large_footer:
            warnings.warn(
                f"Not enough data was used to sample the parquet footer. "
                f"Try setting footer_sample_size >= {large_footer}."
            )
-            for i,
-
-
-
-
-            )
-            ):
+            path0 = [paths[i] for i, _ in large_footer]
+            starts = [_[1] for _ in large_footer]
+            ends = [file_sizes[i] - footer_sample_size for i, _ in large_footer]
+            data = fs.cat_ranges(path0, starts, ends)
+            for i, (path, start, block) in enumerate(zip(path0, starts, data)):
                footer_samples[i] = block + footer_samples[i]
-                footer_starts[i] =
+                footer_starts[i] = start
+        result = {
+            path: {(start, size): data}
+            for path, start, size, data in zip(
+                paths, footer_starts, file_sizes, footer_samples
+            )
+        }

     # Calculate required byte ranges for each path
     for i, path in enumerate(paths):

@@ -284,9 +271,6 @@ def _get_parquet_byte_ranges(
             data_paths += [path] * len(path_data_starts)
             data_starts += path_data_starts
             data_ends += path_data_ends
-            result.setdefault(path, {})[(footer_starts[i], file_sizes[i])] = (
-                footer_samples[i]
-            )

     # Merge adjacent offset ranges
     data_paths, data_starts, data_ends = merge_offset_ranges(

@@ -295,19 +279,14 @@ def _get_parquet_byte_ranges(
         data_ends,
         max_gap=max_gap,
         max_block=max_block,
-        sort=
+        sort=True,
     )

-        #
-
-        result[path] = {(footer_starts[i], footer_ends[i]): footer_samples[i]}
-
-        # Transfer the data byte-ranges into local memory
-        _transfer_ranges(fs, result, data_paths, data_starts, data_ends)
+    # Transfer the data byte-ranges into local memory
+    _transfer_ranges(fs, result, data_paths, data_starts, data_ends)

-        # Add b"PAR1" to
-
-        _add_header_magic(result)
+    # Add b"PAR1" to headers
+    _add_header_magic(result)

     return result

@@ -362,7 +341,7 @@ def _transfer_ranges(fs, blocks, paths, starts, ends):

 def _add_header_magic(data):
     # Add b"PAR1" to file headers
-    for path in list(data.keys()):
+    for path in list(data):
         add_magic = True
         for k in data[path]:
             if k[0] == 0 and k[1] >= 4:

@@ -419,9 +398,6 @@ class FastparquetEngine:

         self.fp = fp

-    def _row_group_filename(self, row_group, pf):
-        return pf.row_group_filename(row_group)
-
     def _parquet_byte_ranges(
         self,
         columns,

@@ -465,6 +441,10 @@ class FastparquetEngine:
             # Input row_groups contains row-group indices
             row_group_indices = row_groups
             row_groups = pf.row_groups
+        if column_set is not None:
+            column_set = [
+                _ if isinstance(_, list) else _.split(".") for _ in column_set
+            ]

         # Loop through column chunks to add required byte ranges
         for r, row_group in enumerate(row_groups):

@@ -472,13 +452,12 @@ class FastparquetEngine:
             # specific row-groups
             if row_group_indices is None or r in row_group_indices:
                 # Find the target parquet-file path for `row_group`
-                fn = self._row_group_filename(row_group, pf)
+                fn = pf.row_group_filename(row_group)

                 for column in row_group.columns:
-                    name = column.meta_data.path_in_schema
-                    # Skip this column if we are targeting
-
-                    if column_set is None or name in column_set:
+                    name = column.meta_data.path_in_schema
+                    # Skip this column if we are targeting specific columns
+                    if column_set is None or _cmp(name, column_set):
                         file_offset0 = column.meta_data.dictionary_page_offset
                         if file_offset0 is None:
                             file_offset0 = column.meta_data.data_page_offset

@@ -512,9 +491,6 @@ class PyarrowEngine:

         self.pq = pq

-    def _row_group_filename(self, row_group, metadata):
-        raise NotImplementedError
-
     def _parquet_byte_ranges(
         self,
         columns,

@@ -527,6 +503,7 @@ class PyarrowEngine:
         if metadata is not None:
             raise ValueError("metadata input not supported for PyarrowEngine")
         if filters:
+            # there must be a way!
             raise NotImplementedError

         data_starts, data_ends = [], []

@@ -550,6 +527,10 @@ class PyarrowEngine:
                 if not isinstance(ind, dict)
             ]
             column_set |= set(md_index)
+        if column_set is not None:
+            column_set = [
+                _[:1] if isinstance(_, list) else _.split(".")[:1] for _ in column_set
+            ]

         # Loop through column chunks to add required byte ranges
         for r in range(md.num_row_groups):

@@ -559,22 +540,33 @@ class PyarrowEngine:
             row_group = md.row_group(r)
             for c in range(row_group.num_columns):
                 column = row_group.column(c)
-                name = column.path_in_schema
-                # Skip this column if we are targeting
-
-
-
-
-
-
-
-
-
-
+                name = column.path_in_schema.split(".")
+                # Skip this column if we are targeting specific columns
+                if column_set is None or _cmp(name, column_set):
+                    meta = column.to_dict()
+                    # Any offset could be the first one
+                    file_offset0 = min(
+                        _
+                        for _ in [
+                            meta.get("dictionary_page_offset"),
+                            meta.get("data_page_offset"),
+                            meta.get("index_page_offset"),
+                        ]
+                        if _ is not None
+                    )
                     if file_offset0 < footer_start:
                         data_starts.append(file_offset0)
                         data_ends.append(
-                            min(
+                            min(
+                                meta["total_compressed_size"] + file_offset0,
+                                footer_start,
+                            )
                         )
+
+        data_starts.append(footer_start)
+        data_ends.append(footer_start + len(footer))
         return data_starts, data_ends
+
+
+def _cmp(name, column_set):
+    return any(all(a == b for a, b in zip(name, _)) for _ in column_set)
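A usage sketch of open_parquet_files based on the signature visible in this diff; the paths and column names are illustrative, and the former mode argument is gone in this release, files are always read as "rb":

import fsspec
import fsspec.parquet

paths = ["s3://bucket/dataset/part.0.parquet", "s3://bucket/dataset/part.1.parquet"]
fs = fsspec.filesystem("s3")

# Each returned object is an AlreadyBufferedFile whose "parts" cache already holds
# the footer plus the byte ranges of the requested columns, so a parquet engine can
# read them without extra remote requests.
files = fsspec.parquet.open_parquet_files(paths, fs=fs, columns=["name", "value"])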
fsspec/utils.py
CHANGED
@@ -566,6 +566,16 @@ def merge_offset_ranges(
             )
         )
     )
+    remove = []
+    for i, (path, start, end) in enumerate(zip(paths, starts, ends)):
+        if any(
+            e is not None and p == path and start >= s and end <= e and i != i2
+            for i2, (p, s, e) in enumerate(zip(paths, starts, ends))
+        ):
+            remove.append(i)
+    paths = [p for i, p in enumerate(paths) if i not in remove]
+    starts = [s for i, s in enumerate(starts) if i not in remove]
+    ends = [e for i, e in enumerate(ends) if i not in remove]

     if paths:
         # Loop through the coupled `paths`, `starts`, and

@@ -587,7 +597,7 @@ def merge_offset_ranges(
             new_starts.append(starts[i])
             new_ends.append(ends[i])
         else:
-            # Merge with previous block by updating the
+            # Merge with the previous block by updating the
             # last element of `ends`
             new_ends[-1] = ends[i]
     return new_paths, new_starts, new_ends
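The containment filter added to merge_offset_ranges above can be exercised directly; the values are illustrative and the expected output reflects this sketch's reading of the merge rules:

from fsspec.utils import merge_offset_ranges

# Three requests against the same file: (10, 20) is fully contained in (0, 100) and
# is dropped by the new filter; (105, 200) then merges with (0, 100) because the
# 5-byte gap is within max_gap.
paths = ["a.parquet", "a.parquet", "a.parquet"]
starts = [0, 10, 105]
ends = [100, 20, 200]

print(merge_offset_ranges(paths, starts, ends, max_gap=10))
# expected: (['a.parquet'], [0], [200])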
{fsspec-2026.1.0.dist-info → fsspec-2026.2.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fsspec
-Version: 2026.1.0
+Version: 2026.2.0
 Summary: File-system specification
 Project-URL: Changelog, https://filesystem-spec.readthedocs.io/en/latest/changelog.html
 Project-URL: Documentation, https://filesystem-spec.readthedocs.io/en/latest/

@@ -63,7 +63,7 @@ Requires-Dist: tqdm; extra == 'full'
 Provides-Extra: fuse
 Requires-Dist: fusepy; extra == 'fuse'
 Provides-Extra: gcs
-Requires-Dist: gcsfs; extra == 'gcs'
+Requires-Dist: gcsfs>2024.2.0; extra == 'gcs'
 Provides-Extra: git
 Requires-Dist: pygit2; extra == 'git'
 Provides-Extra: github

@@ -81,7 +81,7 @@ Requires-Dist: libarchive-c; extra == 'libarchive'
 Provides-Extra: oci
 Requires-Dist: ocifs; extra == 'oci'
 Provides-Extra: s3
-Requires-Dist: s3fs; extra == 's3'
+Requires-Dist: s3fs>2024.2.0; extra == 's3'
 Provides-Extra: sftp
 Requires-Dist: paramiko; extra == 'sftp'
 Provides-Extra: smb

@@ -124,7 +124,7 @@ Requires-Dist: lz4; extra == 'test-full'
 Requires-Dist: notebook; extra == 'test-full'
 Requires-Dist: numpy; extra == 'test-full'
 Requires-Dist: ocifs; extra == 'test-full'
-Requires-Dist: pandas; extra == 'test-full'
+Requires-Dist: pandas<3.0.0; extra == 'test-full'
 Requires-Dist: panel; extra == 'test-full'
 Requires-Dist: paramiko; extra == 'test-full'
 Requires-Dist: pyarrow; extra == 'test-full'

@@ -144,6 +144,7 @@ Requires-Dist: smbprotocol; extra == 'test-full'
 Requires-Dist: tqdm; extra == 'test-full'
 Requires-Dist: urllib3; extra == 'test-full'
 Requires-Dist: zarr; extra == 'test-full'
+Requires-Dist: zstandard; (python_version < '3.14') and extra == 'test-full'
 Provides-Extra: tqdm
 Requires-Dist: tqdm; extra == 'tqdm'
 Description-Content-Type: text/markdown
{fsspec-2026.1.0.dist-info → fsspec-2026.2.0.dist-info}/RECORD
CHANGED
@@ -1,10 +1,10 @@
 fsspec/__init__.py,sha256=L7qwNBU1iMNQd8Of87HYSNFT9gWlNMSESaJC8fY0AaQ,2053
-fsspec/_version.py,sha256=
+fsspec/_version.py,sha256=AEamMn8IHx_wGXrogRYVBycv6M5u2_UCmFedn7R8hYI,710
 fsspec/archive.py,sha256=vM6t_lgV6lBWbBYwpm3S4ofBQFQxUPr5KkDQrrQcQro,2411
 fsspec/asyn.py,sha256=LP_OicTWXmKHe31wBoYs2MrrNf8rmlhjVeGg5AqvVy8,36630
-fsspec/caching.py,sha256=
+fsspec/caching.py,sha256=8IJ4rgcWnvq_b_DqlcMGJ-K59d4Db5O9Gz8PkATAgHo,34023
 fsspec/callbacks.py,sha256=BDIwLzK6rr_0V5ch557fSzsivCElpdqhXr5dZ9Te-EE,9210
-fsspec/compression.py,sha256=
+fsspec/compression.py,sha256=3v_Fe39gzRRWfaeXpzNjAGPqgTzmETYRCo3qHVqD3po,5132
 fsspec/config.py,sha256=LF4Zmu1vhJW7Je9Q-cwkRc3xP7Rhyy7Xnwj26Z6sv2g,4279
 fsspec/conftest.py,sha256=uWfm_Qs5alPRxOhRpDfQ0-1jqSJ54pni4y96IxOREXM,3446
 fsspec/core.py,sha256=lc7XSnZU6_C6xljp7Z_xEGN3V7704hbeQLkxvPP0wds,24173

@@ -15,17 +15,17 @@ fsspec/generic.py,sha256=9QHQYMNb-8w8-eYuIqShcTjO_LeHXFoQTyt8J5oEq5Q,13482
 fsspec/gui.py,sha256=CQ7QsrTpaDlWSLNOpwNoJc7khOcYXIZxmrAJN9bHWQU,14002
 fsspec/json.py,sha256=4EBZ-xOmRiyxmIqPIwxmDImosRQ7io7qBM2xjJPsEE4,3768
 fsspec/mapping.py,sha256=m2ndB_gtRBXYmNJg0Ie1-BVR75TFleHmIQBzC-yWhjU,8343
-fsspec/parquet.py,sha256=
+fsspec/parquet.py,sha256=xGW3xfd9js7hrre7qN85XpSM0A1FObqkTcAv_H2xSwY,20505
 fsspec/registry.py,sha256=o7EGl8TEaLkcwN53X_103arzuzJeeOoVaNUWnPiXgf0,12148
 fsspec/spec.py,sha256=Ym-Ust6LRjHgbhrmvNqwOBZxoVnaw3g3xHXMZGHx_xg,77692
 fsspec/transaction.py,sha256=xliRG6U2Zf3khG4xcw9WiB-yAoqJSHEGK_VjHOdtgo0,2398
-fsspec/utils.py,sha256=
+fsspec/utils.py,sha256=E24ji0XLWC6n3bw2sHA28OYxrGU9Wy_al2XydsRgrRk,23623
 fsspec/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fsspec/implementations/arrow.py,sha256=
+fsspec/implementations/arrow.py,sha256=8FhvcvOYLZNMMegCYFFCEHgEqig8AkOU7Ehb8XfcgnA,8890
 fsspec/implementations/asyn_wrapper.py,sha256=3lfJkGs6D_AwRBdxTSYlL-RCVdaXBZ9Itys2P5o5Si0,3738
 fsspec/implementations/cache_mapper.py,sha256=W4wlxyPxZbSp9ItJ0pYRVBMh6bw9eFypgP6kUYuuiI4,2421
 fsspec/implementations/cache_metadata.py,sha256=ipIe4S8nlU_M9oRJkvTqr-b0tcbXVZsxH3GxaelaNOY,8502
-fsspec/implementations/cached.py,sha256=
+fsspec/implementations/cached.py,sha256=67ipbj-3o8O1zMGR11rZ_IWCi_7h-VRYpEAowFXqrvA,36175
 fsspec/implementations/chained.py,sha256=iGivpNaHUFjB_ea0-HAPhcmm6CL8qnDf270PSj7JwuE,680
 fsspec/implementations/dask.py,sha256=CXZbJzIVOhKV8ILcxuy3bTvcacCueAbyQxmvAkbPkrk,4466
 fsspec/implementations/data.py,sha256=IhOGDkacYp5gkl9jhEu4msQfZPb0gS5Q_ml7Mbr6dgQ,1627

@@ -45,8 +45,8 @@ fsspec/implementations/reference.py,sha256=xSUpB8o_QFAZiVJE2dt78QZMCUMLo5TaJ27e5
 fsspec/implementations/sftp.py,sha256=L9pZOa6eLUWfJNtxkxeG2YI96SQwrM5Hj6ocyUZXUbg,5923
 fsspec/implementations/smb.py,sha256=5fhu8h06nOLBPh2c48aT7WBRqh9cEcbIwtyu06wTjec,15236
 fsspec/implementations/tar.py,sha256=dam78Tp_CozybNqCY2JYgGBS3Uc9FuJUAT9oB0lolOs,4111
-fsspec/implementations/webhdfs.py,sha256=
-fsspec/implementations/zip.py,sha256=
+fsspec/implementations/webhdfs.py,sha256=osF2m0nhDil6sbMzYW_4DZzhxF4ygtb59XDiybd9Fyg,17589
+fsspec/implementations/zip.py,sha256=6f3z0s12tDbz1RMx7iDc3JDx730IAaKDdx7tf_XYDp0,6151
 fsspec/tests/abstract/__init__.py,sha256=4xUJrv7gDgc85xAOz1p-V_K1hrsdMWTSa0rviALlJk8,10181
 fsspec/tests/abstract/common.py,sha256=1GQwNo5AONzAnzZj0fWgn8NJPLXALehbsuGxS3FzWVU,4973
 fsspec/tests/abstract/copy.py,sha256=gU5-d97U3RSde35Vp4RxPY4rWwL744HiSrJ8IBOp9-8,19967

@@ -55,7 +55,7 @@ fsspec/tests/abstract/mv.py,sha256=k8eUEBIrRrGMsBY5OOaDXdGnQUKGwDIfQyduB6YD3Ns,1
 fsspec/tests/abstract/open.py,sha256=Fi2PBPYLbRqysF8cFm0rwnB41kMdQVYjq8cGyDXp3BU,329
 fsspec/tests/abstract/pipe.py,sha256=LFzIrLCB5GLXf9rzFKJmE8AdG7LQ_h4bJo70r8FLPqM,402
 fsspec/tests/abstract/put.py,sha256=7aih17OKB_IZZh1Mkq1eBDIjobhtMQmI8x-Pw-S_aZk,21201
-fsspec-2026.1.0.dist-info/METADATA,sha256=
-fsspec-2026.1.0.dist-info/WHEEL,sha256=
-fsspec-2026.1.0.dist-info/licenses/LICENSE,sha256=
-fsspec-2026.1.0.dist-info/RECORD,,
+fsspec-2026.2.0.dist-info/METADATA,sha256=Pw6QhbyXeg-elb0hpWPiQRpAeMD_ApJ9vdNFoYCnrPs,10524
+fsspec-2026.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+fsspec-2026.2.0.dist-info/licenses/LICENSE,sha256=LcNUls5TpzB5FcAIqESq1T53K0mzTN0ARFBnaRQH7JQ,1513
+fsspec-2026.2.0.dist-info/RECORD,,
{fsspec-2026.1.0.dist-info → fsspec-2026.2.0.dist-info}/WHEEL
File without changes

{fsspec-2026.1.0.dist-info → fsspec-2026.2.0.dist-info}/licenses/LICENSE
File without changes