fsspec 2025.12.0__py3-none-any.whl → 2026.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fsspec/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '2025.12.0'
-__version_tuple__ = version_tuple = (2025, 12, 0)
+__version__ = version = '2026.2.0'
+__version_tuple__ = version_tuple = (2026, 2, 0)
 
 __commit_id__ = commit_id = None
fsspec/caching.py CHANGED
@@ -25,7 +25,7 @@ else:
 T = TypeVar("T")
 
 
-logger = logging.getLogger("fsspec")
+logger = logging.getLogger("fsspec.caching")
 
 Fetcher = Callable[[int, int], bytes]  # Maps (start, end) to bytes
 MultiFetcher = Callable[[list[int, int]], bytes]  # Maps [(start, end)] to bytes
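The cache logger now has its own "fsspec.caching" name, so its debug output can be switched on without enabling logging for the rest of fsspec. A minimal sketch of doing that from user code (the handler configuration is illustrative, not part of the package):

    import logging

    # Assumes fsspec >= 2026.2.0, where caching logs under "fsspec.caching".
    logging.basicConfig(format="%(name)s %(levelname)s %(message)s")
    logging.getLogger("fsspec.caching").setLevel(logging.DEBUG)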
@@ -214,7 +214,7 @@ class MMapCache(BaseCache):
         if self.multi_fetcher:
             logger.debug(f"MMap get blocks {ranges}")
             for idx, r in enumerate(self.multi_fetcher(ranges)):
-                (sstart, send) = ranges[idx]
+                sstart, send = ranges[idx]
                 logger.debug(f"MMap copy block ({sstart}-{send}")
                 self.cache[sstart:send] = r
         else:
@@ -391,19 +391,8 @@ class BlockCache(BaseCache):
         if start >= self.size or start >= end:
             return b""
 
-        # byte position -> block numbers
-        start_block_number = start // self.blocksize
-        end_block_number = end // self.blocksize
-
-        # these are cached, so safe to do multiple calls for the same start and end.
-        for block_number in range(start_block_number, end_block_number + 1):
-            self._fetch_block_cached(block_number)
-
         return self._read_cache(
-            start,
-            end,
-            start_block_number=start_block_number,
-            end_block_number=end_block_number,
+            start, end, start // self.blocksize, (end - 1) // self.blocksize
         )
 
     def _fetch_block(self, block_number: int) -> bytes:
@@ -439,6 +428,8 @@ class BlockCache(BaseCache):
         """
         start_pos = start % self.blocksize
         end_pos = end % self.blocksize
+        if end_pos == 0:
+            end_pos = self.blocksize
 
         self.hit_count += 1
         if start_block_number == end_block_number:
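The two BlockCache hunks above change how a read maps onto block numbers and how the final partial block is sliced. The cache itself is normally selected through the cache_type option when opening a file; a hedged sketch, with a placeholder URL and block size:

    import fsspec

    # Any fsspec URL works here; the server must support range requests.
    with fsspec.open(
        "https://example.com/large.bin",
        mode="rb",
        cache_type="blockcache",  # read through fsspec.caching.BlockCache
        block_size=2**20,         # fetch data in 1 MiB blocks
    ) as f:
        header = f.read(4096)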
@@ -662,12 +653,12 @@ class KnownPartsOfAFile(BaseCache):
         pass
 
     def _fetch(self, start: int | None, stop: int | None) -> bytes:
+        logger.debug("Known parts request %s %s", start, stop)
         if start is None:
             start = 0
         if stop is None:
             stop = self.size
         self.total_requested_bytes += stop - start
-
         out = b""
         started = False
         loc_old = 0
@@ -698,11 +689,13 @@ class KnownPartsOfAFile(BaseCache):
             elif loc0 <= stop <= loc1:
                 # end block
                 self.hit_count += 1
-                return out + self.data[(loc0, loc1)][: stop - loc0]
+                out = out + self.data[(loc0, loc1)][: stop - loc0]
+                return out
             loc_old = loc1
         self.miss_count += 1
         if started and not self.strict:
-            return out + b"\x00" * (stop - loc_old)
+            out = out + b"\x00" * (stop - loc_old)
+            return out
         raise ValueError
 
 
fsspec/compression.py CHANGED
@@ -1,5 +1,6 @@
 """Helper functions for a standard streaming compression API"""
 
+import sys
 from zipfile import ZipFile
 
 import fsspec.utils
@@ -155,11 +156,12 @@ except ImportError:
     pass
 
 try:
-    # zstd in the standard library for python >= 3.14
-    from compression.zstd import ZstdFile
-
-    register_compression("zstd", ZstdFile, "zst")
+    if sys.version_info >= (3, 14):
+        from compression import zstd
+    else:
+        from backports import zstd
 
+    register_compression("zstd", zstd.ZstdFile, "zst")
 except ImportError:
     try:
         import zstandard as zstd
@@ -175,6 +177,7 @@ except ImportError:
         register_compression("zstd", zstandard_file, "zst")
     except ImportError:
         pass
+    pass
 
 
 def available_compressions():
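Whichever zstd provider is importable (the 3.14 standard-library module, the backports package, or the zstandard fallback), the codec is reached through the usual compression argument, or inferred from a ".zst" suffix. An illustrative sketch with a placeholder file name:

    import fsspec

    with fsspec.open("example.txt.zst", "wt", compression="zstd") as f:
        f.write("hello, compressed world\n")

    # "infer" picks the codec from the registered ".zst" extension.
    with fsspec.open("example.txt.zst", "rt", compression="infer") as f:
        print(f.read())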
fsspec/implementations/arrow.py CHANGED
@@ -242,7 +242,9 @@ class ArrowFile(io.IOBase):
 
     @property
     def size(self):
-        return self.stream.size()
+        if self.stream.seekable():
+            return self.stream.size()
+        return None
 
     def __exit__(self, *args):
         return self.close()
fsspec/implementations/cached.py CHANGED
@@ -427,6 +427,7 @@ class CachingFileSystem(ChainedFileSystem):
     def __getattribute__(self, item):
         if item in {
             "load_cache",
+            "_get_cached_file_before_open",
             "_open",
             "save_cache",
             "close_and_update",
@@ -678,46 +679,12 @@ class WholeFileCacheFileSystem(CachingFileSystem):
             out = out[paths[0]]
         return out
 
-    def _open(self, path, mode="rb", **kwargs):
-        path = self._strip_protocol(path)
-        if "r" not in mode:
-            hash = self._mapper(path)
-            fn = os.path.join(self.storage[-1], hash)
-            user_specified_kwargs = {
-                k: v
-                for k, v in kwargs.items()
-                # those kwargs were added by open(), we don't want them
-                if k not in ["autocommit", "block_size", "cache_options"]
-            }
-            return LocalTempFile(self, path, mode=mode, fn=fn, **user_specified_kwargs)
-        detail = self._check_file(path)
-        if detail:
-            detail, fn = detail
-            _, blocks = detail["fn"], detail["blocks"]
-            if blocks is True:
-                logger.debug("Opening local copy of %s", path)
-
-                # In order to support downstream filesystems to be able to
-                # infer the compression from the original filename, like
-                # the `TarFileSystem`, let's extend the `io.BufferedReader`
-                # fileobject protocol by adding a dedicated attribute
-                # `original`.
-                f = open(fn, mode)
-                f.original = detail.get("original")
-                return f
-            else:
-                raise ValueError(
-                    f"Attempt to open partially cached file {path}"
-                    f" as a wholly cached file"
-                )
-        else:
-            fn = self._make_local_details(path)
-            kwargs["mode"] = mode
-
+    def _get_cached_file_before_open(self, path, **kwargs):
+        fn = self._make_local_details(path)
         # call target filesystems open
         self._mkcache()
         if self.compression:
-            with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
+            with self.fs._open(path, mode="rb", **kwargs) as f, open(fn, "wb") as f2:
                 if isinstance(f, AbstractBufferedFile):
                     # want no type of caching if just downloading whole thing
                     f.cache = BaseCache(0, f.cache.fetcher, f.size)
@@ -735,7 +702,47 @@ class WholeFileCacheFileSystem(CachingFileSystem):
         else:
             self.fs.get_file(path, fn)
         self.save_cache()
-        return self._open(path, mode)
+
+    def _open(self, path, mode="rb", **kwargs):
+        path = self._strip_protocol(path)
+        # For read (or append), (try) download from remote
+        if "r" in mode or "a" in mode:
+            if not self._check_file(path):
+                if self.fs.exists(path):
+                    self._get_cached_file_before_open(path, **kwargs)
+                elif "r" in mode:
+                    raise FileNotFoundError(path)
+
+            detail, fn = self._check_file(path)
+            _, blocks = detail["fn"], detail["blocks"]
+            if blocks is True:
+                logger.debug("Opening local copy of %s", path)
+            else:
+                raise ValueError(
+                    f"Attempt to open partially cached file {path}"
+                    f" as a wholly cached file"
+                )
+
+            # Just reading does not need special file handling
+            if "r" in mode and "+" not in mode:
+                # In order to support downstream filesystems to be able to
+                # infer the compression from the original filename, like
+                # the `TarFileSystem`, let's extend the `io.BufferedReader`
+                # fileobject protocol by adding a dedicated attribute
+                # `original`.
+                f = open(fn, mode)
+                f.original = detail.get("original")
+                return f
+
+        hash = self._mapper(path)
+        fn = os.path.join(self.storage[-1], hash)
+        user_specified_kwargs = {
+            k: v
+            for k, v in kwargs.items()
+            # those kwargs were added by open(), we don't want them
+            if k not in ["autocommit", "block_size", "cache_options"]
+        }
+        return LocalTempFile(self, path, mode=mode, fn=fn, **user_specified_kwargs)
 
 
 class SimpleCacheFileSystem(WholeFileCacheFileSystem):
@@ -894,37 +901,16 @@ class SimpleCacheFileSystem(WholeFileCacheFileSystem):
             paths, starts, ends, max_gap=max_gap, on_error=on_error, **kwargs
         )
 
-    def _open(self, path, mode="rb", **kwargs):
-        path = self._strip_protocol(path)
+    def _get_cached_file_before_open(self, path, **kwargs):
         sha = self._mapper(path)
-
-        if "r" not in mode:
-            fn = os.path.join(self.storage[-1], sha)
-            user_specified_kwargs = {
-                k: v
-                for k, v in kwargs.items()
-                if k not in ["autocommit", "block_size", "cache_options"]
-            }  # those were added by open()
-            return LocalTempFile(
-                self,
-                path,
-                mode=mode,
-                autocommit=not self._intrans,
-                fn=fn,
-                **user_specified_kwargs,
-            )
-        fn = self._check_file(path)
-        if fn:
-            return open(fn, mode)
-
         fn = os.path.join(self.storage[-1], sha)
         logger.debug("Copying %s to local cache", path)
-        kwargs["mode"] = mode
 
         self._mkcache()
         self._cache_size = None
+
         if self.compression:
-            with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
+            with self.fs._open(path, mode="rb", **kwargs) as f, open(fn, "wb") as f2:
                 if isinstance(f, AbstractBufferedFile):
                     # want no type of caching if just downloading whole thing
                     f.cache = BaseCache(0, f.cache.fetcher, f.size)
@@ -941,7 +927,39 @@ class SimpleCacheFileSystem(WholeFileCacheFileSystem):
                     f2.write(data)
         else:
             self.fs.get_file(path, fn)
-        return self._open(path, mode)
+
+    def _open(self, path, mode="rb", **kwargs):
+        path = self._strip_protocol(path)
+        sha = self._mapper(path)
+
+        # For read (or append), (try) download from remote
+        if "r" in mode or "a" in mode:
+            if not self._check_file(path):
+                # append does not require an existing file but read does
+                if self.fs.exists(path):
+                    self._get_cached_file_before_open(path, **kwargs)
+                elif "r" in mode:
+                    raise FileNotFoundError(path)
+
+            fn = self._check_file(path)
+            # Just reading does not need special file handling
+            if "r" in mode and "+" not in mode:
+                return open(fn, mode)
+
+        fn = os.path.join(self.storage[-1], sha)
+        user_specified_kwargs = {
+            k: v
+            for k, v in kwargs.items()
+            if k not in ["autocommit", "block_size", "cache_options"]
+        }  # those were added by open()
+        return LocalTempFile(
+            self,
+            path,
+            mode=mode,
+            autocommit=not self._intrans,
+            fn=fn,
+            **user_specified_kwargs,
+        )
 
 
 class LocalTempFile:
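The cached-filesystem hunks above move the download step into _get_cached_file_before_open and rework _open around it, but the user-facing entry point is unchanged. A hedged sketch of whole-file caching through the chained-URL syntax (remote URL and cache directory are placeholders):

    import fsspec

    # "filecache" keeps whole-file copies on local disk; "simplecache" behaves
    # the same way without persistent metadata.
    with fsspec.open(
        "filecache::https://example.com/data.csv",
        mode="rb",
        filecache={"cache_storage": "/tmp/fsspec-cache"},
    ) as f:
        first_line = f.readline()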
fsspec/implementations/ftp.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import ssl
 import uuid
 from ftplib import FTP, FTP_TLS, Error, error_perm
 from typing import Any
@@ -6,6 +7,37 @@ from typing import Any
 from ..spec import AbstractBufferedFile, AbstractFileSystem
 from ..utils import infer_storage_options, isfilelike
 
+SECURITY_PROTOCOL_MAP = {
+    "tls": ssl.PROTOCOL_TLS,
+    "tlsv1": ssl.PROTOCOL_TLSv1,
+    "tlsv1_1": ssl.PROTOCOL_TLSv1_1,
+    "tlsv1_2": ssl.PROTOCOL_TLSv1_2,
+    "sslv23": ssl.PROTOCOL_SSLv23,
+}
+
+
+class ImplicitFTPTLS(FTP_TLS):
+    """
+    FTP_TLS subclass that automatically wraps sockets in SSL
+    to support implicit FTPS.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._sock = None
+
+    @property
+    def sock(self):
+        """Return the socket."""
+        return self._sock
+
+    @sock.setter
+    def sock(self, value):
+        """When modifying the socket, ensure that it is ssl wrapped."""
+        if value is not None and not isinstance(value, ssl.SSLSocket):
+            value = self.context.wrap_socket(value)
+        self._sock = value
+
 
 class FTPFileSystem(AbstractFileSystem):
     """A filesystem over classic FTP"""
@@ -55,8 +87,14 @@ class FTPFileSystem(AbstractFileSystem):
             Timeout of the ftp connection in seconds
         encoding: str
             Encoding to use for directories and filenames in FTP connection
-        tls: bool
-            Use FTP-TLS, by default False
+        tls: bool or str
+            Enable FTP-TLS for secure connections:
+            - False: Plain FTP (default)
+            - True: Explicit TLS (FTPS with AUTH TLS command)
+            - "tls": Auto-negotiate highest protocol
+            - "tlsv1": TLS v1.0
+            - "tlsv1_1": TLS v1.1
+            - "tlsv1_2": TLS v1.2
         """
         super().__init__(**kwargs)
         self.host = host
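A hedged sketch of the option documented above (host, credentials and path are placeholders): a boolean keeps the previous explicit-TLS behaviour, while one of the string values selects implicit FTPS with a pinned protocol.

    import fsspec

    # "tls", "tlsv1", "tlsv1_1" and "sslv23" are the other accepted strings;
    # tls=True still means explicit AUTH TLS.
    fs = fsspec.filesystem(
        "ftp",
        host="ftp.example.com",
        username="user",
        password="secret",
        tls="tlsv1_2",
    )
    print(fs.ls("/"))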
@@ -71,15 +109,27 @@ class FTPFileSystem(AbstractFileSystem):
         self.blocksize = 2**16
         self.tls = tls
         self._connect()
-        if self.tls:
+        if isinstance(self.tls, bool) and self.tls:
             self.ftp.prot_p()
 
     def _connect(self):
+        security = None
         if self.tls:
-            ftp_cls = FTP_TLS
+            if isinstance(self.tls, str):
+                ftp_cls = ImplicitFTPTLS
+                security = SECURITY_PROTOCOL_MAP.get(
+                    self.tls,
+                    f"Not supported {self.tls} protocol",
+                )
+                if isinstance(security, str):
+                    raise ValueError(security)
+            else:
+                ftp_cls = FTP_TLS
         else:
             ftp_cls = FTP
         self.ftp = ftp_cls(timeout=self.timeout, encoding=self.encoding)
+        if security:
+            self.ftp.ssl_version = security
         self.ftp.connect(self.host, self.port)
         self.ftp.login(*self.cred)
 
fsspec/implementations/local.py CHANGED
@@ -166,6 +166,10 @@ class LocalFileSystem(AbstractFileSystem):
         """
         path1 = self._strip_protocol(path1)
         path2 = self._strip_protocol(path2)
+
+        if self.auto_mkdir:
+            self.makedirs(self._parent(path2), exist_ok=True)
+
         shutil.move(path1, path2)
 
     def link(self, src, dst, **kwargs):
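With this change, mv can move a file into a directory that does not exist yet when the filesystem was created with auto_mkdir. A small sketch with placeholder paths:

    import fsspec

    fs = fsspec.filesystem("file", auto_mkdir=True)
    fs.pipe_file("/tmp/src/a.txt", b"payload")
    # The destination directory is created on demand before shutil.move runs.
    fs.mv("/tmp/src/a.txt", "/tmp/dest/nested/a.txt")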
fsspec/implementations/webhdfs.py CHANGED
@@ -7,6 +7,7 @@ import shutil
 import tempfile
 import uuid
 from contextlib import suppress
+from datetime import datetime
 from urllib.parse import quote
 
 import requests
@@ -268,6 +269,23 @@ class WebHDFS(AbstractFileSystem):
         info["name"] = path
         return self._process_info(info)
 
+    def created(self, path):
+        """Return the created timestamp of a file as a datetime.datetime"""
+        # The API does not provide creation time, so we use modification time
+        info = self.info(path)
+        mtime = info.get("modificationTime", None)
+        if mtime is not None:
+            return datetime.fromtimestamp(mtime / 1000)
+        raise RuntimeError("Could not retrieve creation time (modification time).")
+
+    def modified(self, path):
+        """Return the modified timestamp of a file as a datetime.datetime"""
+        info = self.info(path)
+        mtime = info.get("modificationTime", None)
+        if mtime is not None:
+            return datetime.fromtimestamp(mtime / 1000)
+        raise RuntimeError("Could not retrieve modification time.")
+
     def ls(self, path, detail=False, **kwargs):
         out = self._call("LISTSTATUS", path=path)
         infos = out.json()["FileStatuses"]["FileStatus"]
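Both helpers build a datetime from the millisecond modificationTime field of the WebHDFS file status, with created() falling back to the same value. A hedged sketch of calling them (host, port and path are placeholders):

    import fsspec

    fs = fsspec.filesystem("webhdfs", host="namenode.example.com", port=9870)
    print(fs.modified("/data/events.parquet"))
    print(fs.created("/data/events.parquet"))  # same timestamp, by design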
fsspec/implementations/zip.py CHANGED
@@ -138,14 +138,17 @@ class ZipFileSystem(AbstractArchiveFileSystem):
         if maxdepth is not None and maxdepth < 1:
             raise ValueError("maxdepth must be at least 1")
 
+        def to_parts(_path: str):
+            return list(filter(None, _path.replace("\\", "/").split("/")))
+
+        if not isinstance(path, str):
+            path = str(path)
+
         # Remove the leading slash, as the zip file paths are always
         # given without a leading slash
         path = path.lstrip("/")
-        path_parts = list(filter(lambda s: bool(s), path.split("/")))
-
-        def _matching_starts(file_path):
-            file_parts = filter(lambda s: bool(s), file_path.split("/"))
-            return all(a == b for a, b in zip(path_parts, file_parts))
+        path_parts = to_parts(path)
+        path_depth = len(path_parts)
 
         self._get_dirs()
 
@@ -157,21 +160,22 @@ class ZipFileSystem(AbstractArchiveFileSystem):
             return result if detail else [path]
 
         for file_path, file_info in self.dir_cache.items():
-            if not (path == "" or _matching_starts(file_path)):
+            if len(file_parts := to_parts(file_path)) < path_depth or any(
+                a != b for a, b in zip(path_parts, file_parts)
+            ):
+                # skip parent folders and mismatching paths
                 continue
 
             if file_info["type"] == "directory":
-                if withdirs:
-                    if file_path not in result:
-                        result[file_path.strip("/")] = file_info
+                if withdirs and file_path not in result:
+                    result[file_path.strip("/")] = file_info
                 continue
 
             if file_path not in result:
                 result[file_path] = file_info if detail else None
 
         if maxdepth:
-            path_depth = path.count("/")
             result = {
-                k: v for k, v in result.items() if k.count("/") - path_depth < maxdepth
+                k: v for k, v in result.items() if k.count("/") < maxdepth + path_depth
             }
         return result if detail else sorted(result)
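The rewritten matching normalises backslash separators and non-string paths through to_parts() before comparing path components. A hedged sketch of exercising it via find() (the archive name is a placeholder):

    import fsspec

    fs = fsspec.filesystem("zip", fo="archive.zip")
    # Members below a prefix, including directories, at most two levels deep.
    print(fs.find("data/raw", withdirs=True, maxdepth=2))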
fsspec/parquet.py CHANGED
@@ -1,7 +1,6 @@
 import io
 import json
 import warnings
-from typing import Literal
 
 import fsspec
 
@@ -25,7 +24,6 @@ class AlreadyBufferedFile(AbstractBufferedFile):
 
 def open_parquet_files(
     path: list[str],
-    mode: Literal["rb"] = "rb",
     fs: None | fsspec.AbstractFileSystem = None,
     metadata=None,
     columns: None | list[str] = None,
@@ -54,8 +52,6 @@ def open_parquet_files(
     ----------
     path: str
         Target file path.
-    mode: str, optional
-        Mode option to be passed through to `fs.open`. Default is "rb".
     metadata: Any, optional
         Parquet metadata object. Object type must be supported
         by the backend parquet engine. For now, only the "fastparquet"
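Read mode is now implied, so callers pass only the paths plus any selection. A hedged sketch of the call pattern suggested by this module (file names, filesystem and column names are placeholders; other keyword arguments follow the docstring above):

    import fsspec
    import fsspec.parquet

    fs = fsspec.filesystem("s3")  # any fsspec filesystem
    # Returns file-like objects whose caches already hold the byte ranges
    # needed for the selected columns.
    files = fsspec.parquet.open_parquet_files(
        ["bucket/part-0.parquet", "bucket/part-1.parquet"],
        fs=fs,
        columns=["id", "value"],
    )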
@@ -150,16 +146,16 @@ def open_parquet_files(
         AlreadyBufferedFile(
             fs=None,
             path=fn,
-            mode=mode,
+            mode="rb",
             cache_type="parts",
             cache_options={
                 **options,
-                "data": data.get(fn, {}),
+                "data": ranges,
             },
-            size=max(_[1] for _ in data.get(fn, {})),
+            size=max(_[1] for _ in ranges),
             **kwargs,
         )
-        for fn in data
+        for fn, ranges in data.items()
     ]
 
 
@@ -197,7 +193,7 @@ def _get_parquet_byte_ranges(
     if isinstance(engine, str):
         engine = _set_engine(engine)
 
-    # Pass to specialized function if metadata is defined
+    # Pass to a specialized function if metadata is defined
     if metadata is not None:
         # Use the provided parquet metadata object
         # to avoid transferring/parsing footer metadata
@@ -212,63 +208,54 @@
             filters=filters,
         )
 
-    # Get file sizes asynchronously
-    file_sizes = fs.sizes(paths)
-
     # Populate global paths, starts, & ends
-    result = {}
-    data_paths = []
-    data_starts = []
-    data_ends = []
-    add_header_magic = True
     if columns is None and row_groups is None and filters is None:
         # We are NOT selecting specific columns or row-groups.
         #
         # We can avoid sampling the footers, and just transfer
         # all file data with cat_ranges
-        for i, path in enumerate(paths):
-            result[path] = {}
-            data_paths.append(path)
-            data_starts.append(0)
-            data_ends.append(file_sizes[i])
-        add_header_magic = False  # "Magic" should already be included
+        result = {path: {(0, len(data)): data} for path, data in fs.cat(paths).items()}
     else:
         # We ARE selecting specific columns or row-groups.
         #
+        # Get file sizes asynchronously
+        file_sizes = fs.sizes(paths)
+        data_paths = []
+        data_starts = []
+        data_ends = []
         # Gather file footers.
         # We just take the last `footer_sample_size` bytes of each
         # file (or the entire file if it is smaller than that)
-        footer_starts = []
-        footer_ends = []
-        for i, path in enumerate(paths):
-            footer_ends.append(file_sizes[i])
-            sample_size = max(0, file_sizes[i] - footer_sample_size)
-            footer_starts.append(sample_size)
-        footer_samples = fs.cat_ranges(paths, footer_starts, footer_ends)
+        footer_starts = [
+            max(0, file_size - footer_sample_size) for file_size in file_sizes
+        ]
+        footer_samples = fs.cat_ranges(paths, footer_starts, file_sizes)
 
         # Check our footer samples and re-sample if necessary.
-        missing_footer_starts = footer_starts.copy()
-        large_footer = 0
+        large_footer = []
         for i, path in enumerate(paths):
             footer_size = int.from_bytes(footer_samples[i][-8:-4], "little")
             real_footer_start = file_sizes[i] - (footer_size + 8)
             if real_footer_start < footer_starts[i]:
-                missing_footer_starts[i] = real_footer_start
-                large_footer = max(large_footer, (footer_size + 8))
+                large_footer.append((i, real_footer_start))
         if large_footer:
             warnings.warn(
                 f"Not enough data was used to sample the parquet footer. "
                 f"Try setting footer_sample_size >= {large_footer}."
             )
-        for i, block in enumerate(
-            fs.cat_ranges(
-                paths,
-                missing_footer_starts,
-                footer_starts,
-            )
-        ):
+        path0 = [paths[i] for i, _ in large_footer]
+        starts = [_[1] for _ in large_footer]
+        ends = [file_sizes[i] - footer_sample_size for i, _ in large_footer]
+        data = fs.cat_ranges(path0, starts, ends)
+        for i, (path, start, block) in enumerate(zip(path0, starts, data)):
             footer_samples[i] = block + footer_samples[i]
-            footer_starts[i] = missing_footer_starts[i]
+            footer_starts[i] = start
+        result = {
+            path: {(start, size): data}
+            for path, start, size, data in zip(
+                paths, footer_starts, file_sizes, footer_samples
+            )
+        }
 
         # Calculate required byte ranges for each path
         for i, path in enumerate(paths):
@@ -284,9 +271,6 @@ def _get_parquet_byte_ranges(
             data_paths += [path] * len(path_data_starts)
             data_starts += path_data_starts
             data_ends += path_data_ends
-            result.setdefault(path, {})[(footer_starts[i], file_sizes[i])] = (
-                footer_samples[i]
-            )
 
         # Merge adjacent offset ranges
         data_paths, data_starts, data_ends = merge_offset_ranges(
@@ -295,19 +279,14 @@ def _get_parquet_byte_ranges(
             data_ends,
             max_gap=max_gap,
             max_block=max_block,
-            sort=False,  # Should already be sorted
+            sort=True,
         )
 
-        # Start by populating `result` with footer samples
-        for i, path in enumerate(paths):
-            result[path] = {(footer_starts[i], footer_ends[i]): footer_samples[i]}
-
-    # Transfer the data byte-ranges into local memory
-    _transfer_ranges(fs, result, data_paths, data_starts, data_ends)
+        # Transfer the data byte-ranges into local memory
+        _transfer_ranges(fs, result, data_paths, data_starts, data_ends)
 
-    # Add b"PAR1" to header if necessary
-    if add_header_magic:
-        _add_header_magic(result)
+    # Add b"PAR1" to headers
+    _add_header_magic(result)
 
     return result
 
@@ -362,7 +341,7 @@ def _transfer_ranges(fs, blocks, paths, starts, ends):
 
 def _add_header_magic(data):
     # Add b"PAR1" to file headers
-    for path in list(data.keys()):
+    for path in list(data):
         add_magic = True
         for k in data[path]:
             if k[0] == 0 and k[1] >= 4:
@@ -419,9 +398,6 @@ class FastparquetEngine:
 
         self.fp = fp
 
-    def _row_group_filename(self, row_group, pf):
-        return pf.row_group_filename(row_group)
-
     def _parquet_byte_ranges(
         self,
         columns,
465
441
  # Input row_groups contains row-group indices
466
442
  row_group_indices = row_groups
467
443
  row_groups = pf.row_groups
444
+ if column_set is not None:
445
+ column_set = [
446
+ _ if isinstance(_, list) else _.split(".") for _ in column_set
447
+ ]
468
448
 
469
449
  # Loop through column chunks to add required byte ranges
470
450
  for r, row_group in enumerate(row_groups):
@@ -472,13 +452,12 @@ class FastparquetEngine:
             # specific row-groups
             if row_group_indices is None or r in row_group_indices:
                 # Find the target parquet-file path for `row_group`
-                fn = self._row_group_filename(row_group, pf)
+                fn = pf.row_group_filename(row_group)
 
                 for column in row_group.columns:
-                    name = column.meta_data.path_in_schema[0]
-                    # Skip this column if we are targeting a
-                    # specific columns
-                    if column_set is None or name in column_set:
+                    name = column.meta_data.path_in_schema
+                    # Skip this column if we are targeting specific columns
+                    if column_set is None or _cmp(name, column_set):
                         file_offset0 = column.meta_data.dictionary_page_offset
                         if file_offset0 is None:
                             file_offset0 = column.meta_data.data_page_offset
512
491
 
513
492
  self.pq = pq
514
493
 
515
- def _row_group_filename(self, row_group, metadata):
516
- raise NotImplementedError
517
-
518
494
  def _parquet_byte_ranges(
519
495
  self,
520
496
  columns,
@@ -527,6 +503,7 @@ class PyarrowEngine:
527
503
  if metadata is not None:
528
504
  raise ValueError("metadata input not supported for PyarrowEngine")
529
505
  if filters:
506
+ # there must be a way!
530
507
  raise NotImplementedError
531
508
 
532
509
  data_starts, data_ends = [], []
@@ -550,6 +527,10 @@ class PyarrowEngine:
                 if not isinstance(ind, dict)
             ]
             column_set |= set(md_index)
+        if column_set is not None:
+            column_set = [
+                _[:1] if isinstance(_, list) else _.split(".")[:1] for _ in column_set
+            ]
 
         # Loop through column chunks to add required byte ranges
         for r in range(md.num_row_groups):
559
540
  row_group = md.row_group(r)
560
541
  for c in range(row_group.num_columns):
561
542
  column = row_group.column(c)
562
- name = column.path_in_schema
563
- # Skip this column if we are targeting a
564
- # specific columns
565
- split_name = name.split(".")[0]
566
- if (
567
- column_set is None
568
- or name in column_set
569
- or split_name in column_set
570
- ):
571
- file_offset0 = column.dictionary_page_offset
572
- if file_offset0 is None:
573
- file_offset0 = column.data_page_offset
574
- num_bytes = column.total_compressed_size
543
+ name = column.path_in_schema.split(".")
544
+ # Skip this column if we are targeting specific columns
545
+ if column_set is None or _cmp(name, column_set):
546
+ meta = column.to_dict()
547
+ # Any offset could be the first one
548
+ file_offset0 = min(
549
+ _
550
+ for _ in [
551
+ meta.get("dictionary_page_offset"),
552
+ meta.get("data_page_offset"),
553
+ meta.get("index_page_offset"),
554
+ ]
555
+ if _ is not None
556
+ )
575
557
  if file_offset0 < footer_start:
576
558
  data_starts.append(file_offset0)
577
559
  data_ends.append(
578
- min(file_offset0 + num_bytes, footer_start)
560
+ min(
561
+ meta["total_compressed_size"] + file_offset0,
562
+ footer_start,
563
+ )
579
564
  )
565
+
566
+ data_starts.append(footer_start)
567
+ data_ends.append(footer_start + len(footer))
580
568
  return data_starts, data_ends
569
+
570
+
571
+ def _cmp(name, column_set):
572
+ return any(all(a == b for a, b in zip(name, _)) for _ in column_set)
fsspec/registry.py CHANGED
@@ -189,7 +189,7 @@ known_implementations = {
     },
     "pyscript": {
         "class": "pyscript_fsspec_client.client.PyscriptFileSystem",
-        "err": "Install requests (cpython) or run in pyscript",
+        "err": "This only runs in a pyscript context",
    },
     "reference": {"class": "fsspec.implementations.reference.ReferenceFileSystem"},
     "root": {
fsspec/utils.py CHANGED
@@ -566,6 +566,16 @@ def merge_offset_ranges(
                 )
             )
         )
+    remove = []
+    for i, (path, start, end) in enumerate(zip(paths, starts, ends)):
+        if any(
+            e is not None and p == path and start >= s and end <= e and i != i2
+            for i2, (p, s, e) in enumerate(zip(paths, starts, ends))
+        ):
+            remove.append(i)
+    paths = [p for i, p in enumerate(paths) if i not in remove]
+    starts = [s for i, s in enumerate(starts) if i not in remove]
+    ends = [e for i, e in enumerate(ends) if i not in remove]
 
     if paths:
         # Loop through the coupled `paths`, `starts`, and
@@ -587,7 +597,7 @@ def merge_offset_ranges(
                 new_starts.append(starts[i])
                 new_ends.append(ends[i])
             else:
-                # Merge with previous block by updating the
+                # Merge with the previous block by updating the
                 # last element of `ends`
                 new_ends[-1] = ends[i]
     return new_paths, new_starts, new_ends
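The new pre-pass drops any byte range that is wholly contained in another range for the same path before the usual merging. A small illustrative call (the path and offsets are invented):

    from fsspec.utils import merge_offset_ranges

    paths = ["s3://bucket/f.parquet"] * 3
    starts = [0, 10, 90]
    ends = [100, 50, 200]

    # (10, 50) sits inside (0, 100) and is discarded; the survivors touch
    # within max_gap and should merge into a single (0, 200) request.
    print(merge_offset_ranges(paths, starts, ends, max_gap=0, sort=True))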
fsspec-2025.12.0.dist-info/METADATA → fsspec-2026.2.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fsspec
-Version: 2025.12.0
+Version: 2026.2.0
 Summary: File-system specification
 Project-URL: Changelog, https://filesystem-spec.readthedocs.io/en/latest/changelog.html
 Project-URL: Documentation, https://filesystem-spec.readthedocs.io/en/latest/
@@ -49,7 +49,7 @@ Requires-Dist: distributed; extra == 'full'
 Requires-Dist: dropbox; extra == 'full'
 Requires-Dist: dropboxdrivefs; extra == 'full'
 Requires-Dist: fusepy; extra == 'full'
-Requires-Dist: gcsfs; extra == 'full'
+Requires-Dist: gcsfs>2024.2.0; extra == 'full'
 Requires-Dist: libarchive-c; extra == 'full'
 Requires-Dist: ocifs; extra == 'full'
 Requires-Dist: panel; extra == 'full'
@@ -57,13 +57,13 @@ Requires-Dist: paramiko; extra == 'full'
 Requires-Dist: pyarrow>=1; extra == 'full'
 Requires-Dist: pygit2; extra == 'full'
 Requires-Dist: requests; extra == 'full'
-Requires-Dist: s3fs; extra == 'full'
+Requires-Dist: s3fs>2024.2.0; extra == 'full'
 Requires-Dist: smbprotocol; extra == 'full'
 Requires-Dist: tqdm; extra == 'full'
 Provides-Extra: fuse
 Requires-Dist: fusepy; extra == 'fuse'
 Provides-Extra: gcs
-Requires-Dist: gcsfs; extra == 'gcs'
+Requires-Dist: gcsfs>2024.2.0; extra == 'gcs'
 Provides-Extra: git
 Requires-Dist: pygit2; extra == 'git'
 Provides-Extra: github
@@ -81,7 +81,7 @@ Requires-Dist: libarchive-c; extra == 'libarchive'
 Provides-Extra: oci
 Requires-Dist: ocifs; extra == 'oci'
 Provides-Extra: s3
-Requires-Dist: s3fs; extra == 's3'
+Requires-Dist: s3fs>2024.2.0; extra == 's3'
 Provides-Extra: sftp
 Requires-Dist: paramiko; extra == 'sftp'
 Provides-Extra: smb
@@ -108,6 +108,7 @@ Requires-Dist: xarray; extra == 'test-downstream'
 Provides-Extra: test-full
 Requires-Dist: adlfs; extra == 'test-full'
 Requires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'test-full'
+Requires-Dist: backports-zstd; (python_version < '3.14') and extra == 'test-full'
 Requires-Dist: cloudpickle; extra == 'test-full'
 Requires-Dist: dask; extra == 'test-full'
 Requires-Dist: distributed; extra == 'test-full'
@@ -123,7 +124,7 @@ Requires-Dist: lz4; extra == 'test-full'
 Requires-Dist: notebook; extra == 'test-full'
 Requires-Dist: numpy; extra == 'test-full'
 Requires-Dist: ocifs; extra == 'test-full'
-Requires-Dist: pandas; extra == 'test-full'
+Requires-Dist: pandas<3.0.0; extra == 'test-full'
 Requires-Dist: panel; extra == 'test-full'
 Requires-Dist: paramiko; extra == 'test-full'
 Requires-Dist: pyarrow; extra == 'test-full'
fsspec-2025.12.0.dist-info/RECORD → fsspec-2026.2.0.dist-info/RECORD CHANGED
@@ -1,10 +1,10 @@
 fsspec/__init__.py,sha256=L7qwNBU1iMNQd8Of87HYSNFT9gWlNMSESaJC8fY0AaQ,2053
-fsspec/_version.py,sha256=u_WCUBrOC78o9bqe-21i1S8FKT43ei4dqyL4oKUga9g,712
+fsspec/_version.py,sha256=AEamMn8IHx_wGXrogRYVBycv6M5u2_UCmFedn7R8hYI,710
 fsspec/archive.py,sha256=vM6t_lgV6lBWbBYwpm3S4ofBQFQxUPr5KkDQrrQcQro,2411
 fsspec/asyn.py,sha256=LP_OicTWXmKHe31wBoYs2MrrNf8rmlhjVeGg5AqvVy8,36630
-fsspec/caching.py,sha256=B2xeDz9-VDgr_dDeVOTNRq3vaS9zVUe0nxtOBgsrjUk,34260
+fsspec/caching.py,sha256=8IJ4rgcWnvq_b_DqlcMGJ-K59d4Db5O9Gz8PkATAgHo,34023
 fsspec/callbacks.py,sha256=BDIwLzK6rr_0V5ch557fSzsivCElpdqhXr5dZ9Te-EE,9210
-fsspec/compression.py,sha256=gBK2MV_oTFVW2XDq8bZVbYQKYrl6JDUou6_-kyvmxuk,5086
+fsspec/compression.py,sha256=3v_Fe39gzRRWfaeXpzNjAGPqgTzmETYRCo3qHVqD3po,5132
 fsspec/config.py,sha256=LF4Zmu1vhJW7Je9Q-cwkRc3xP7Rhyy7Xnwj26Z6sv2g,4279
 fsspec/conftest.py,sha256=uWfm_Qs5alPRxOhRpDfQ0-1jqSJ54pni4y96IxOREXM,3446
 fsspec/core.py,sha256=lc7XSnZU6_C6xljp7Z_xEGN3V7704hbeQLkxvPP0wds,24173
@@ -15,23 +15,23 @@ fsspec/generic.py,sha256=9QHQYMNb-8w8-eYuIqShcTjO_LeHXFoQTyt8J5oEq5Q,13482
 fsspec/gui.py,sha256=CQ7QsrTpaDlWSLNOpwNoJc7khOcYXIZxmrAJN9bHWQU,14002
 fsspec/json.py,sha256=4EBZ-xOmRiyxmIqPIwxmDImosRQ7io7qBM2xjJPsEE4,3768
 fsspec/mapping.py,sha256=m2ndB_gtRBXYmNJg0Ie1-BVR75TFleHmIQBzC-yWhjU,8343
-fsspec/parquet.py,sha256=vpOuoxg0y0iS2yoiXeAUcWB0n-wct2x2L6Vmn_O-hRQ,20668
-fsspec/registry.py,sha256=Kvv7NEqvIDhI2PXoYmwi4Z9RHAjrLMhGRu3m492801s,12157
+fsspec/parquet.py,sha256=xGW3xfd9js7hrre7qN85XpSM0A1FObqkTcAv_H2xSwY,20505
+fsspec/registry.py,sha256=o7EGl8TEaLkcwN53X_103arzuzJeeOoVaNUWnPiXgf0,12148
 fsspec/spec.py,sha256=Ym-Ust6LRjHgbhrmvNqwOBZxoVnaw3g3xHXMZGHx_xg,77692
 fsspec/transaction.py,sha256=xliRG6U2Zf3khG4xcw9WiB-yAoqJSHEGK_VjHOdtgo0,2398
-fsspec/utils.py,sha256=5DOxB_eE-wNHwrc6zp3h-oMp5mCLw4tsQZqqPTjLDmM,23136
+fsspec/utils.py,sha256=E24ji0XLWC6n3bw2sHA28OYxrGU9Wy_al2XydsRgrRk,23623
 fsspec/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fsspec/implementations/arrow.py,sha256=6BaSEBZ4nb8UuY6NsyFevGzXcdJWamt3qEHjMe2S-W8,8831
+fsspec/implementations/arrow.py,sha256=8FhvcvOYLZNMMegCYFFCEHgEqig8AkOU7Ehb8XfcgnA,8890
 fsspec/implementations/asyn_wrapper.py,sha256=3lfJkGs6D_AwRBdxTSYlL-RCVdaXBZ9Itys2P5o5Si0,3738
 fsspec/implementations/cache_mapper.py,sha256=W4wlxyPxZbSp9ItJ0pYRVBMh6bw9eFypgP6kUYuuiI4,2421
 fsspec/implementations/cache_metadata.py,sha256=ipIe4S8nlU_M9oRJkvTqr-b0tcbXVZsxH3GxaelaNOY,8502
-fsspec/implementations/cached.py,sha256=gp1eaM2X7ix2eGRDaC8rtTuO0icK6hbz3yDXh9YdB0E,35392
+fsspec/implementations/cached.py,sha256=67ipbj-3o8O1zMGR11rZ_IWCi_7h-VRYpEAowFXqrvA,36175
 fsspec/implementations/chained.py,sha256=iGivpNaHUFjB_ea0-HAPhcmm6CL8qnDf270PSj7JwuE,680
 fsspec/implementations/dask.py,sha256=CXZbJzIVOhKV8ILcxuy3bTvcacCueAbyQxmvAkbPkrk,4466
 fsspec/implementations/data.py,sha256=IhOGDkacYp5gkl9jhEu4msQfZPb0gS5Q_ml7Mbr6dgQ,1627
 fsspec/implementations/dbfs.py,sha256=1cvvC6KBWOb8pBVpc01xavVbEPXO1xsgZvPD7H73M9k,16217
 fsspec/implementations/dirfs.py,sha256=VNj6gPMfmmLPK4wxbtxt7mUqW7xkh2XDgMmEmSK_E1c,12166
-fsspec/implementations/ftp.py,sha256=bzL_TgH77nMMtTMewRGkbq4iObSHGu7YoMRCXBH4nrc,11639
+fsspec/implementations/ftp.py,sha256=fJhaMIKq2RvzYlLwG3bewy2jq4iRqjVt1aIpwtUIRwI,13235
 fsspec/implementations/gist.py,sha256=Y6jTDrE-wuTwvpPyAQDuuOMBGxlajafKWoB1_yX6jdY,8528
 fsspec/implementations/git.py,sha256=qBDWMz5LNllPqVjr5jf_1FuNha4P5lyQI3IlhYg-wUE,3731
 fsspec/implementations/github.py,sha256=aCsZL8UvXZgdkcB1RUs3DdLeNrjLKcFsFYeQFDWbBFo,11653
39
39
  fsspec/implementations/http_sync.py,sha256=UmBqd938ebwVjYgVtzg-ysG3ZoGhIJw0wFtQAfxV3Aw,30332
40
40
  fsspec/implementations/jupyter.py,sha256=q1PlQ66AAswGFyr8MFKWyobaV2YekMWRtqENBDQtD28,4002
41
41
  fsspec/implementations/libarchive.py,sha256=SpIA1F-zf7kb2-VYUVuhMrXTBOhBxUXKgEW1RaAdDoA,7098
42
- fsspec/implementations/local.py,sha256=DQeK7jRGv4_mJAweLKALO5WzIIkjXxZ_jRvwQ_xadSA,16936
42
+ fsspec/implementations/local.py,sha256=ERDUdXdRI8AvRX06icXaDKwO-hcQgivc7EorqnayFFM,17028
43
43
  fsspec/implementations/memory.py,sha256=TDdLtSPWXxZKrrVGwmc3uS3oK_2mlcVTk2BiqR8IeII,10507
44
44
  fsspec/implementations/reference.py,sha256=xSUpB8o_QFAZiVJE2dt78QZMCUMLo5TaJ27e5DwDAfg,48814
45
45
  fsspec/implementations/sftp.py,sha256=L9pZOa6eLUWfJNtxkxeG2YI96SQwrM5Hj6ocyUZXUbg,5923
46
46
  fsspec/implementations/smb.py,sha256=5fhu8h06nOLBPh2c48aT7WBRqh9cEcbIwtyu06wTjec,15236
47
47
  fsspec/implementations/tar.py,sha256=dam78Tp_CozybNqCY2JYgGBS3Uc9FuJUAT9oB0lolOs,4111
48
- fsspec/implementations/webhdfs.py,sha256=PUgZM9HbVPAeW4u4B-rWl8wTcKKpPhwZO7xcINDmTNQ,16779
49
- fsspec/implementations/zip.py,sha256=9LBMHPft2OutJl2Ft-r9u_z3GptLkc2n91ur2A3bCbg,6072
48
+ fsspec/implementations/webhdfs.py,sha256=osF2m0nhDil6sbMzYW_4DZzhxF4ygtb59XDiybd9Fyg,17589
49
+ fsspec/implementations/zip.py,sha256=6f3z0s12tDbz1RMx7iDc3JDx730IAaKDdx7tf_XYDp0,6151
50
50
  fsspec/tests/abstract/__init__.py,sha256=4xUJrv7gDgc85xAOz1p-V_K1hrsdMWTSa0rviALlJk8,10181
51
51
  fsspec/tests/abstract/common.py,sha256=1GQwNo5AONzAnzZj0fWgn8NJPLXALehbsuGxS3FzWVU,4973
52
52
  fsspec/tests/abstract/copy.py,sha256=gU5-d97U3RSde35Vp4RxPY4rWwL744HiSrJ8IBOp9-8,19967
@@ -55,7 +55,7 @@ fsspec/tests/abstract/mv.py,sha256=k8eUEBIrRrGMsBY5OOaDXdGnQUKGwDIfQyduB6YD3Ns,1
 fsspec/tests/abstract/open.py,sha256=Fi2PBPYLbRqysF8cFm0rwnB41kMdQVYjq8cGyDXp3BU,329
 fsspec/tests/abstract/pipe.py,sha256=LFzIrLCB5GLXf9rzFKJmE8AdG7LQ_h4bJo70r8FLPqM,402
 fsspec/tests/abstract/put.py,sha256=7aih17OKB_IZZh1Mkq1eBDIjobhtMQmI8x-Pw-S_aZk,21201
-fsspec-2025.12.0.dist-info/METADATA,sha256=Fm2bIMio0NYy1EdMfxgGq4OE2kE5nt1qZDaDYbOuC0M,10401
-fsspec-2025.12.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-fsspec-2025.12.0.dist-info/licenses/LICENSE,sha256=LcNUls5TpzB5FcAIqESq1T53K0mzTN0ARFBnaRQH7JQ,1513
-fsspec-2025.12.0.dist-info/RECORD,,
+fsspec-2026.2.0.dist-info/METADATA,sha256=Pw6QhbyXeg-elb0hpWPiQRpAeMD_ApJ9vdNFoYCnrPs,10524
+fsspec-2026.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+fsspec-2026.2.0.dist-info/licenses/LICENSE,sha256=LcNUls5TpzB5FcAIqESq1T53K0mzTN0ARFBnaRQH7JQ,1513
+fsspec-2026.2.0.dist-info/RECORD,,