cloud-files 5.9.0.tar.gz → 6.0.0.tar.gz
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- {cloud_files-5.9.0 → cloud_files-6.0.0}/ChangeLog +5 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/PKG-INFO +1 -1
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloud_files.egg-info/PKG-INFO +1 -1
- cloud_files-6.0.0/cloud_files.egg-info/pbr.json +1 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/cloudfiles.py +9 -3
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/interfaces.py +21 -11
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles_cli/cloudfiles_cli.py +17 -5
- cloud_files-5.9.0/cloud_files.egg-info/pbr.json +0 -1
- {cloud_files-5.9.0 → cloud_files-6.0.0}/.github/workflows/test-suite.yml +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/AUTHORS +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/LICENSE +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/MANIFEST.in +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/README.md +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/automated_test.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloud_files.egg-info/SOURCES.txt +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloud_files.egg-info/dependency_links.txt +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloud_files.egg-info/entry_points.txt +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloud_files.egg-info/not-zip-safe +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloud_files.egg-info/requires.txt +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloud_files.egg-info/top_level.txt +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/__init__.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/compression.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/connectionpools.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/exceptions.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/gcs.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/lib.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/monitoring.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/paths.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/resumable_tools.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/scheduler.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/secrets.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/test.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/threaded_queue.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles/typing.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles_cli/LICENSE +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/cloudfiles_cli/__init__.py +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/requirements.txt +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/setup.cfg +0 -0
- {cloud_files-5.9.0 → cloud_files-6.0.0}/setup.py +0 -0
cloud_files-6.0.0/cloud_files.egg-info/pbr.json (added)

```diff
@@ -0,0 +1 @@
+{"git_version": "38a2b59", "is_release": true}
```
cloudfiles/cloudfiles.py

```diff
@@ -1008,10 +1008,11 @@ class CloudFiles:
       return results
     return first(results.values())

-  def subtree_size(self, prefix:GetPathType = "") -> int:
+  def subtree_size(self, prefix:GetPathType = "") -> dict[str,int]:
     """High performance size calculation for directory trees."""
     prefix, return_multiple = toiter(prefix, is_iter=True)
     total_bytes = 0
+    total_files = 0

     total = totalfn(prefix, None)

@@ -1019,11 +1020,13 @@ class CloudFiles:

     def size_thunk(prefix):
       nonlocal total_bytes
+      nonlocal total_files
       nonlocal lock

       with self._get_connection() as conn:
-        subtree_bytes = conn.subtree_size(prefix)
+        subtree_files, subtree_bytes = conn.subtree_size(prefix)
         with lock:
+          total_files += subtree_files
           total_bytes += subtree_bytes

     schedule_jobs(
@@ -1034,7 +1037,10 @@ class CloudFiles:
       total=total,
     )

-    return
+    return {
+      "N": total_files,
+      "num_bytes": total_bytes,
+    }

   @parallelize(desc="Delete")
   def delete(
```
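In 5.9.0, `CloudFiles.subtree_size` returned a bare byte count; in 6.0.0 it returns a dict with the file count under `"N"` and the byte total under `"num_bytes"`, as the hunk above shows. A minimal usage sketch follows; the local path is purely illustrative and nothing here is taken from the package docs:

```python
from cloudfiles import CloudFiles

# Any protocol the package supports would work; a file:// path is used here
# only as a hypothetical example.
cf = CloudFiles("file:///tmp/example-dataset")

stats = cf.subtree_size()  # 6.0.0: {"N": <num files>, "num_bytes": <total bytes>}
print(stats["N"], "files,", stats["num_bytes"], "bytes")

# 5.9.0 returned an int, so code that did e.g. `total += cf.subtree_size()`
# would now need `total += cf.subtree_size()["num_bytes"]`.
```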
cloudfiles/interfaces.py

```diff
@@ -304,18 +304,21 @@ class FileInterface(StorageInterface):

     return self.io_with_lock(do_size, path, exclusive=False)

-  def subtree_size(self, prefix:str = "") -> int:
+  def subtree_size(self, prefix:str = "") -> tuple[int,int]:
     total_bytes = 0
+    total_files = 0

     subdir = self.get_path_to_file("")
     if prefix:
       subdir = os.path.join(subdir, os.path.dirname(prefix))

     for root, dirs, files in os.walk(subdir):
-      …
-      …
+      for f in files:
+        path = os.path.join(root, f)
+        total_files += 1
+        total_bytes += os.path.getsize(path)

-    return total_bytes
+    return (total_files, total_bytes)

   def exists(self, file_path):
     path = self.get_path_to_file(file_path)
@@ -628,7 +631,7 @@ class MemoryInterface(StorageInterface):
     filenames.sort()
     return iter(filenames)

-  def subtree_size(self, prefix:str = "") -> int:
+  def subtree_size(self, prefix:str = "") -> tuple[int,int]:
     layer_path = self.get_path_to_file("")

     remove = layer_path
@@ -636,12 +639,14 @@ class MemoryInterface(StorageInterface):
       remove += '/'

     total_bytes = 0
+    total_files = 0
     for filename, binary in self._data.items():
       f_prefix = f.removeprefix(remove)[:len(prefix)]
       if f_prefix == prefix:
         total_bytes += len(binary)
+        total_files += 1

-    return total_bytes
+    return (total_files, total_bytes)

 class GoogleCloudStorageInterface(StorageInterface):
   exists_batch_size = Batch._MAX_BATCH_SIZE
@@ -866,7 +871,7 @@ class GoogleCloudStorageInterface(StorageInterface):


   @retry
-  def subtree_size(self, prefix:str = "") -> int:
+  def subtree_size(self, prefix:str = "") -> tuple[int,int]:
     layer_path = self.get_path_to_file("")
     path = posixpath.join(layer_path, prefix)

@@ -877,11 +882,13 @@ class GoogleCloudStorageInterface(StorageInterface):
     )

     total_bytes = 0
+    total_files = 0
     for page in blobs.pages:
       for blob in page:
         total_bytes += blob.size
+        total_files += 1

-    return total_bytes
+    return (total_files, total_bytes)

   def release_connection(self):
     global GC_POOL
@@ -939,7 +946,7 @@ class HttpInterface(StorageInterface):
     headers = self.head(file_path)
     return int(headers["Content-Length"])

-  def subtree_size(self, prefix:str = "") -> int:
+  def subtree_size(self, prefix:str = "") -> tuple[int,int]:
     raise NotImplementedError()

   @retry
@@ -1538,7 +1545,7 @@ class S3Interface(StorageInterface):
       for filename in iterate(resp):
         yield filename

-  def subtree_size(self, prefix:str = "") -> int:
+  def subtree_size(self, prefix:str = "") -> tuple[int,int]:
     layer_path = self.get_path_to_file("")
     path = posixpath.join(layer_path, prefix)

@@ -1565,16 +1572,19 @@ class S3Interface(StorageInterface):
         yield item.get('Size', 0)

     total_bytes = 0
+    total_files = 0
     for num_bytes in iterate(resp):
+      total_files += 1
       total_bytes += num_bytes

     while resp['IsTruncated'] and resp['NextContinuationToken']:
       resp = s3lst(path, resp['NextContinuationToken'])

       for num_bytes in iterate(resp):
+        total_files += 1
         total_bytes += num_bytes

-    return total_bytes
+    return (total_files, total_bytes)

   def release_connection(self):
     global S3_POOL
```
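Across every backend, `subtree_size` now returns a `(file_count, byte_count)` tuple instead of an int, and `CloudFiles.subtree_size` unpacks it (`subtree_files, subtree_bytes = conn.subtree_size(prefix)`), so any caller of the interface-level method must switch from `nbytes = conn.subtree_size(prefix)` to tuple unpacking. The sketch below mirrors the accumulation the `FileInterface` hunk performs, as a standalone stdlib-only helper; the function name and argument are my own, not part of the package:

```python
import os

def local_subtree_size(subdir: str) -> tuple[int, int]:
    """Count files and sum their sizes under subdir, matching the
    (total_files, total_bytes) shape StorageInterface.subtree_size returns in 6.0.0."""
    total_files = 0
    total_bytes = 0
    for root, dirs, files in os.walk(subdir):
        for f in files:
            total_files += 1
            total_bytes += os.path.getsize(os.path.join(root, f))
    return (total_files, total_bytes)
```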
cloudfiles_cli/cloudfiles_cli.py

```diff
@@ -802,9 +802,13 @@ def __rm(cloudpath, progress, paths):
 @click.option('-c', '--grand-total', is_flag=True, default=False, help="Sum a grand total of all inputs.")
 @click.option('-s', '--summarize', is_flag=True, default=False, help="Sum a total for each input argument.")
 @click.option('-h', '--human-readable', is_flag=True, default=False, help='"Human-readable" output. Use unit suffixes: Bytes, KiB, MiB, GiB, TiB, PiB, and EiB.')
-def du(paths, grand_total, summarize, human_readable):
+@click.option('-N', '--count-files', is_flag=True, default=False, help='Also report the number of files.')
+def du(paths, grand_total, summarize, human_readable, count_files):
   """Display disk usage statistics."""
   results = []
+
+  list_data = False
+
   for path in paths:
     npath = normalize_path(path)
     if ispathdir(path):
@@ -812,6 +816,7 @@ def du(paths, grand_total, summarize, human_readable):
       if summarize:
         results.append(cf.subtree_size())
       else:
+        list_data = True
         results.append(cf.size(cf.list()))
     else:
       cf = CloudFiles(os.path.dirname(npath))
@@ -841,11 +846,15 @@ def du(paths, grand_total, summarize, human_readable):
     return f"{(val / 2**60):.2f} EiB"

   summary = {}
+  num_files = 0
   for path, res in zip(paths, results):
-    if …
-      summary[path] = res
-    else:
+    if list_data:
       summary[path] = sum(res.values())
+      num_files += len(res)
+    else:
+      summary[path] = res["num_bytes"]
+      num_files += res["N"]
+
     if summarize:
       print(f"{SI(summary[path])}\t{path}")

@@ -855,7 +864,10 @@ def du(paths, grand_total, summarize, human_readable):
     print(f"{SI(size)}\t{pth}")

   if grand_total:
-    print(f"{SI(sum(summary.values()))}\…
+    print(f"{SI(sum(summary.values()))}\tbytes total")
+
+  if count_files:
+    print(f"{num_files}\tfiles total")

 @main.command()
 @click.argument('paths', nargs=-1)
```
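The `du` command gains a `-N/--count-files` flag and, when set, prints a `<count>\tfiles total` line alongside the byte totals. A hedged sketch of exercising it through click's test runner; the import path follows this package's layout as listed in the diff, and the directory argument is hypothetical:

```python
from click.testing import CliRunner
from cloudfiles_cli.cloudfiles_cli import du  # module path as listed in this diff

runner = CliRunner()
# Roughly equivalent to running: du --summarize --count-files ./example_dir
result = runner.invoke(du, ["--summarize", "--count-files", "./example_dir"])
print(result.output)  # per the hunk above: "<bytes>\t<path>" lines plus "<N>\tfiles total"
```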
cloud_files-5.9.0/cloud_files.egg-info/pbr.json (removed)

```diff
@@ -1 +0,0 @@
-{"git_version": "623052c", "is_release": true}
```