rclone-api 1.4.14__py2.py3-none-any.whl → 1.4.18__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rclone_api/cmd/copy_large_s3_finish.py +78 -17
- rclone_api/process.py +65 -40
- rclone_api/s3/api.py +3 -3
- rclone_api/s3/create.py +31 -27
- rclone_api/s3/merge_state.py +115 -0
- rclone_api/s3/multipart/finished_piece.py +15 -1
- rclone_api/s3/s3_multipart_uploader_by_copy.py +139 -81
- rclone_api/util.py +84 -23
- {rclone_api-1.4.14.dist-info → rclone_api-1.4.18.dist-info}/METADATA +1 -1
- {rclone_api-1.4.14.dist-info → rclone_api-1.4.18.dist-info}/RECORD +14 -13
- {rclone_api-1.4.14.dist-info → rclone_api-1.4.18.dist-info}/LICENSE +0 -0
- {rclone_api-1.4.14.dist-info → rclone_api-1.4.18.dist-info}/WHEEL +0 -0
- {rclone_api-1.4.14.dist-info → rclone_api-1.4.18.dist-info}/entry_points.txt +0 -0
- {rclone_api-1.4.14.dist-info → rclone_api-1.4.18.dist-info}/top_level.txt +0 -0
rclone_api/cmd/copy_large_s3_finish.py
CHANGED
@@ -6,9 +6,13 @@ from pathlib import Path
 from rclone_api import Rclone
 from rclone_api.detail.copy_file_parts import InfoJson
 from rclone_api.s3.s3_multipart_uploader_by_copy import (
-
+    Part,
+    S3MultiPartUploader,
 )
-
+
+_TIMEOUT_READ = 900
+_TIMEOUT_CONNECTION = 900
+_MAX_WORKERS = 10
 
 
 @dataclass
@@ -50,7 +54,55 @@ def _parse_args() -> Args:
     return out
 
 
-def
+# def finish_multipart_upload_from_keys(
+#     s3_client: BaseClient,
+#     source_bucket: str,
+#     parts: list[Part],
+#     destination_bucket: str,
+#     destination_key: str,
+#     chunk_size: int,  # 5MB default
+#     max_workers: int = 100,
+#     retries: int = 3,
+# ) -> str | Exception:
+#     """
+#     Finish a multipart upload by copying parts from existing S3 objects.
+
+#     Args:
+#         s3_client: Boto3 S3 client
+#         source_bucket: Source bucket name
+#         source_keys: List of source object keys to copy from
+#         destination_bucket: Destination bucket name
+#         destination_key: Destination object key
+#         chunk_size: Size of each part in bytes
+#         retries: Number of retry attempts
+#         byte_ranges: Optional list of byte ranges corresponding to source_keys
+
+#     Returns:
+#         The URL of the completed object
+#     """
+
+#     # Create upload info
+#     info = begin_upload(
+#         s3_client=s3_client,
+#         parts=parts,
+#         destination_bucket=destination_bucket,
+#         destination_key=destination_key,
+#         chunk_size=chunk_size,
+#         retries=retries,
+#     )
+
+#     out = do_body_work(
+#         info=info,
+#         source_bucket=source_bucket,
+#         parts=parts,
+#         max_workers=max_workers,
+#         retries=retries,
+#     )
+
+#     return out
+
+
+def do_finish_part(rclone: Rclone, info: InfoJson, dst: str) -> Exception | None:
     from rclone_api.s3.create import (
         BaseClient,
         S3Config,
@@ -58,12 +110,17 @@ def do_finish_part(rclone: Rclone, info: InfoJson, dst: str) -> None:
         create_s3_client,
     )
 
-
-
-
+    s3_config = S3Config(
+        verbose=False,
+        timeout_read=_TIMEOUT_READ,
+        timeout_connection=_TIMEOUT_CONNECTION,
     )
+
+    s3_creds: S3Credentials = rclone.impl.get_s3_credentials(remote=dst)
+    s3_client: BaseClient = create_s3_client(s3_creds=s3_creds, s3_config=s3_config)
     s3_bucket = s3_creds.bucket_name
     is_done = info.fetch_is_done()
+    size = info.size
     assert is_done, f"Upload is not done: {info}"
 
     parts_dir = info.parts_dir
@@ -79,11 +136,9 @@ def do_finish_part(rclone: Rclone, info: InfoJson, dst: str) -> None:
 
     first_part: int | None = info.first_part
     last_part: int | None = info.last_part
-    size: SizeSuffix | None = info.size
 
     assert first_part is not None
     assert last_part is not None
-    assert size is not None
 
     def _to_s3_key(name: str | None) -> str:
         if name:
@@ -92,11 +147,12 @@ def do_finish_part(rclone: Rclone, info: InfoJson, dst: str) -> None:
         out = f"{parts_path}"
         return out
 
-    parts: list[
+    parts: list[Part] = []
     part_num = 1
     for part_key in source_keys:
         s3_key = _to_s3_key(name=part_key)
-
+        part = Part(part_number=part_num, s3_key=s3_key)
+        parts.append(part)
         part_num += 1
 
     chunksize = info.chunksize
@@ -107,31 +163,36 @@ def do_finish_part(rclone: Rclone, info: InfoJson, dst: str) -> None:
     # dst_key =
     dst_key = f"{dst_dir}/{dst_name}"
 
-
+    uploader: S3MultiPartUploader = S3MultiPartUploader(
         s3_client=s3_client,
-
+        verbose=True,
+    )
+
+    from rclone_api.s3.s3_multipart_uploader_by_copy import MultipartUploadInfo
+
+    state: MultipartUploadInfo = uploader.begin_new_upload(
         parts=parts,
         destination_bucket=s3_creds.bucket_name,
         destination_key=dst_key,
         chunk_size=chunksize.as_int(),
-        final_size=size.as_int(),
-        max_workers=50,
-        retries=3,
     )
 
+    uploader.start_upload(info=state, parts=parts, max_workers=_MAX_WORKERS)
+
     # now check if the dst now exists, if so, delete the parts folder.
     # if rclone.exists(dst):
     #     rclone.purge(parts_dir)
 
     if not rclone.exists(dst):
-
+        return FileNotFoundError(f"Destination file not found: {dst}")
 
     write_size = rclone.size_file(dst)
     if write_size != size:
-
+        return ValueError(f"Size mismatch: {write_size} != {size}")
 
     print(f"Upload complete: {dst}")
     rclone.purge(parts_dir)
+    return None
 
 
 def main() -> int:
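For orientation, the following is a condensed, illustrative sketch of the finish flow this file now implements, using only the names visible in the hunks above (Part, S3MultiPartUploader, S3Config, create_s3_client, get_s3_credentials). It is not code from the package; the rclone, source_keys, bucket, key and chunk_size arguments are placeholders.

# Illustrative sketch only, assuming the signatures shown in the diff above.
from rclone_api.s3.create import S3Config, create_s3_client
from rclone_api.s3.s3_multipart_uploader_by_copy import Part, S3MultiPartUploader

def finish_sketch(rclone, source_keys: list[str], dst: str, dst_bucket: str, dst_key: str, chunk_size: int):
    s3_config = S3Config(verbose=False, timeout_read=900, timeout_connection=900)
    s3_creds = rclone.impl.get_s3_credentials(remote=dst)  # credentials for the destination remote
    s3_client = create_s3_client(s3_creds=s3_creds, s3_config=s3_config)

    # One Part per already-uploaded chunk object.
    parts = [Part(part_number=i + 1, s3_key=key) for i, key in enumerate(source_keys)]

    uploader = S3MultiPartUploader(s3_client=s3_client, verbose=True)
    state = uploader.begin_new_upload(
        parts=parts,
        destination_bucket=dst_bucket,
        destination_key=dst_key,
        chunk_size=chunk_size,
    )
    # Server-side copy of every part, then CompleteMultipartUpload.
    return uploader.start_upload(info=state, parts=parts, max_workers=10)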
rclone_api/process.py
CHANGED
@@ -1,12 +1,13 @@
 import atexit
 import subprocess
 import threading
-import time
 import weakref
 from dataclasses import dataclass
 from pathlib import Path
 from typing import Any
 
+import psutil
+
 from rclone_api.config import Config
 from rclone_api.util import clear_temp_config_file, get_verbose, make_temp_config_file
 
@@ -24,20 +25,25 @@ class ProcessArgs:
 
 class Process:
     def __init__(self, args: ProcessArgs) -> None:
-        assert
+        assert (
+            args.rclone_exe.exists()
+        ), f"rclone executable not found: {args.rclone_exe}"
         self.args = args
         self.log = args.log
         self.tempfile: Path | None = None
+
         verbose = get_verbose(args.verbose)
+        # Create a temporary config file if needed.
         if isinstance(args.rclone_conf, Config):
-            self.
-            self.
-            rclone_conf = self.
+            self.tempfile = make_temp_config_file()
+            self.tempfile.write_text(args.rclone_conf.text, encoding="utf-8")
+            rclone_conf = self.tempfile
         else:
             rclone_conf = args.rclone_conf
 
-        assert rclone_conf.exists()
+        assert rclone_conf.exists(), f"rclone config not found: {rclone_conf}"
 
+        # Build the command.
         self.cmd = (
             [str(args.rclone_exe.resolve())]
             + ["--config", str(rclone_conf.resolve())]
@@ -49,16 +55,14 @@ class Process:
         if verbose:
             cmd_str = subprocess.list2cmdline(self.cmd)
             print(f"Running: {cmd_str}")
-        kwargs: dict = {}
-        kwargs["shell"] = False
+        kwargs: dict = {"shell": False}
        if args.capture_stdout:
             kwargs["stdout"] = subprocess.PIPE
             kwargs["stderr"] = subprocess.STDOUT
 
         self.process = subprocess.Popen(self.cmd, **kwargs)  # type: ignore
 
-        # Register an atexit callback using a weak reference to avoid
-        # keeping the Process instance alive solely due to the callback.
+        # Register an atexit callback using a weak reference to avoid keeping the Process instance alive.
         self_ref = weakref.ref(self)
 
         def exit_cleanup():
@@ -77,39 +81,60 @@ class Process:
             self.cleanup()
 
     def cleanup(self) -> None:
-
+        if self.tempfile:
+            clear_temp_config_file(self.tempfile)
 
-    def
+    def _kill_process_tree(self) -> None:
         """
-
-        If the process does not exit within a short timeout, it is aggressively killed.
+        Use psutil to recursively terminate the main process and all its child processes.
         """
-
-
-
+        try:
+            parent = psutil.Process(self.process.pid)
+        except psutil.NoSuchProcess:
+            return
+
+        # Terminate child processes.
+        children = parent.children(recursive=True)
+        if children:
+            print(f"Terminating {len(children)} child processes...")
+            for child in children:
                 try:
-
-            self.process.terminate()
+                    child.terminate()
                 except Exception as e:
-            print(f"Error
-
-
-
-
-        time.sleep(0.1)
-        # If still running, kill aggressively.
-        if self.process.poll() is None:
+                    print(f"Error terminating child process {child.pid}: {e}")
+            psutil.wait_procs(children, timeout=2)
+            # Kill any that remain.
+            for child in children:
+                if child.is_running():
                     try:
-
+                        child.kill()
                     except Exception as e:
-            print(f"Error
-
+                        print(f"Error killing child process {child.pid}: {e}")
+
+        # Terminate the parent process.
+        if parent.is_running():
+            try:
+                parent.terminate()
+            except Exception as e:
+                print(f"Error terminating process {parent.pid}: {e}")
+            try:
+                parent.wait(timeout=3)
+            except psutil.TimeoutExpired:
                 try:
-
-            except Exception:
-
+                    parent.kill()
+                except Exception as e:
+                    print(f"Error killing process {parent.pid}: {e}")
+
+    def _atexit_terminate(self) -> None:
+        """
+        This method is registered via atexit and uses psutil to clean up the process tree.
+        It runs in a daemon thread so that termination happens without blocking interpreter shutdown.
+        """
+        if self.process.poll() is None:  # Process is still running.
+
+            def terminate_sequence():
+                self._kill_process_tree()
 
-        # Run the termination sequence in a separate daemon thread.
             t = threading.Thread(target=terminate_sequence, daemon=True)
             t.start()
             t.join(timeout=3)
@@ -122,12 +147,12 @@ class Process:
         self.cleanup()
 
     def kill(self) -> None:
-
-
+        """Forcefully kill the process tree."""
+        self._kill_process_tree()
 
     def terminate(self) -> None:
-
-
+        """Gracefully terminate the process tree."""
+        self._kill_process_tree()
 
     @property
     def returncode(self) -> int | None:
@@ -147,8 +172,8 @@ class Process:
     def wait(self) -> int:
         return self.process.wait()
 
-    def send_signal(self,
-
+    def send_signal(self, sig: int) -> None:
+        self.process.send_signal(sig)
 
     def __str__(self) -> str:
         state = ""
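The rewritten cleanup code replaces the old terminate/sleep/kill sequence with psutil. A minimal standalone sketch of that terminate-then-kill pattern (illustrative only, not code from the package) looks like this:

import psutil

def kill_process_tree(pid: int, timeout: float = 2.0) -> None:
    try:
        parent = psutil.Process(pid)
    except psutil.NoSuchProcess:
        return  # already gone
    children = parent.children(recursive=True)
    for child in children:
        try:
            child.terminate()  # polite SIGTERM first
        except psutil.NoSuchProcess:
            pass
    _, alive = psutil.wait_procs(children, timeout=timeout)
    for child in alive:
        child.kill()  # escalate to SIGKILL for stragglers
    if parent.is_running():
        parent.terminate()
        try:
            parent.wait(timeout=timeout)
        except psutil.TimeoutExpired:
            parent.kill()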
rclone_api/s3/api.py
CHANGED
@@ -20,11 +20,11 @@ _MIN_THRESHOLD_FOR_CHUNKING = 5 * 1024 * 1024
 
 
 class S3Client:
-    def __init__(self,
+    def __init__(self, s3_creds: S3Credentials, verbose: bool = False) -> None:
         self.verbose = verbose
-        self.credentials: S3Credentials =
+        self.credentials: S3Credentials = s3_creds
         self.client: BaseClient = create_s3_client(
-
+            s3_creds=s3_creds, s3_config=S3Config(verbose=verbose)
         )
 
     def list_bucket_contents(self, bucket_name: str) -> None:
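A short usage sketch of the updated constructor, assuming an existing Rclone instance; the make_client helper below is hypothetical and only reuses names shown in the diffs above.

from rclone_api.s3.api import S3Client

def make_client(rclone, remote: str) -> S3Client:
    # Credentials come from the rclone config for the given remote.
    creds = rclone.impl.get_s3_credentials(remote=remote)
    return S3Client(s3_creds=creds, verbose=True)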
rclone_api/s3/create.py
CHANGED
@@ -8,7 +8,7 @@ from botocore.config import Config
 from rclone_api.s3.types import S3Credentials, S3Provider
 
 _DEFAULT_BACKBLAZE_ENDPOINT = "https://s3.us-west-002.backblazeb2.com"
-_MAX_CONNECTIONS =
+_MAX_CONNECTIONS = 10
 _TIMEOUT_READ = 120
 _TIMEOUT_CONNECT = 60
 
@@ -30,14 +30,16 @@ class S3Config:
 # Create a Boto3 session and S3 client, this is back blaze specific.
 # Add a function if you want to use a different S3 provider.
 # If AWS support is added in a fork then please merge it back here.
-def _create_backblaze_s3_client(
+def _create_backblaze_s3_client(
+    s3_creds: S3Credentials, s3_config: S3Config
+) -> BaseClient:
     """Create and return an S3 client."""
-    region_name =
-    access_key =
-    secret_key =
-    endpoint_url =
+    region_name = s3_creds.region_name
+    access_key = s3_creds.access_key_id
+    secret_key = s3_creds.secret_access_key
+    endpoint_url = s3_creds.endpoint_url
     endpoint_url = endpoint_url or _DEFAULT_BACKBLAZE_ENDPOINT
-
+    s3_config.resolve_defaults()
     session = boto3.session.Session()  # type: ignore
     return session.client(
         service_name="s3",
@@ -48,9 +50,9 @@ def _create_backblaze_s3_client(creds: S3Credentials, config: S3Config) -> BaseC
         config=Config(
             signature_version="s3v4",
             region_name=region_name,
-            max_pool_connections=
-            read_timeout=
-            connect_timeout=
+            max_pool_connections=s3_config.max_pool_connections,
+            read_timeout=s3_config.timeout_read,
+            connect_timeout=s3_config.timeout_connection,
             # Note that BackBlase has a boko3 bug where it doesn't support the new
             # checksum header, the following line was an attempt of fix it on the newest
             # version of boto3, but it didn't work.
@@ -59,18 +61,20 @@ def _create_backblaze_s3_client(creds: S3Credentials, config: S3Config) -> BaseC
     )
 
 
-def _create_unknown_s3_client(
+def _create_unknown_s3_client(
+    s3_creds: S3Credentials, s3_config: S3Config
+) -> BaseClient:
     """Create and return an S3 client."""
-    access_key =
-    secret_key =
-    endpoint_url =
+    access_key = s3_creds.access_key_id
+    secret_key = s3_creds.secret_access_key
+    endpoint_url = s3_creds.endpoint_url
     if (endpoint_url is not None) and not (endpoint_url.startswith("http")):
-        if
+        if s3_config.verbose:
             warnings.warn(
                 f"Endpoint URL is schema naive: {endpoint_url}, assuming HTTPS"
             )
         endpoint_url = f"https://{endpoint_url}"
-
+    s3_config.resolve_defaults()
     session = boto3.session.Session()  # type: ignore
     return session.client(
         service_name="s3",
@@ -79,25 +83,25 @@ def _create_unknown_s3_client(creds: S3Credentials, config: S3Config) -> BaseCli
         endpoint_url=endpoint_url,
         config=Config(
             signature_version="s3v4",
-            region_name=
-            max_pool_connections=
-            read_timeout=
-            connect_timeout=
+            region_name=s3_creds.region_name,
+            max_pool_connections=s3_config.max_pool_connections,
+            read_timeout=s3_config.timeout_read,
+            connect_timeout=s3_config.timeout_connection,
         ),
     )
 
 
 def create_s3_client(
-
+    s3_creds: S3Credentials, s3_config: S3Config | None = None
 ) -> BaseClient:
     """Create and return an S3 client."""
-
-    provider =
+    s3_config = s3_config or S3Config()
+    provider = s3_creds.provider
     if provider == S3Provider.BACKBLAZE:
-        if
+        if s3_config.verbose:
             print("Creating BackBlaze S3 client")
-        return _create_backblaze_s3_client(
+        return _create_backblaze_s3_client(s3_creds=s3_creds, s3_config=s3_config)
     else:
-        if
+        if s3_config.verbose:
             print("Creating generic/unknown S3 client")
-        return _create_unknown_s3_client(
+        return _create_unknown_s3_client(s3_creds=s3_creds, s3_config=s3_config)
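A small sketch of how the renamed keyword arguments are meant to be consumed, assuming an S3Credentials object obtained elsewhere (for example from rclone); the timeout values are illustrative, not defaults from the package.

from rclone_api.s3.create import S3Config, create_s3_client

def build_client(s3_creds):
    # Provider-specific branching (Backblaze vs. generic) happens inside create_s3_client.
    cfg = S3Config(verbose=True, timeout_read=120, timeout_connection=60)
    return create_s3_client(s3_creds=s3_creds, s3_config=cfg)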
rclone_api/s3/merge_state.py
ADDED
@@ -0,0 +1,115 @@
+"""
+https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3/client/upload_part_copy.html
+  * client.upload_part_copy
+
+This module provides functionality for S3 multipart uploads, including copying parts
+from existing S3 objects using upload_part_copy.
+"""
+
+import json
+from dataclasses import dataclass
+from typing import Any, Callable
+
+from rclone_api.s3.multipart.finished_piece import FinishedPiece
+
+
+@dataclass
+class Part:
+    part_number: int
+    s3_key: str
+
+    def to_json(self) -> dict:
+        return {"part_number": self.part_number, "s3_key": self.s3_key}
+
+    @staticmethod
+    def from_json(json_dict: dict) -> "Part | Exception":
+        part_number = json_dict.get("part_number")
+        s3_key = json_dict.get("s3_key")
+        if part_number is None or s3_key is None:
+            return Exception(f"Invalid JSON: {json_dict}")
+        return Part(part_number=part_number, s3_key=s3_key)
+
+    @staticmethod
+    def from_json_array(json_array: list[dict]) -> list["Part"] | Exception:
+        try:
+            out: list[Part] = []
+            for j in json_array:
+                ok_or_err = Part.from_json(j)
+                if isinstance(ok_or_err, Exception):
+                    return ok_or_err
+                else:
+                    out.append(ok_or_err)
+            return out
+        except Exception as e:
+            return e
+
+
+class MergeState:
+
+    def __init__(self, finished: list[FinishedPiece], all_parts: list[Part]) -> None:
+        self.finished: list[FinishedPiece] = list(finished)
+        self.all_parts: list[Part] = list(all_parts)
+        self.callbacks: list[Callable[[FinishedPiece], None]] = []
+
+    def add_callback(self, callback: Callable[[FinishedPiece], None]) -> None:
+        self.callbacks.append(callback)
+
+    def on_finished(self, finished_piece: FinishedPiece) -> None:
+        for callback in list(self.callbacks):
+            callback(finished_piece)
+
+    @staticmethod
+    def from_json_array(json_array: dict) -> "MergeState | Exception":
+        try:
+            finished: list[FinishedPiece] = FinishedPiece.from_json_array(
+                json_array["finished"]
+            )
+            all_parts: list[Part | Exception] = [
+                Part.from_json(j) for j in json_array["all"]
+            ]
+            all_parts_no_err: list[Part] = [
+                p for p in all_parts if not isinstance(p, Exception)
+            ]
+            errs: list[Exception] = [p for p in all_parts if isinstance(p, Exception)]
+            if len(errs):
+                return Exception(f"Errors in parts: {errs}")
+            return MergeState(finished=finished, all_parts=all_parts_no_err)
+        except Exception as e:
+            return e
+
+    def to_json(self) -> dict:
+        finished = self.finished.copy()
+        all_parts = self.all_parts.copy()
+        return {
+            "finished": FinishedPiece.to_json_array(finished),
+            "all": [part.to_json() for part in all_parts],
+        }
+
+    def to_json_str(self) -> str:
+        return json.dumps(self.to_json(), indent=1)
+
+    def __str__(self):
+        return self.to_json_str()
+
+    def __repr__(self):
+        return self.to_json_str()
+
+    def write(self, rclone_impl: Any, dst: str) -> None:
+        from rclone_api.rclone_impl import RcloneImpl
+
+        assert isinstance(rclone_impl, RcloneImpl)
+        json_str = self.to_json_str()
+        rclone_impl.write_text(dst, json_str)
+
+    def read(self, rclone_impl: Any, src: str) -> None:
+        from rclone_api.rclone_impl import RcloneImpl
+
+        assert isinstance(rclone_impl, RcloneImpl)
+        json_str = rclone_impl.read_text(src)
+        if isinstance(json_str, Exception):
+            raise json_str
+        json_dict = json.loads(json_str)
+        ok_or_err = FinishedPiece.from_json_array(json_dict["finished"])
+        if isinstance(ok_or_err, Exception):
+            raise ok_or_err
+        self.finished = ok_or_err
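A hedged sketch of the checkpoint round trip this new module enables. The part keys and etag values below are illustrative, and FinishedPiece is assumed to accept etag and part_number keywords as shown in the uploader diff further down.

import json
from rclone_api.s3.merge_state import MergeState, Part
from rclone_api.s3.multipart.finished_piece import FinishedPiece

parts = [Part(part_number=i, s3_key=f"parts/part.{i:05d}") for i in range(1, 4)]
state = MergeState(finished=[FinishedPiece(etag='"abc123"', part_number=1)], all_parts=parts)

# Observe completed pieces as they arrive.
state.add_callback(lambda piece: print(f"finished part {piece.part_number}"))

# Serialize, then rebuild from the JSON dict ({"finished": [...], "all": [...]}).
payload = json.loads(state.to_json_str())
restored = MergeState.from_json_array(payload)
assert not isinstance(restored, Exception)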
rclone_api/s3/multipart/finished_piece.py
CHANGED
@@ -17,7 +17,9 @@ class FinishedPiece:
         return json.dumps(self.to_json(), indent=0)
 
     @staticmethod
-    def to_json_array(
+    def to_json_array(
+        parts: list["FinishedPiece | EndOfStream"] | list["FinishedPiece"],
+    ) -> list[dict]:
         non_none: list[FinishedPiece] = []
         for p in parts:
             if not isinstance(p, EndOfStream):
@@ -39,3 +41,15 @@ class FinishedPiece:
         if json is None:
             return EndOfStream()
         return FinishedPiece(**json)
+
+    @staticmethod
+    def from_json_array(json: dict) -> list["FinishedPiece"]:
+        tmp = [FinishedPiece.from_json(j) for j in json]
+        out: list[FinishedPiece] = []
+        for t in tmp:
+            if isinstance(t, FinishedPiece):
+                out.append(t)
+        return out
+
+    def __hash__(self) -> int:
+        return hash(self.part_number)
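A brief sketch of the new helpers, with illustrative etag values; only names from the hunks above are used.

from rclone_api.s3.multipart.finished_piece import FinishedPiece

pieces = [FinishedPiece(etag=f'"etag-{n}"', part_number=n) for n in (1, 2, 3)]
as_json = FinishedPiece.to_json_array(pieces)            # list[dict]
round_tripped = FinishedPiece.from_json_array(as_json)   # list[FinishedPiece]
unique = set(round_tripped)  # the new __hash__ hashes by part_number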
rclone_api/s3/s3_multipart_uploader_by_copy.py
CHANGED
@@ -14,6 +14,7 @@ from typing import Optional
 
 from botocore.client import BaseClient
 
+from rclone_api.s3.merge_state import MergeState, Part
 from rclone_api.s3.multipart.finished_piece import FinishedPiece
 from rclone_api.util import locked_print
 
@@ -27,8 +28,6 @@ class MultipartUploadInfo:
     object_name: str
     upload_id: str
     chunk_size: int
-    retries: int
-    file_size: Optional[int] = None
     src_file_path: Optional[Path] = None
 
 
@@ -37,7 +36,6 @@ def upload_part_copy_task(
     source_bucket: str,
     source_key: str,
     part_number: int,
-    retries: int = 3,
 ) -> FinishedPiece | Exception:
     """
     Upload a part by copying from an existing S3 object.
@@ -56,8 +54,8 @@ def upload_part_copy_task(
     copy_source = {"Bucket": source_bucket, "Key": source_key}
 
     # from botocore.exceptions import NoSuchKey
-
-    retries =
+    default_retries = 9
+    retries = default_retries + 1  # Add one for the initial attempt
     for retry in range(retries):
         params: dict = {}
         try:
@@ -82,18 +80,24 @@ def upload_part_copy_task(
 
             # Extract ETag from the response
             etag = part["CopyPartResult"]["ETag"]
-
+            out = FinishedPiece(etag=etag, part_number=part_number)
+            locked_print(f"Finished part {part_number} for {info.object_name}")
+            return out
 
         except Exception as e:
             msg = f"Error copying {copy_source} -> {info.object_name}: {e}, params={params}"
-            if "
+            if "An error occurred (InternalError)" in str(e):
+                locked_print(msg)
+            elif "NoSuchKey" in str(e):
                 locked_print(msg)
-                return e
             if retry == retries - 1:
                 locked_print(msg)
                 return e
             else:
                 locked_print(f"{msg}, retrying")
+                # sleep
+                sleep_time = 2**retry
+                locked_print(f"Sleeping for {sleep_time} seconds")
                 continue
 
     return Exception("Should not reach here")
@@ -132,17 +136,64 @@ def complete_multipart_upload_from_parts(
     return response.get("Location", f"s3://{info.bucket_name}/{info.object_name}")
 
 
-def
-
+def do_body_work(
+    info: MultipartUploadInfo,
     source_bucket: str,
-
-
+    max_workers: int,
+    merge_state: MergeState,
+) -> str | Exception:
+
+    futures: list[Future[FinishedPiece | Exception]] = []
+    parts = list(merge_state.all_parts)
+
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        semaphore = Semaphore(max_workers)
+        for part in parts:
+            part_number, s3_key = part.part_number, part.s3_key
+
+            def task(
+                info=info,
+                source_bucket=source_bucket,
+                s3_key=s3_key,
+                part_number=part_number,
+            ):
+                out = upload_part_copy_task(
+                    info=info,
+                    source_bucket=source_bucket,
+                    source_key=s3_key,
+                    part_number=part_number,
+                )
+                if isinstance(out, Exception):
+                    return out
+                merge_state.on_finished(out)
+                return out
+
+            fut = executor.submit(task)
+            fut.add_done_callback(lambda x: semaphore.release())
+            futures.append(fut)
+            semaphore.acquire()
+
+        # Upload parts by copying from source objects
+        finished_parts: list[FinishedPiece] = []
+
+        for fut in futures:
+            finished_part = fut.result()
+            if isinstance(finished_part, Exception):
+                executor.shutdown(wait=True, cancel_futures=True)
+                return finished_part
+            finished_parts.append(finished_part)
+
+        # Complete the multipart upload
+        return complete_multipart_upload_from_parts(info, finished_parts)
+
+
+def begin_upload(
+    s3_client: BaseClient,
+    parts: list[Part],
     destination_bucket: str,
     destination_key: str,
-    chunk_size: int,
-
-    retries: int = 3,
-) -> str:
+    chunk_size: int,
+) -> MultipartUploadInfo:
     """
     Finish a multipart upload by copying parts from existing S3 objects.
 
@@ -164,7 +215,6 @@ def finish_multipart_upload_from_keys(
     locked_print(
         f"Creating multipart upload for {destination_bucket}/{destination_key} from {len(parts)} source objects"
     )
-
     create_params: dict[str, str] = {
         "Bucket": destination_bucket,
         "Key": destination_key,
@@ -175,95 +225,103 @@ def finish_multipart_upload_from_keys(
     upload_id = mpu["UploadId"]
 
     # Create upload info
-
+    info = MultipartUploadInfo(
         s3_client=s3_client,
         bucket_name=destination_bucket,
         object_name=destination_key,
         upload_id=upload_id,
-        retries=retries,
         chunk_size=chunk_size,
-        file_size=final_size,
     )
+    return info
 
-    futures: list[Future[FinishedPiece | Exception]] = []
 
-
-
+def finish_multipart_upload_from_keys(
+    s3_client: BaseClient,
+    source_bucket: str,
+    parts: list[Part],
+    destination_bucket: str,
+    destination_key: str,
+    chunk_size: int,  # 5MB default
+    max_workers: int = 100,
+) -> str | Exception:
+    """
+    Finish a multipart upload by copying parts from existing S3 objects.
 
-
-
+    Args:
+        s3_client: Boto3 S3 client
+        source_bucket: Source bucket name
+        source_keys: List of source object keys to copy from
+        destination_bucket: Destination bucket name
+        destination_key: Destination object key
+        chunk_size: Size of each part in bytes
+        retries: Number of retry attempts
+        byte_ranges: Optional list of byte ranges corresponding to source_keys
 
-
-
-
-            source_key=source_key,
-            part_number=part_number,
-            retries=retries,
-        ):
-            return upload_part_copy_task(
-                info=info,
-                source_bucket=source_bucket,
-                source_key=source_key,
-                part_number=part_number,
-                retries=retries,
-            )
+    Returns:
+        The URL of the completed object
+    """
 
-
-        fut.add_done_callback(lambda x: semaphore.release())
-        futures.append(fut)
-        semaphore.acquire()
+    merge_state = MergeState(finished=[], all_parts=parts)
 
-
-
+    # Create upload info
+    info = begin_upload(
+        s3_client=s3_client,
+        parts=merge_state.all_parts,
+        destination_bucket=destination_bucket,
+        destination_key=destination_key,
+        chunk_size=chunk_size,
+    )
 
-
-
-
-
-
-
+    out = do_body_work(
+        info=info,
+        source_bucket=source_bucket,
+        max_workers=max_workers,
+        merge_state=merge_state,
+    )
+
+    return out
 
-
-
+
+_DEFAULT_MAX_WORKERS = 10
 
 
 class S3MultiPartUploader:
-    def __init__(self, s3_client: BaseClient, verbose: bool) -> None:
-        self.s3_client = s3_client
+    def __init__(self, s3_client: BaseClient, verbose: bool = False) -> None:
         self.verbose = verbose
+        self.client: BaseClient = s3_client
 
-    def
+    def begin_new_upload(
         self,
-
-        parts: list[tuple[int, str]],
+        parts: list[Part],
         destination_bucket: str,
         destination_key: str,
         chunk_size: int,
-
-
-
-        """
-        Finish a multipart upload by copying parts from existing S3 objects.
-
-        Args:
-            source_bucket: Source bucket name
-            source_keys: List of source object keys to copy from
-            destination_bucket: Destination bucket name
-            destination_key: Destination object key
-            chunk_size: Size of each part in bytes
-            retries: Number of retry attempts
-            byte_ranges: Optional list of byte ranges corresponding to source_keys
-
-        Returns:
-            The URL of the completed object
-        """
-        return finish_multipart_upload_from_keys(
-            s3_client=self.s3_client,
-            source_bucket=source_bucket,
+    ) -> MultipartUploadInfo:
+        return begin_upload(
+            s3_client=self.client,
             parts=parts,
             destination_bucket=destination_bucket,
             destination_key=destination_key,
             chunk_size=chunk_size,
-
-
+        )
+
+    def start_upload_resume(
+        self,
+        info: MultipartUploadInfo,
+        state: MergeState,
+        max_workers: int = _DEFAULT_MAX_WORKERS,
+    ) -> MultipartUploadInfo | Exception:
+        return Exception("Not implemented")
+
+    def start_upload(
+        self,
+        info: MultipartUploadInfo,
+        parts: list[Part],
+        max_workers: int = _DEFAULT_MAX_WORKERS,
+    ) -> str | Exception:
+        return do_body_work(
+            info=info,
+            source_bucket=info.bucket_name,
+            max_workers=max_workers,
+            merge_state=MergeState(finished=[], all_parts=parts),
         )
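For background, the boto3 calls this module wraps follow the standard multipart-copy sequence. The sketch below is generic boto3 usage (bucket and key names are placeholders), not package code; upload_part_copy performs the copy server-side, and every part except the last must be at least 5 MiB.

import boto3

def copy_parts_example(src_bucket: str, src_keys: list[str], dst_bucket: str, dst_key: str) -> str:
    s3 = boto3.client("s3")
    upload_id = s3.create_multipart_upload(Bucket=dst_bucket, Key=dst_key)["UploadId"]
    finished = []
    for n, key in enumerate(src_keys, start=1):
        # Server-side copy: no part data flows through this client.
        resp = s3.upload_part_copy(
            Bucket=dst_bucket,
            Key=dst_key,
            UploadId=upload_id,
            PartNumber=n,
            CopySource={"Bucket": src_bucket, "Key": key},
        )
        finished.append({"ETag": resp["CopyPartResult"]["ETag"], "PartNumber": n})
    s3.complete_multipart_upload(
        Bucket=dst_bucket,
        Key=dst_key,
        UploadId=upload_id,
        MultipartUpload={"Parts": finished},
    )
    return f"s3://{dst_bucket}/{dst_key}"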
rclone_api/util.py
CHANGED
@@ -5,10 +5,13 @@ import shutil
 import signal
 import subprocess
 import warnings
+import weakref
 from pathlib import Path
 from threading import Lock
 from typing import Any
 
+import psutil
+
 from rclone_api.config import Config
 from rclone_api.dir import Dir
 from rclone_api.remote import Remote
@@ -167,51 +170,109 @@ def rclone_execute(
     tmpfile: Path | None = None
     verbose = get_verbose(verbose)
 
-    # Handle the Path case for capture
-    output_file = None
+    # Handle the Path case for capture.
+    output_file: Path | None = None
     if isinstance(capture, Path):
         output_file = capture
-        capture = False  #
+        capture = False  # When redirecting to file, don't capture to memory.
     else:
         capture = capture if isinstance(capture, bool) else True
 
-    assert verbose is not None
-
     try:
+        # Create a temporary config file if needed.
         if isinstance(rclone_conf, Config):
             tmpfile = make_temp_config_file()
             tmpfile.write_text(rclone_conf.text, encoding="utf-8")
             rclone_conf = tmpfile
-
+
+        # Build the command line.
+        full_cmd = (
             [str(rclone_exe.resolve())] + ["--config", str(rclone_conf.resolve())] + cmd
         )
         if verbose:
-            cmd_str = subprocess.list2cmdline(
+            cmd_str = subprocess.list2cmdline(full_cmd)
             print(f"\nRunning: {cmd_str}")
 
-        #
+        # Prepare subprocess parameters.
+        proc_kwargs: dict[str, Any] = {
+            "encoding": "utf-8",
+            "shell": False,
+            "stderr": subprocess.PIPE,
+        }
+        file_handle = None
         if output_file:
-
-
-
-                stdout=f,
-                stderr=subprocess.PIPE,
-                encoding="utf-8",
-                check=False,
-                shell=False,
-            )
+            # Open the file for writing.
+            file_handle = open(output_file, "w", encoding="utf-8")
+            proc_kwargs["stdout"] = file_handle
         else:
-
-
-
+            proc_kwargs["stdout"] = subprocess.PIPE if capture else None
+
+        # Start the process.
+        process = subprocess.Popen(full_cmd, **proc_kwargs)
+
+        # Register an atexit callback that uses psutil to kill the process tree.
+        proc_ref = weakref.ref(process)
+
+        def cleanup():
+            proc = proc_ref()
+            if proc is None:
+                return
+            try:
+                parent = psutil.Process(proc.pid)
+            except psutil.NoSuchProcess:
+                return
+            # Terminate all child processes first.
+            children = parent.children(recursive=True)
+            if children:
+                print(f"Terminating {len(children)} child process(es)...")
+                for child in children:
+                    try:
+                        child.terminate()
+                    except Exception as e:
+                        print(f"Error terminating child {child.pid}: {e}")
+                psutil.wait_procs(children, timeout=2)
+                for child in children:
+                    if child.is_running():
+                        try:
+                            child.kill()
+                        except Exception as e:
+                            print(f"Error killing child {child.pid}: {e}")
+            # Now terminate the parent process.
+            if parent.is_running():
+                try:
+                    parent.terminate()
+                    parent.wait(timeout=3)
+                except (psutil.TimeoutExpired, Exception):
+                    try:
+                        parent.kill()
+                    except Exception as e:
+                        print(f"Error killing process {parent.pid}: {e}")
+
+        atexit.register(cleanup)
+
+        # Wait for the process to complete.
+        out, err = process.communicate()
+        # Close the file handle if used.
+        if file_handle:
+            file_handle.close()
+
+        cp: subprocess.CompletedProcess = subprocess.CompletedProcess(
+            args=full_cmd,
+            returncode=process.returncode,
+            stdout=out,
+            stderr=err,
+        )
+
+        # Warn or raise if return code is non-zero.
         if cp.returncode != 0:
-            cmd_str = subprocess.list2cmdline(
+            cmd_str = subprocess.list2cmdline(full_cmd)
             warnings.warn(
-                f"Error running: {cmd_str}, returncode: {cp.returncode}\n
+                f"Error running: {cmd_str}, returncode: {cp.returncode}\n"
+                f"{cp.stdout}\n{cp.stderr}"
             )
             if check:
                 raise subprocess.CalledProcessError(
-                    cp.returncode,
+                    cp.returncode, full_cmd, cp.stdout, cp.stderr
                 )
         return cp
     finally:
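A generic sketch of the Popen pattern rclone_execute now follows: stdout goes either to a file or to a pipe, and the result is wrapped in a CompletedProcess. The run helper below is hypothetical, not the package's function.

import subprocess
from pathlib import Path

def run(cmd: list[str], output_file: Path | None = None) -> subprocess.CompletedProcess:
    kwargs: dict = {"encoding": "utf-8", "shell": False, "stderr": subprocess.PIPE}
    handle = None
    if output_file:
        handle = open(output_file, "w", encoding="utf-8")
        kwargs["stdout"] = handle           # stream straight to disk
    else:
        kwargs["stdout"] = subprocess.PIPE  # capture in memory
    proc = subprocess.Popen(cmd, **kwargs)
    out, err = proc.communicate()
    if handle:
        handle.close()
    return subprocess.CompletedProcess(args=cmd, returncode=proc.returncode, stdout=out, stderr=err)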
{rclone_api-1.4.14.dist-info → rclone_api-1.4.18.dist-info}/RECORD
CHANGED
@@ -17,17 +17,17 @@ rclone_api/group_files.py,sha256=H92xPW9lQnbNw5KbtZCl00bD6iRh9yRbCuxku4j_3dg,803
 rclone_api/http_server.py,sha256=3fPBV6l50erTe32DyeJBNmsDrn5KuujsbmEAbx13T-c,8720
 rclone_api/log.py,sha256=VZHM7pNSXip2ZLBKMP7M1u-rp_F7zoafFDuR8CPUoKI,1271
 rclone_api/mount.py,sha256=TE_VIBMW7J1UkF_6HRCt8oi_jGdMov4S51bm2OgxFAM,10045
-rclone_api/process.py,sha256=
+rclone_api/process.py,sha256=tGooS5NLdPuqHh7hCH8SfK44A6LGftPQCPQUNgSo0a0,5714
 rclone_api/rclone_impl.py,sha256=xTTriz6-zn_aSrkY8B7wzT-zRXax7Og7ns6xu6-7O6g,48769
 rclone_api/remote.py,sha256=mTgMTQTwxUmbLjTpr-AGTId2ycXKI9mLX5L7PPpDIoc,520
 rclone_api/rpath.py,sha256=Y1JjQWcie39EgQrq-UtbfDz5yDLCwwfu27W7AQXllSE,2860
 rclone_api/scan_missing_folders.py,sha256=-8NCwpCaHeHrX-IepCoAEsX1rl8S-GOCxcIhTr_w3gA,4747
 rclone_api/types.py,sha256=HkpEZgZWhr5Gb04iHq5NxMRXxieWoN-PKmOfJFrg5Qg,12155
-rclone_api/util.py,sha256=
+rclone_api/util.py,sha256=yY72YKpmpT_ZM7AleVtPpl0YZZYQPTwTdqKn9qPwm8Y,9290
 rclone_api/assets/example.txt,sha256=lTBovRjiz0_TgtAtbA1C5hNi2ffbqnNPqkKg6UiKCT8,54
 rclone_api/cmd/analyze.py,sha256=RHbvk1G5ZUc3qLqlm1AZEyQzd_W_ZjcbCNDvW4YpTKQ,1252
 rclone_api/cmd/copy_large_s3.py,sha256=B17GliDQyAauNglJCpsey0d3eArT2DAcT9g684TMQk8,3514
-rclone_api/cmd/copy_large_s3_finish.py,sha256=
+rclone_api/cmd/copy_large_s3_finish.py,sha256=FeeFZsuc1Gh7Xpso3alvirTX1_eXvHoAIsddMrVdCyw,6521
 rclone_api/cmd/list_files.py,sha256=x8FHODEilwKqwdiU1jdkeJbLwOqUkUQuDWPo2u_zpf0,741
 rclone_api/cmd/save_to_db.py,sha256=ylvnhg_yzexM-m6Zr7XDiswvoDVSl56ELuFAdb9gqBY,1957
 rclone_api/db/__init__.py,sha256=OSRUdnSWUlDTOHmjdjVmxYTUNpTbtaJ5Ll9sl-PfZg0,40
@@ -37,20 +37,21 @@ rclone_api/detail/copy_file_parts.py,sha256=dpqZ0d7l195dZg6Vob2Ty43Uah1v0ozQu5kM
 rclone_api/detail/walk.py,sha256=-54NVE8EJcCstwDoaC_UtHm73R2HrZwVwQmsnv55xNU,3369
 rclone_api/experimental/flags.py,sha256=qCVD--fSTmzlk9hloRLr0q9elzAOFzPsvVpKM3aB1Mk,2739
 rclone_api/experimental/flags_base.py,sha256=ajU_czkTcAxXYU-SlmiCfHY7aCQGHvpCLqJ-Z8uZLk0,2102
-rclone_api/s3/api.py,sha256=
+rclone_api/s3/api.py,sha256=owoQ1H-R0hXcUozxC6sl53D7NmMOewHk2pUxK-ye8ms,4061
 rclone_api/s3/basic_ops.py,sha256=hK3366xhVEzEcjz9Gk_8lFx6MRceAk72cax6mUrr6ko,2104
 rclone_api/s3/chunk_task.py,sha256=waEYe-iYQ1_BR3NCS4BrzVrK9UANvH1EcbXx2I6Z_NM,6839
-rclone_api/s3/create.py,sha256=
-rclone_api/s3/
+rclone_api/s3/create.py,sha256=_Q-faQ4Zl8XKTB28gireRxVXWP-YNxoAK4bligxDtiI,3998
+rclone_api/s3/merge_state.py,sha256=FJOQvOVLvAe9E_M_UgPffrTSN2YzRYfBMnzkOtBTqwg,3939
+rclone_api/s3/s3_multipart_uploader_by_copy.py,sha256=Rww9S81pbCUUu_f72xkxU4HQ_xOf69_C5MyMmmNchcw,10339
 rclone_api/s3/types.py,sha256=cYI5MbXRNdT-ps5kGIRQaYrseHyx_ozT4AcwBABTKwk,1616
 rclone_api/s3/upload_file_multipart.py,sha256=V7syKjFyVIe4U9Ahl5XgqVTzt9akiew3MFjGmufLo2w,12503
 rclone_api/s3/multipart/file_info.py,sha256=8v_07_eADo0K-Nsv7F0Ac1wcv3lkIsrR3MaRCmkYLTQ,105
-rclone_api/s3/multipart/finished_piece.py,sha256=
+rclone_api/s3/multipart/finished_piece.py,sha256=6ev7MFOV3dWqylJFEttOIeoaEA74RMqNWh258L_ENnY,1732
 rclone_api/s3/multipart/upload_info.py,sha256=d6_OfzFR_vtDzCEegFfzCfWi2kUBUV4aXZzqAEVp1c4,1874
 rclone_api/s3/multipart/upload_state.py,sha256=f-Aq2NqtAaMUMhYitlICSNIxCKurWAl2gDEUVizLIqw,6019
-rclone_api-1.4.
-rclone_api-1.4.
-rclone_api-1.4.
-rclone_api-1.4.
-rclone_api-1.4.
-rclone_api-1.4.
+rclone_api-1.4.18.dist-info/LICENSE,sha256=b6pOoifSXiUaz_lDS84vWlG3fr4yUKwB8fzkrH9R8bQ,1064
+rclone_api-1.4.18.dist-info/METADATA,sha256=Ym9QwhV3G0YNmRao9cluDShuY7AEwclfGTDEz-B2xio,4628
+rclone_api-1.4.18.dist-info/WHEEL,sha256=rF4EZyR2XVS6irmOHQIJx2SUqXLZKRMUrjsg8UwN-XQ,109
+rclone_api-1.4.18.dist-info/entry_points.txt,sha256=fJteOlYVwgX3UbNuL9jJ0zUTuX2O79JFAeNgK7Sw7EQ,255
+rclone_api-1.4.18.dist-info/top_level.txt,sha256=EvZ7uuruUpe9RiUyEp25d1Keq7PWYNT0O_-mr8FCG5g,11
+rclone_api-1.4.18.dist-info/RECORD,,
Files without changes: {rclone_api-1.4.14.dist-info → rclone_api-1.4.18.dist-info}/LICENSE, WHEEL, entry_points.txt, top_level.txt.