ptctools-0.1.0-py3-none-any.whl
- ptctools/__init__.py +3 -0
- ptctools/_portainer.py +279 -0
- ptctools/_s3.py +150 -0
- ptctools/cli.py +28 -0
- ptctools/config.py +293 -0
- ptctools/db.py +544 -0
- ptctools/stack.py +367 -0
- ptctools/utils.py +416 -0
- ptctools/volume.py +359 -0
- ptctools-0.1.0.dist-info/METADATA +99 -0
- ptctools-0.1.0.dist-info/RECORD +14 -0
- ptctools-0.1.0.dist-info/WHEEL +4 -0
- ptctools-0.1.0.dist-info/entry_points.txt +2 -0
- ptctools-0.1.0.dist-info/licenses/LICENSE +201 -0
ptctools/db.py
ADDED
@@ -0,0 +1,544 @@
"""Database backup and restore commands."""

from __future__ import annotations

import os
import sys

import click

from ptctools._portainer import create_exec, start_exec, run_ephemeral_container
from ptctools._s3 import parse_s3_uri, is_s3_uri, get_s3_endpoint, get_s3_credentials


# Database type to backup path mapping
DB_BACKUP_PATHS = {
    "postgres": "/var/lib/postgresql/data/backup.sql.gz",
}

ALLOWED_DB_TYPES = list(DB_BACKUP_PATHS.keys())
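
# For reference, the helpers imported from ptctools._portainer are used below
# with the following shapes (inferred from their call sites in this module):
#   create_exec(portainer_url, access_token, endpoint_id, container_id, cmd)
#       -> exec instance ID (falsy on failure)
#   start_exec(portainer_url, access_token, endpoint_id, exec_id)
#       -> (exit_code, output_text)
#   run_ephemeral_container(portainer_url, access_token, endpoint_id, config)
#       -> (exit_code, container_logs)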


def run_mc_command(
    portainer_url: str,
    access_token: str,
    endpoint_id: int,
    volume_name: str,
    s3_endpoint: str,
    s3_access_key: str,
    s3_secret_key: str,
    mc_args: list[str],
) -> tuple[int, str]:
    """Run minio/mc command in ephemeral container.

    The volume is mounted at /data in the container.
    """
    # mc needs alias setup before running commands
    # We configure 's3' alias and run the command
    alias_cmd = f"mc alias set s3 {s3_endpoint} {s3_access_key} {s3_secret_key}"
    mc_cmd = "mc " + " ".join(mc_args)
    full_cmd = f"{alias_cmd} && {mc_cmd}"

    config = {
        "Image": "minio/mc:latest",
        "Entrypoint": ["sh", "-c"],
        "Cmd": [full_cmd],
        "HostConfig": {
            "Binds": [f"{volume_name}:/data"],
            "AutoRemove": False,
        },
    }

    return run_ephemeral_container(portainer_url, access_token, endpoint_id, config)
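
# The composed shell line run inside the minio/mc container looks like, e.g.:
#   mc alias set s3 https://s3.example.com <ACCESS_KEY> <SECRET_KEY> && mc cp ...
# (endpoint and credentials above are illustrative placeholders)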


def backup_database(
    portainer_url: str,
    access_token: str,
    endpoint_id: int,
    container_id: str,
    volume_name: str,
    db_user: str,
    db_name: str,
    db_type: str,
    output: str,
    s3_endpoint: str | None,
    s3_access_key: str | None,
    s3_secret_key: str | None,
) -> bool:
    """Backup database using pg_dump via exec, then optionally upload to S3 with mc."""
    backup_file = DB_BACKUP_PATHS[db_type]
    backup_filename = os.path.basename(backup_file)

    click.echo(f" Using container: {container_id[:12]}")

    # Step 1: Run pg_dump inside the database container
    click.echo(" Running pg_dump...")
    cmd = [
        "sh",
        "-c",
        f"pg_dump -U {db_user} {db_name} | gzip > {backup_file} && "
        f"stat -c %s {backup_file}",
    ]
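
    # With db_user="postgres" and db_name="mydb" (placeholders), the exec runs:
    #   pg_dump -U postgres mydb | gzip > /var/lib/postgresql/data/backup.sql.gz \
    #     && stat -c %s /var/lib/postgresql/data/backup.sql.gz
    # so the last line of output is the compressed dump size in bytes.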

    exec_id = create_exec(portainer_url, access_token, endpoint_id, container_id, cmd)
    if not exec_id:
        click.echo(" ✗ Failed to create exec instance")
        return False

    exit_code, output_text = start_exec(
        portainer_url, access_token, endpoint_id, exec_id
    )
    if exit_code != 0:
        click.echo(f" ✗ pg_dump failed with exit code {exit_code}")
        if output_text.strip():
            click.echo(f" {output_text.strip()}")
        return False

    # Parse file size from stat output
    try:
        file_size = int(output_text.strip().split("\n")[-1])
        click.echo(f" ✓ pg_dump completed ({file_size} bytes)")
    except (ValueError, IndexError):
        click.echo(" ✓ pg_dump completed")

    # Step 2: Handle output (local file or S3)
    if is_s3_uri(output):
        # Upload to S3 using minio/mc
        try:
            uri_endpoint, bucket, s3_path = parse_s3_uri(output)
        except click.ClickException as e:
            click.echo(f" ✗ {e.message}")
            return False

        try:
            endpoint = get_s3_endpoint(uri_endpoint, s3_endpoint)
        except click.ClickException as e:
            click.echo(f" ✗ {e.message}")
            return False

        if not s3_access_key or not s3_secret_key:
            click.echo(" ✗ S3 credentials required (S3_ACCESS_KEY, S3_SECRET_KEY)")
            return False

        click.echo(f" Uploading to S3: s3://{bucket}/{s3_path}...")
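
        # mc cp /data/backup.sql.gz s3/bucket/path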
        mc_exit_code, mc_logs = run_mc_command(
            portainer_url,
            access_token,
            endpoint_id,
            volume_name,
            endpoint,
            s3_access_key,
            s3_secret_key,
            ["cp", f"/data/{backup_filename}", f"s3/{bucket}/{s3_path}"],
        )

        if mc_exit_code != 0:
            click.echo(f" ✗ S3 upload failed with exit code {mc_exit_code}")
            if mc_logs:
                click.echo(f" {mc_logs}")
            return False

        click.echo(f" ✓ Uploaded to s3://{bucket}/{s3_path}")
        if mc_logs:
            for line in mc_logs.strip().split("\n"):
                if line.strip():
                    click.echo(f" {line.strip()}")

    else:
        # Copy backup file to local path
        click.echo(f" Saving to local file: {output}...")

        # Read file from container using base64 encoding
        read_cmd = ["sh", "-c", f"base64 {backup_file}"]
        read_exec_id = create_exec(
            portainer_url, access_token, endpoint_id, container_id, read_cmd
        )
        if not read_exec_id:
            click.echo(" ✗ Failed to read backup file from container")
            return False

        read_exit_code, b64_content = start_exec(
            portainer_url, access_token, endpoint_id, read_exec_id
        )
        if read_exit_code != 0:
            click.echo(f" ✗ Failed to read backup file: exit code {read_exit_code}")
            return False

        # Decode and save to output file
        import base64

        content = base64.b64decode(b64_content.strip())
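        # (The whole dump passes through memory here: base64 text from the
        # exec output, then the decoded bytes.)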

        # Ensure output directory exists
        output_dir = os.path.dirname(output)
        if output_dir and not os.path.exists(output_dir):
            os.makedirs(output_dir)

        with open(output, "wb") as f:
            f.write(content)

        click.echo(f" ✓ Saved to {output} ({len(content)} bytes)")

    # Step 3: Cleanup - delete the .sql.gz file from the container volume
    click.echo(" Cleaning up temporary backup file...")
    cleanup_cmd = ["sh", "-c", f"rm -f {backup_file}"]
    cleanup_exec_id = create_exec(
        portainer_url, access_token, endpoint_id, container_id, cleanup_cmd
    )
    if cleanup_exec_id:
        start_exec(portainer_url, access_token, endpoint_id, cleanup_exec_id)
        click.echo(" ✓ Cleanup completed")

    return True


def restore_database(
    portainer_url: str,
    access_token: str,
    endpoint_id: int,
    container_id: str,
    volume_name: str,
    db_user: str,
    db_name: str,
    db_type: str,
    input_path: str,
    s3_endpoint: str | None,
    s3_access_key: str | None,
    s3_secret_key: str | None,
) -> bool:
    """Restore database from local file or S3."""
    restore_path = DB_BACKUP_PATHS[db_type]
    restore_filename = os.path.basename(restore_path)

    click.echo(f" Using container: {container_id[:12]}")

    # Step 1: Get the backup file (from S3 or local)
    if is_s3_uri(input_path):
        # Download from S3 using minio/mc
        try:
            uri_endpoint, bucket, s3_path = parse_s3_uri(input_path)
        except click.ClickException as e:
            click.echo(f" ✗ {e.message}")
            return False

        try:
            endpoint = get_s3_endpoint(uri_endpoint, s3_endpoint)
        except click.ClickException as e:
            click.echo(f" ✗ {e.message}")
            return False

        if not s3_access_key or not s3_secret_key:
            click.echo(" ✗ S3 credentials required (S3_ACCESS_KEY, S3_SECRET_KEY)")
            return False

        click.echo(f" Downloading from S3: s3://{bucket}/{s3_path}...")

        # mc cp s3/bucket/path /data/backup.sql.gz
        mc_exit_code, mc_logs = run_mc_command(
            portainer_url,
            access_token,
            endpoint_id,
            volume_name,
            endpoint,
            s3_access_key,
            s3_secret_key,
            ["cp", f"s3/{bucket}/{s3_path}", f"/data/{restore_filename}"],
        )

        if mc_exit_code != 0:
            click.echo(f" ✗ S3 download failed with exit code {mc_exit_code}")
            if mc_logs:
                click.echo(f" {mc_logs}")
            return False

        click.echo(" ✓ Downloaded from S3")
        if mc_logs:
            for line in mc_logs.strip().split("\n"):
                if line.strip():
                    click.echo(f" {line.strip()}")

    else:
        # Upload local file to container volume
        click.echo(f" Uploading from local file: {input_path}...")
        if not os.path.exists(input_path):
            click.echo(f" ✗ File not found: {input_path}")
            return False

        import base64

        with open(input_path, "rb") as f:
            content = f.read()
        b64_content = base64.b64encode(content).decode("ascii")
        click.echo(f" ✓ Read {len(content)} bytes")

        # Write file to container using base64 decoding (in chunks)
        click.echo(" Uploading backup to container...")
        chunk_size = 65536  # 64KB chunks
        chunks = [
            b64_content[i : i + chunk_size]
            for i in range(0, len(b64_content), chunk_size)
        ]
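
        # Each 64 KiB slice of base64 text decodes to 48 KiB of binary data
        # (4 base64 characters encode 3 bytes); chunk_size is a multiple of 4,
        # so every slice is independently valid input for `base64 -d`.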

        # First chunk: create file
        write_cmd = ["sh", "-c", f"echo '{chunks[0]}' | base64 -d > {restore_path}"]
        write_exec_id = create_exec(
            portainer_url, access_token, endpoint_id, container_id, write_cmd
        )
        if not write_exec_id:
            click.echo(" ✗ Failed to write backup file to container")
            return False

        write_exit_code, _ = start_exec(
            portainer_url, access_token, endpoint_id, write_exec_id
        )
        if write_exit_code != 0:
            click.echo(f" ✗ Failed to write backup file: exit code {write_exit_code}")
            return False

        # Append remaining chunks
        for chunk in chunks[1:]:
            append_cmd = ["sh", "-c", f"echo '{chunk}' | base64 -d >> {restore_path}"]
            append_exec_id = create_exec(
                portainer_url, access_token, endpoint_id, container_id, append_cmd
            )
            if append_exec_id:
                start_exec(portainer_url, access_token, endpoint_id, append_exec_id)

        click.echo(" ✓ Backup file uploaded to container")

    # Step 2: Restore the database using psql
    click.echo(" Running restore...")
    # Use gunzip for .gz files, cat for plain SQL files
    if input_path.endswith(".gz"):
        read_cmd = f"gunzip -c {restore_path}"
    else:
        read_cmd = f"cat {restore_path}"
    psql_cmd = f"{read_cmd} | psql -U {db_user} -d {db_name}"
    cmd = ["sh", "-c", psql_cmd]
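    # For a gzipped backup with placeholder names this runs, e.g.:
    #   gunzip -c /var/lib/postgresql/data/backup.sql.gz | psql -U postgres -d mydb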

    exec_id = create_exec(portainer_url, access_token, endpoint_id, container_id, cmd)
    if not exec_id:
        click.echo(" ✗ Failed to create exec instance")
        return False

    exit_code, output = start_exec(portainer_url, access_token, endpoint_id, exec_id)
    if output.strip():
        # Show last 10 lines of output
        lines = output.strip().split("\n")[-10:]
        for line in lines:
            click.echo(f" {line}")

    # Step 3: Cleanup - delete the restore file from container
    cleanup_cmd = ["sh", "-c", f"rm -f {restore_path}"]
    cleanup_exec_id = create_exec(
        portainer_url, access_token, endpoint_id, container_id, cleanup_cmd
    )
    if cleanup_exec_id:
        start_exec(portainer_url, access_token, endpoint_id, cleanup_exec_id)

    if exit_code == 0:
        click.echo(" ✓ Database restore completed")
        return True
    else:
        click.echo(f" ✗ Restore failed with exit code {exit_code}")
        return False


@click.group()
def cli():
    """Database backup and restore commands."""
    pass


@cli.command()
@click.option("--url", "-u", required=True, help="Portainer base URL")
@click.option("--container-id", "-c", required=True, help="Database container ID")
@click.option(
    "--volume-name", "-v", required=True, help="Volume name where database stores data"
)
@click.option(
    "--output",
    "-o",
    required=True,
    help="Output: local file.sql.gz or s3://bucket/path/file.sql.gz",
)
@click.option("--s3-endpoint", help="S3/MinIO endpoint URL (or S3_ENDPOINT env var)")
@click.option("--endpoint-id", "-e", type=int, default=1, help="Portainer endpoint ID")
@click.option(
    "--db-type",
    "-t",
    type=click.Choice(ALLOWED_DB_TYPES),
    default="postgres",
    help="Database type (default: postgres)",
)
@click.option("--db-user", required=True, help="Database user")
@click.option("--db-name", required=True, help="Database name")
def backup(
    url: str,
    container_id: str,
    volume_name: str,
    output: str,
    s3_endpoint: str | None,
    endpoint_id: int,
    db_type: str,
    db_user: str,
    db_name: str,
):
    """Backup PostgreSQL database to local file or S3.

    Examples:
        ptctools db backup -u https://portainer.example.com -c abc123 -v db_data --db-user postgres --db-name mydb -o /tmp/backup.sql.gz

        ptctools db backup -u https://portainer.example.com -c abc123 -v db_data --db-user postgres --db-name mydb -o s3://mybucket/backups/backup.sql.gz --s3-endpoint https://s3.<region>.amazonaws.com
    """
    access_token = os.environ.get("PORTAINER_ACCESS_TOKEN")
    if not access_token:
        click.echo("Error: Missing PORTAINER_ACCESS_TOKEN", err=True)
        sys.exit(1)

    s3_access_key = os.environ.get("S3_ACCESS_KEY")
    s3_secret_key = os.environ.get("S3_SECRET_KEY")

    # Check S3 credentials if output is S3 URI
    if is_s3_uri(output):
        if not s3_access_key or not s3_secret_key:
            click.echo(
                "Error: Missing S3_ACCESS_KEY or S3_SECRET_KEY for S3 upload", err=True
            )
            sys.exit(1)

    portainer_url = url.rstrip("/")

    click.echo(f"Portainer URL: {portainer_url}")
    click.echo(f"Container ID: {container_id[:12]}")
    click.echo(f"Volume Name: {volume_name}")
    click.echo(f"Database Type: {db_type}")
    click.echo(f"Database: {db_name} (user: {db_user})")
    click.echo(f"Output: {output}")
    if is_s3_uri(output):
        endpoint = s3_endpoint or os.environ.get("S3_ENDPOINT")
        click.echo(f"S3 Endpoint: {endpoint}")
    click.echo()
    click.echo("=== Backing up database ===")

    success = backup_database(
        portainer_url,
        access_token,
        endpoint_id,
        container_id,
        volume_name,
        db_user,
        db_name,
        db_type,
        output,
        s3_endpoint,
        s3_access_key,
        s3_secret_key,
    )

    click.echo()
    click.echo(
        "=== Database backup complete ==="
        if success
        else "=== Database backup failed ==="
    )
    sys.exit(0 if success else 1)


@cli.command()
@click.option("--url", "-u", required=True, help="Portainer base URL")
@click.option("--container-id", "-c", required=True, help="Database container ID")
@click.option(
    "--volume-name", "-v", required=True, help="Volume name where database stores data"
)
@click.option(
    "--input",
    "-i",
    "input_path",
    required=True,
    help="Input: local file.sql.gz or s3://bucket/path/file.sql.gz",
)
@click.option("--s3-endpoint", help="S3/MinIO endpoint URL (or S3_ENDPOINT env var)")
@click.option("--endpoint-id", "-e", type=int, default=1, help="Portainer endpoint ID")
@click.option(
    "--db-type",
    "-t",
    type=click.Choice(ALLOWED_DB_TYPES),
    default="postgres",
    help="Database type (default: postgres)",
)
@click.option("--db-user", required=True, help="Database user")
@click.option("--db-name", required=True, help="Database name")
def restore(
    url: str,
    container_id: str,
    volume_name: str,
    input_path: str,
    s3_endpoint: str | None,
    endpoint_id: int,
    db_type: str,
    db_user: str,
    db_name: str,
):
    """Restore PostgreSQL database from local file or S3.

    Examples:
        ptctools db restore -u https://portainer.example.com -c abc123 -v db_data --db-user postgres --db-name mydb -i /tmp/backup.sql.gz

        ptctools db restore -u https://portainer.example.com -c abc123 -v db_data --db-user postgres --db-name mydb -i s3://mybucket/backups/backup.sql.gz --s3-endpoint https://s3.<region>.amazonaws.com
    """
    access_token = os.environ.get("PORTAINER_ACCESS_TOKEN")
    if not access_token:
        click.echo("Error: Missing PORTAINER_ACCESS_TOKEN", err=True)
        sys.exit(1)

    s3_access_key = os.environ.get("S3_ACCESS_KEY")
    s3_secret_key = os.environ.get("S3_SECRET_KEY")

    # Check S3 credentials if input is S3 URI
    if is_s3_uri(input_path):
        if not s3_access_key or not s3_secret_key:
            click.echo(
                "Error: Missing S3_ACCESS_KEY or S3_SECRET_KEY for S3 download",
                err=True,
            )
            sys.exit(1)

    portainer_url = url.rstrip("/")

    click.echo(f"Portainer URL: {portainer_url}")
    click.echo(f"Container ID: {container_id[:12]}")
    click.echo(f"Volume Name: {volume_name}")
    click.echo(f"Database Type: {db_type}")
    click.echo(f"Database: {db_name} (user: {db_user})")
    click.echo(f"Input: {input_path}")
    if is_s3_uri(input_path):
        endpoint = s3_endpoint or os.environ.get("S3_ENDPOINT")
        click.echo(f"S3 Endpoint: {endpoint}")
    click.echo()
    click.echo("=== Restoring database ===")

    success = restore_database(
        portainer_url,
        access_token,
        endpoint_id,
        container_id,
        volume_name,
        db_user,
        db_name,
        db_type,
        input_path,
        s3_endpoint,
        s3_access_key,
        s3_secret_key,
    )

    click.echo()
    click.echo(
        "=== Database restore complete ==="
        if success
        else "=== Database restore failed ==="
    )
    sys.exit(0 if success else 1)
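
For illustration only (this snippet is not part of the wheel): given the
signatures above, a programmatic call to backup_database might look like the
following, with the Portainer token taken from the environment as the CLI
commands do. All values are placeholders.

    import os
    from ptctools.db import backup_database

    ok = backup_database(
        portainer_url="https://portainer.example.com",
        access_token=os.environ["PORTAINER_ACCESS_TOKEN"],
        endpoint_id=1,
        container_id="abc123",
        volume_name="db_data",
        db_user="postgres",
        db_name="mydb",
        db_type="postgres",
        output="/tmp/backup.sql.gz",
        s3_endpoint=None,
        s3_access_key=os.environ.get("S3_ACCESS_KEY"),
        s3_secret_key=os.environ.get("S3_SECRET_KEY"),
    )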