kaqing 2.0.145__py3-none-any.whl → 2.0.174__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of kaqing might be problematic.
- adam/__init__.py +0 -2
- adam/app_session.py +8 -11
- adam/batch.py +3 -3
- adam/checks/check_utils.py +14 -46
- adam/checks/cpu.py +7 -1
- adam/checks/cpu_metrics.py +52 -0
- adam/checks/disk.py +2 -3
- adam/columns/columns.py +3 -1
- adam/columns/cpu.py +3 -1
- adam/columns/cpu_metrics.py +22 -0
- adam/columns/memory.py +3 -4
- adam/commands/__init__.py +22 -0
- adam/commands/alter_tables.py +33 -48
- adam/commands/audit/audit.py +22 -23
- adam/commands/audit/audit_repair_tables.py +14 -17
- adam/commands/audit/audit_run.py +15 -23
- adam/commands/audit/show_last10.py +10 -13
- adam/commands/audit/show_slow10.py +10 -13
- adam/commands/audit/show_top10.py +10 -13
- adam/commands/audit/utils_show_top10.py +2 -3
- adam/commands/bash/__init__.py +5 -0
- adam/commands/bash/bash.py +7 -104
- adam/commands/bash/utils_bash.py +16 -0
- adam/commands/cat.py +7 -23
- adam/commands/cd.py +7 -11
- adam/commands/check.py +14 -23
- adam/commands/cli_commands.py +2 -3
- adam/commands/code.py +20 -23
- adam/commands/command.py +152 -37
- adam/commands/commands_utils.py +8 -17
- adam/commands/cp.py +18 -32
- adam/commands/cql/cql_completions.py +11 -7
- adam/commands/cql/cqlsh.py +10 -30
- adam/commands/cql/{cql_utils.py → utils_cql.py} +147 -15
- adam/commands/deploy/code_start.py +7 -10
- adam/commands/deploy/code_stop.py +4 -21
- adam/commands/deploy/code_utils.py +3 -3
- adam/commands/deploy/deploy.py +4 -27
- adam/commands/deploy/deploy_frontend.py +14 -17
- adam/commands/deploy/deploy_pg_agent.py +2 -5
- adam/commands/deploy/deploy_pod.py +64 -68
- adam/commands/deploy/undeploy.py +4 -27
- adam/commands/deploy/undeploy_frontend.py +4 -7
- adam/commands/deploy/undeploy_pg_agent.py +4 -7
- adam/commands/deploy/undeploy_pod.py +9 -12
- adam/commands/devices/device.py +93 -2
- adam/commands/devices/device_app.py +37 -10
- adam/commands/devices/device_auit_log.py +8 -2
- adam/commands/devices/device_cass.py +47 -7
- adam/commands/devices/device_export.py +9 -11
- adam/commands/devices/device_postgres.py +41 -6
- adam/commands/exit.py +1 -4
- adam/commands/export/clean_up_all_export_sessions.py +37 -0
- adam/commands/export/clean_up_export_sessions.py +12 -8
- adam/commands/export/drop_export_database.py +7 -26
- adam/commands/export/drop_export_databases.py +5 -14
- adam/commands/export/export.py +8 -38
- adam/commands/export/export_databases.py +86 -27
- adam/commands/export/export_select.py +25 -27
- adam/commands/export/export_select_x.py +3 -3
- adam/commands/export/export_sessions.py +124 -0
- adam/commands/export/export_use.py +8 -17
- adam/commands/export/exporter.py +88 -158
- adam/commands/export/import_session.py +7 -35
- adam/commands/export/importer.py +12 -5
- adam/commands/export/importer_athena.py +21 -20
- adam/commands/export/importer_sqlite.py +16 -21
- adam/commands/export/show_column_counts.py +7 -25
- adam/commands/export/show_export_databases.py +4 -6
- adam/commands/export/show_export_session.py +7 -18
- adam/commands/export/show_export_sessions.py +9 -12
- adam/commands/export/utils_export.py +26 -1
- adam/commands/intermediate_command.py +49 -0
- adam/commands/issues.py +11 -43
- adam/commands/kubectl.py +3 -6
- adam/commands/login.py +22 -24
- adam/commands/logs.py +3 -6
- adam/commands/ls.py +8 -9
- adam/commands/medusa/medusa.py +4 -22
- adam/commands/medusa/medusa_backup.py +20 -25
- adam/commands/medusa/medusa_restore.py +34 -36
- adam/commands/medusa/medusa_show_backupjobs.py +14 -18
- adam/commands/medusa/medusa_show_restorejobs.py +11 -18
- adam/commands/nodetool.py +6 -15
- adam/commands/param_get.py +11 -13
- adam/commands/param_set.py +8 -12
- adam/commands/postgres/postgres.py +22 -38
- adam/commands/postgres/postgres_context.py +47 -23
- adam/commands/postgres/postgres_ls.py +4 -8
- adam/commands/postgres/postgres_preview.py +5 -9
- adam/commands/postgres/psql_completions.py +1 -1
- adam/commands/postgres/utils_postgres.py +70 -0
- adam/commands/preview_table.py +6 -45
- adam/commands/pwd.py +13 -16
- adam/commands/reaper/reaper.py +4 -27
- adam/commands/reaper/reaper_forward.py +48 -55
- adam/commands/reaper/reaper_forward_session.py +6 -0
- adam/commands/reaper/reaper_forward_stop.py +10 -16
- adam/commands/reaper/reaper_restart.py +7 -14
- adam/commands/reaper/reaper_run_abort.py +8 -33
- adam/commands/reaper/reaper_runs.py +42 -57
- adam/commands/reaper/reaper_runs_abort.py +29 -49
- adam/commands/reaper/reaper_schedule_activate.py +9 -32
- adam/commands/reaper/reaper_schedule_start.py +9 -32
- adam/commands/reaper/reaper_schedule_stop.py +9 -32
- adam/commands/reaper/reaper_schedules.py +4 -14
- adam/commands/reaper/reaper_status.py +8 -16
- adam/commands/reaper/utils_reaper.py +196 -0
- adam/commands/repair/repair.py +4 -22
- adam/commands/repair/repair_log.py +5 -11
- adam/commands/repair/repair_run.py +27 -34
- adam/commands/repair/repair_scan.py +32 -38
- adam/commands/repair/repair_stop.py +5 -11
- adam/commands/report.py +27 -29
- adam/commands/restart.py +25 -26
- adam/commands/rollout.py +19 -24
- adam/commands/shell.py +10 -4
- adam/commands/show/show.py +10 -26
- adam/commands/show/show_cassandra_repairs.py +35 -0
- adam/commands/show/show_cassandra_status.py +32 -43
- adam/commands/show/show_cassandra_version.py +5 -18
- adam/commands/show/show_commands.py +19 -24
- adam/commands/show/show_host.py +1 -1
- adam/commands/show/show_login.py +20 -27
- adam/commands/show/show_processes.py +15 -19
- adam/commands/show/show_storage.py +10 -20
- adam/commands/watch.py +26 -29
- adam/config.py +4 -16
- adam/embedded_params.py +1 -1
- adam/log.py +4 -4
- adam/pod_exec_result.py +3 -3
- adam/repl.py +31 -32
- adam/repl_commands.py +11 -11
- adam/repl_state.py +52 -26
- adam/sql/sql_completer.py +4 -6
- adam/sql/sql_state_machine.py +21 -14
- adam/sso/authn_ad.py +6 -8
- adam/sso/authn_okta.py +4 -6
- adam/sso/cred_cache.py +3 -5
- adam/sso/idp.py +9 -12
- adam/utils.py +393 -33
- adam/utils_athena.py +14 -13
- adam/utils_audits.py +12 -12
- adam/utils_issues.py +32 -0
- adam/utils_k8s/app_clusters.py +13 -18
- adam/utils_k8s/app_pods.py +2 -0
- adam/utils_k8s/cassandra_clusters.py +21 -18
- adam/utils_k8s/custom_resources.py +16 -17
- adam/utils_k8s/ingresses.py +2 -2
- adam/utils_k8s/jobs.py +7 -11
- adam/utils_k8s/k8s.py +87 -0
- adam/utils_k8s/pods.py +14 -76
- adam/utils_k8s/secrets.py +4 -4
- adam/utils_k8s/service_accounts.py +5 -4
- adam/utils_k8s/services.py +2 -2
- adam/utils_k8s/statefulsets.py +1 -12
- adam/utils_repl/state_machine.py +3 -3
- adam/utils_sqlite.py +78 -42
- adam/version.py +1 -1
- {kaqing-2.0.145.dist-info → kaqing-2.0.174.dist-info}/METADATA +1 -1
- kaqing-2.0.174.dist-info/RECORD +230 -0
- adam/commands/app.py +0 -67
- adam/commands/app_ping.py +0 -44
- adam/commands/export/clean_up_export_session.py +0 -53
- adam/commands/postgres/postgres_utils.py +0 -31
- adam/commands/reaper/reaper_session.py +0 -159
- adam/commands/show/show_app_actions.py +0 -56
- adam/commands/show/show_app_id.py +0 -47
- adam/commands/show/show_app_queues.py +0 -45
- adam/commands/show/show_repairs.py +0 -47
- kaqing-2.0.145.dist-info/RECORD +0 -227
- {kaqing-2.0.145.dist-info → kaqing-2.0.174.dist-info}/WHEEL +0 -0
- {kaqing-2.0.145.dist-info → kaqing-2.0.174.dist-info}/entry_points.txt +0 -0
- {kaqing-2.0.145.dist-info → kaqing-2.0.174.dist-info}/top_level.txt +0 -0
adam/utils_athena.py  CHANGED

@@ -1,23 +1,24 @@
+from collections.abc import Callable
 import functools
 import time
 import boto3
 import botocore
 
 from adam.config import Config
-from adam.utils import lines_to_tabular, log, log2
+from adam.utils import lines_to_tabular, log, log2, log_exc, wait_log
 
 # no state utility class
 class Athena:
     @functools.lru_cache()
     def database_names(like: str = None):
         # this function is called only from export currently
-
+        wait_log(f'Inspecting export database schema...')
 
         query = f"SELECT schema_name FROM information_schema.schemata WHERE schema_name <> 'information_schema'"
         if like:
             query = f"{query} AND schema_name like '{like}'"
 
-
+        with log_exc():
             state, reason, rs = Athena.query(query)
             if rs:
                 names = []
@@ -26,8 +27,6 @@ class Athena:
                     names.append(row_data[0])
 
                 return names
-        except:
-            pass
 
         return []
 
@@ -64,7 +63,7 @@
 
     @functools.lru_cache()
     def column_names(tables: list[str] = [], database: str = None, function: str = 'audit', partition_cols_only = False):
-
+        with log_exc():
            if not database:
                database = Config().get(f'{function}.athena.database', 'audit')
 
@@ -80,15 +79,13 @@
            _, _, rs = Athena.query(query)
            if rs:
                return [row['Data'][0].get('VarCharValue') for row in rs[1:]]
-        except:
-            # aws credentials not found
-            pass
 
        return []
 
-    def run_query(sql: str, database: str = None):
+    def run_query(sql: str, database: str = None, output: Callable[[str], str] = None):
        state, reason, rs = Athena.query(sql, database)
 
+       log_file = None
       if state == 'SUCCEEDED':
            if rs:
                column_info = rs[0]['Data']
@@ -98,14 +95,18 @@
                    row_data = [col.get('VarCharValue') if col else '' for col in row['Data']]
                    lines.append('\t'.join(row_data))
 
-
+                out = lines_to_tabular(lines, header='\t'.join(columns), separator='\t')
+                if output:
+                    log_file = output(out)
+                else:
+                    log(out)
 
-            return len(lines)
+            return len(lines), log_file
        else:
            log2(f"Query failed or was cancelled. State: {state}")
            log2(f"Reason: {reason}")
 
-        return 0
+        return 0, log_file
 
    def query(sql: str, database: str = None, function: str = 'audit') -> tuple[str, str, list]:
        region_name = Config().get(f'{function}.athena.region', 'us-west-2')
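A pattern that repeats throughout this release: bare try/except-pass blocks are replaced by a log_exc() context manager imported from adam.utils. Its implementation is not part of this diff; the sketch below is only a hypothetical shape inferred from the call sites (some take no argument, others pass a lambda that formats the exception), assuming the helper merely logs and suppresses the error.

# Hypothetical sketch only -- adam.utils.log_exc is not shown in this diff.
from contextlib import contextmanager
from typing import Callable, Optional

@contextmanager
def log_exc(msg: Optional[Callable[[Exception], str]] = None):
    # Run the body; swallow any exception and log it (formatted by `msg` when given).
    try:
        yield
    except Exception as e:
        print(msg(e) if msg else e)  # the real helper presumably routes this through adam's logging

Used as `with log_exc(): ...`, this keeps the "best effort, never crash" behavior of the old except/pass blocks while still surfacing the error somewhere.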
adam/utils_audits.py  CHANGED

@@ -4,7 +4,8 @@ import time
 import requests
 
 from adam.config import Config
-from adam.utils import log2
+from adam.utils import OffloadHandler, debug, log2, log_exc, offload
+from adam.utils_athena import Athena
 from adam.utils_net import get_my_host
 
 class AuditMeta:
@@ -32,7 +33,7 @@ class Audits:
         try:
             response = requests.post(audit_endpoint, json=payload, timeout=Config().get("audit.timeout", 10))
             if response.status_code in [200, 201]:
-
+                debug(response.text)
             else:
                 log2(f"Error: {response.status_code} {response.text}")
         except requests.exceptions.Timeout as e:
@@ -42,15 +43,13 @@
        checked_in = 0.0
        cluster_last_checked = 0.0
 
-        state, _, rs =
+        state, _, rs = Athena.query(f'select partitions_last_checked, clusters_last_checked from meta')
        if state == 'SUCCEEDED':
            if len(rs) > 1:
-
+                with log_exc():
                    row = rs[1]['Data']
                    checked_in = float(row[0]['VarCharValue'])
                    cluster_last_checked = float(row[1]['VarCharValue'])
-                except:
-                    pass
 
        return AuditMeta(checked_in, cluster_last_checked)
 
@@ -67,7 +66,7 @@
        try:
            response = requests.post(audit_endpoint, json=payload, timeout=Config().get("audit.timeout", 10))
            if response.status_code in [200, 201]:
-
+                debug(response.text)
            else:
                log2(f"Error: {response.status_code} {response.text}")
        except requests.exceptions.Timeout as e:
@@ -84,13 +83,11 @@
                    f'(select distinct c as name from audit where {Audits.date_from(dt_object)}) as c2',
                    'on c1.name = c2.name where c1.name is null'])
        log2(query)
-        state, _, rs =
+        state, _, rs = Athena.query(query)
        if state == 'SUCCEEDED':
            if len(rs) > 1:
-
+                with log_exc():
                    return [r['Data'][0]['VarCharValue'] for r in rs[1:]]
-                except:
-                    pass
 
        return []
 
@@ -99,4 +96,7 @@
        m = dt_object.strftime("%m")
        d = dt_object.strftime("%d")
 
-        return f"y = '{y}' and m = '{m}' and d >= '{d}' or y = '{y}' and m > '{m}' or y > '{y}'"
+        return f"y = '{y}' and m = '{m}' and d >= '{d}' or y = '{y}' and m > '{m}' or y > '{y}'"
+
+    def offload() -> OffloadHandler:
+        return offload(max_workers=Config().get('audit.workers', 3))
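For context, Athena.query(sql, database=None, function='audit') (adam/utils_athena.py above) returns a (state, reason, result_set) tuple, where the result set is the raw Athena ResultSet rows: row 0 carries the column headers and every cell is a dict with a 'VarCharValue' key. The call sites above consume it roughly as in this illustrative sketch; the query text comes from the code above, the rest of the snippet is an assumption.

# Illustrative sketch of consuming the (state, reason, rs) tuple returned by Athena.query.
state, reason, rs = Athena.query('select partitions_last_checked, clusters_last_checked from meta')
if state == 'SUCCEEDED' and rs and len(rs) > 1:
    header = [col.get('VarCharValue') for col in rs[0]['Data']]   # row 0: column names
    first = [col.get('VarCharValue') for col in rs[1]['Data']]    # rows 1..n: data
    print(dict(zip(header, first)))
else:
    print(f'query failed or returned no rows: {state} {reason}')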
adam/utils_issues.py  ADDED

@@ -0,0 +1,32 @@
+from adam.checks.check_result import CheckResult
+from adam.checks.issue import Issue
+from adam.repl_session import ReplSession
+from adam.utils import lines_to_tabular, log, log2
+
+class IssuesUtils:
+    def show(check_results: list[CheckResult], in_repl = False):
+        IssuesUtils.show_issues(CheckResult.collect_issues(check_results), in_repl=in_repl)
+
+    def show_issues(issues: list[Issue], in_repl = False):
+        if not issues:
+            log2('No issues found.')
+        else:
+            suggested = 0
+            log2(f'* {len(issues)} issues found.')
+            lines = []
+            for i, issue in enumerate(issues, start=1):
+                lines.append(f"{i}||{issue.category}||{issue.desc}")
+                lines.append(f"||statefulset||{issue.statefulset}@{issue.namespace}")
+                lines.append(f"||pod||{issue.pod}@{issue.namespace}")
+                if issue.details:
+                    lines.append(f"||details||{issue.details}")
+
+                if issue.suggestion:
+                    lines.append(f'||suggestion||{issue.suggestion}')
+                    if in_repl:
+                        ReplSession().prompt_session.history.append_string(issue.suggestion)
+                        suggested += 1
+            log(lines_to_tabular(lines, separator='||'))
+            if suggested:
+                log2()
+                log2(f'* {suggested} suggested commands are added to history. Press <Up> arrow to access them.')
adam/utils_k8s/app_clusters.py  CHANGED

@@ -1,10 +1,9 @@
-from concurrent.futures import ThreadPoolExecutor
 import sys
 from typing import TypeVar
 
 from adam.utils_k8s.app_pods import AppPods
 from adam.pod_exec_result import PodExecResult
-from adam.utils import log2
+from adam.utils import log, log2
 from adam.utils_k8s.pods import Pods
 from .kube_context import KubeContext
 
@@ -14,20 +13,16 @@ T = TypeVar('T')
 class AppClusters:
     def exec(pods: list[str], namespace: str, command: str, action: str = 'action',
              max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', background = False) -> list[PodExecResult]:
-
-
-
+        samples = 1 if on_any else sys.maxsize
+        msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
+        with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
+            results: list[PodExecResult] = exec.map(lambda pod: AppPods.exec(pod, namespace, command, False, False, shell, background))
+            for result in results:
+                if KubeContext.show_out(show_out):
+                    log(result.command)
+                    if result.stdout:
+                        log(result.stdout)
+                    if result.stderr:
+                        log2(result.stderr, file=sys.stderr)
 
-        return
-
-        def post(result, show_out: bool):
-            if KubeContext.show_out(show_out):
-                print(result.command)
-                if result.stdout:
-                    print(result.stdout)
-                if result.stderr:
-                    log2(result.stderr, file=sys.stderr)
-
-            return result
-
-        return Pods.on_pods(pods, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out, on_any=on_any, background=background)
+        return results
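AppClusters.exec above and CassandraClusters.exec below now funnel their pod fan-out through Pods.parallelize(pods, max_workers, samples, msg, action=...), whose body is not included in this diff (adam/utils_k8s/pods.py shrank by 62 lines). From the call sites it looks like a context manager that yields an executor-like object with a map() method. The following is a hypothetical sketch of that shape under those assumptions, not the actual implementation.

# Hypothetical sketch of the context-manager shape implied by the call sites;
# the real Pods.parallelize lives in adam/utils_k8s/pods.py and is not shown here.
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, TypeVar

T = TypeVar('T')

class _ParallelRun:
    def __init__(self, pods: list[str], max_workers: int, samples: int, msg: str, action: str = 'action'):
        self.pods = pods[:samples]                       # 'samples' caps fan-out (1 when on_any=True)
        self.workers = max_workers or len(self.pods) or 1
        self.msg = msg                                   # progress template, e.g. '... onto {size} pods'

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        return False                                     # let exceptions propagate

    def map(self, fn: Callable[[str], T]) -> list[T]:
        # Run fn(pod) for every selected pod on a thread pool and collect the results.
        with ThreadPoolExecutor(max_workers=self.workers) as ex:
            return list(ex.map(fn, self.pods))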
adam/utils_k8s/app_pods.py  CHANGED

@@ -1,3 +1,4 @@
+import functools
 from typing import List
 from kubernetes import client
 
@@ -8,6 +9,7 @@ from adam.repl_session import ReplSession
 
 # utility collection on app pods; methods are all static
 class AppPods:
+    @functools.lru_cache()
     def pod_names(namespace: str, env: str, app: str):
         return [pod.metadata.name for pod in AppPods.app_pods(namespace, env, app)]
 

adam/utils_k8s/cassandra_clusters.py  CHANGED

@@ -1,33 +1,36 @@
-from concurrent.futures import ThreadPoolExecutor
 import sys
 from typing import TypeVar
 
+from adam.config import Config
 from adam.utils_k8s.cassandra_nodes import CassandraNodes
 from adam.pod_exec_result import PodExecResult
-from adam.utils import log2
-from .
-from .
+from adam.utils import log, log2
+from adam.utils_k8s.pods import Pods
+from adam.utils_k8s.statefulsets import StatefulSets
 
 T = TypeVar('T')
 
 # utility collection on cassandra clusters; methods are all static
 class CassandraClusters:
-    def exec(
+    def exec(sts: str, namespace: str, command: str, action: str = 'action',
             max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', background = False, log_file = None) -> list[PodExecResult]:
-        def body(executor: ThreadPoolExecutor, pod: str, namespace: str, show_out: bool):
-            if executor:
-                return executor.submit(CassandraNodes.exec, pod, namespace, command, False, False, shell, background, log_file)
 
-
+        pods = StatefulSets.pod_names(sts, namespace)
+        samples = 1 if on_any else sys.maxsize
+        msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
+        with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
+            results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, background, log_file))
+            for result in results:
+                if show_out and not Config().is_debug():
+                    log(result.command)
+                    if result.stdout:
+                        log(result.stdout)
+                    if result.stderr:
+                        log2(result.stderr, file=sys.stderr)
 
-
-            if KubeContext.show_out(show_out):
-                print(result.command)
-                if result.stdout:
-                    print(result.stdout)
-                if result.stderr:
-                    log2(result.stderr, file=sys.stderr)
+        return results
 
-
+    def pod_names_by_host_id(sts: str, ns: str):
+        pods = StatefulSets.pods(sts, ns)
 
-        return
+        return {CassandraNodes.get_host_id(pod.metadata.name, ns): pod.metadata.name for pod in pods}

adam/utils_k8s/custom_resources.py  CHANGED

@@ -1,10 +1,10 @@
+import functools
 import re
-import time
 from kubernetes import client
 
 from adam.config import Config
 from .kube_context import KubeContext
-from adam.utils import
+from adam.utils import log2, log_exc
 
 
 # utility collection; methods are all static
@@ -19,14 +19,12 @@ class CustomResources:
         strip = Config().get('app.strip', '0')
 
         v1 = client.CustomObjectsApi()
-
+        with log_exc():
            c3cassandras = v1.list_cluster_custom_object(group=group, version=v, plural=plural)
            for c in c3cassandras.items():
                if c[0] == 'items':
                    for item in c[1]:
                        app_ids_by_ss[f"{item['metadata']['name']}@{item['metadata']['namespace']}"] = item['metadata']['labels'][label].strip(strip)
-        except Exception:
-            pass
 
        return app_ids_by_ss
 
@@ -121,11 +119,10 @@ class CustomResources:
        body = bkspecs
        pretty = 'true'
 
-
+        with log_exc(lambda e: "Exception when calling create_medusa_backupjob.create_namespaced_custom_object: %s\n" % e):
            api_instance.create_namespaced_custom_object(group, version, namespace, plural, body, pretty=pretty)
            log2(f"create_medusa_backupjob: created Full Backup {bkname}: {api_instance}")
-
-            log2("Exception when calling create_medusa_backupjob.create_namespaced_custom_object: %s\n" % e)
+
        return None
 
    def create_medusa_restorejob(restorejobname: str, bkname: str, dc: str, ns: str):
@@ -154,11 +151,10 @@
        body = rtspecs
        pretty = 'true'
 
-
+        with log_exc(lambda e: "Exception when calling create_medusa_restorejob.create_namespaced_custom_object: %s\n" % e):
            api_instance.create_namespaced_custom_object(group, version, namespace, plural, body, pretty=pretty)
            log2(f"create_medusa_restorejob: created Restore Job {restorejobname}: {api_instance}")
-
-            log2("Exception when calling create_medusa_restorejob.create_namespaced_custom_object: %s\n" % e)
+
        return None
 
    def medusa_show_backup_names(dc: str, ns: str) -> list[dict]:
@@ -171,6 +167,10 @@
 
        return None
 
+    def clear_caches():
+        CustomResources.medusa_show_backupjobs.cache_clear()
+
+    @functools.lru_cache()
    def medusa_show_backupjobs(dc: str, ns: str) -> list[dict]:
        api_instance = client.CustomObjectsApi()
        group = 'medusa.k8ssandra.io'
@@ -180,11 +180,10 @@
        pretty = 'true'
        label_selector = 'cassandra.datastax.com/datacenter=' + dc
 
-
+        with log_exc(lambda e: "Exception when calling medusa_show_backupjobs.list_namespaced_custom_object: %s\n" % e):
            api_response = api_instance.list_namespaced_custom_object(group, version, namespace, plural, pretty=pretty, label_selector=label_selector)
            return api_response['items']
-
-        log2("Exception when calling medusa_show_backupjobs.list_namespaced_custom_object: %s\n" % e)
+
        return None
 
    def medusa_show_restorejobs(dc: str, ns: str):
@@ -196,11 +195,11 @@
        pretty = 'true'
        label_selector = 'cassandra.datastax.com/datacenter=' + dc
        rtlist = []
-
+
+        with log_exc(lambda e: "Exception when calling medusa_show_restorejobs.list_namespaced_custom_object: %s\n" % e):
            api_response = api_instance.list_namespaced_custom_object(group, version, namespace, plural, pretty=pretty, label_selector=label_selector)
            for x in api_response['items']:
                rtlist.append(f"{x['metadata']['name']}\t{x['metadata']['creationTimestamp']}\t{x['status'].get('finishTime', '')}")
            return rtlist
-
-        log2("Exception when calling medusa_show_restorejobs.list_namespaced_custom_object: %s\n" % e)
+
        return None
adam/utils_k8s/ingresses.py  CHANGED

@@ -1,7 +1,7 @@
 from kubernetes import client
 
 from adam.config import Config
-from adam.utils import log2
+from adam.utils import debug, log2
 
 # utility collection on ingresses; methods are all static
 class Ingresses:
@@ -63,7 +63,7 @@ class Ingresses:
                log2(f"200 Ingress '{name}' in namespace '{namespace}' deleted successfully.")
            else:
                api.delete_namespaced_ingress(name=name, namespace=namespace)
-
+                debug(f"200 Ingress '{name}' in namespace '{namespace}' deleted successfully.")
        except client.ApiException as e:
            log2(f"Error deleting Ingress: {e}")
 
adam/utils_k8s/jobs.py  CHANGED

@@ -1,7 +1,7 @@
 from kubernetes import client
 from time import sleep
 from .pods import Pods
-from adam.utils import log2
+from adam.utils import log2, log_exc
 
 # utility collection on jobs; methods are all static
 class Jobs:
@@ -20,11 +20,10 @@ class Jobs:
            metadata=client.V1ObjectMeta(name=job_name),
            spec=spec)
 
-
+        with log_exc(lambda e: "Exception when calling BatchV1Apii->create_namespaced_job: %s\n" % e):
            client.BatchV1Api().create_namespaced_job(body=job, namespace=namespace)
            log2(f"Job {job_name} created in {namespace}")
-
-            log2("Exception when calling BatchV1Apii->create_namespaced_job: %s\n" % e)
+
        return
 
    def get_job_pods(job_name: str, namespace: str):
@@ -32,7 +31,7 @@
        return pods
 
    def delete(job_name: str, namespace: str, wait=True):
-
+        with log_exc(lambda e: "Exception when calling BatchV1Apii->delete_namespaced_job: %s\n" % e):
            client.BatchV1Api().delete_namespaced_job(name=job_name, namespace=namespace, propagation_policy='Background')
            if wait:
                while True:
@@ -41,14 +40,11 @@
                        return
                    sleep(5)
            log2(f"Job {job_name} in {namespace} deleted.")
-
-            log2("Exception when calling BatchV1Apii->delete_namespaced_job: %s\n" % e)
+
        return
 
    def get_logs(job_name: str, namespace: str):
        v1 = client.CoreV1Api()
-
+        with log_exc(lambda e: "Exception when calling CorV1Apii->list_namespaced_pod, cannot find job pod: %s\n" % e):
            pod_name = Jobs.get_job_pods(job_name, namespace).items[0].metadata.name
-            log2(v1.read_namespaced_pod_log(name=pod_name, namespace=namespace))
-        except Exception as e:
-            log2("Exception when calling CorV1Apii->list_namespaced_pod, cannot find job pod: %s\n" % e)
+            log2(v1.read_namespaced_pod_log(name=pod_name, namespace=namespace))
adam/utils_k8s/k8s.py  ADDED

@@ -0,0 +1,87 @@
+from collections.abc import Callable
+import re
+import portforward
+
+from adam.commands.command import InvalidStateException
+from adam.repl_state import ReplState
+from adam.utils import log2
+from adam.utils_k8s.kube_context import KubeContext
+
+class PortForwardHandler:
+    connections: dict[str, int] = {}
+
+    def __init__(self, state: ReplState, local_port: int, svc_or_pod: Callable[[bool],str], target_port: int):
+        self.state = state
+        self.local_port = local_port
+        self.svc_or_pod = svc_or_pod
+        self.target_port = target_port
+        self.forward_connection = None
+        self.pod = None
+
+    def __enter__(self) -> tuple[str, str]:
+        state = self.state
+
+        if not self.svc_or_pod:
+            log2('No service or pod found.')
+
+            raise InvalidStateException(state)
+
+        if KubeContext.in_cluster():
+            svc_name = self.svc_or_pod(True)
+            if not svc_name:
+                log2('No service found.')
+
+                raise InvalidStateException(state)
+
+            # cs-a526330d23-cs-a526330d23-default-sts-0 ->
+            # curl http://cs-a526330d23-cs-a526330d23-reaper-service.stgawsscpsr.svc.cluster.local:8080
+            groups = re.match(r'^(.*?-.*?-.*?-.*?-).*', state.sts)
+            if groups:
+                svc = f'{groups[1]}{svc_name}.{state.namespace}.svc.cluster.local:{self.target_port}'
+                return (svc, svc)
+            else:
+                raise InvalidStateException(state)
+        else:
+            pod = self.svc_or_pod(False)
+            if not pod:
+                log2('No pod found.')
+
+                raise InvalidStateException(state)
+
+            self.pod = pod
+            self.forward_connection = portforward.forward(state.namespace, pod, self.local_port, self.target_port)
+            if self.inc_connection_cnt() == 1:
+                self.forward_connection.__enter__()
+
+            return (f'localhost:{self.local_port}', f'{pod}:{self.target_port}')
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.forward_connection:
+            if not self.dec_connection_cnt():
+                return self.forward_connection.__exit__(exc_type, exc_val, exc_tb)
+
+        return False
+
+    def inc_connection_cnt(self):
+        id = self.connection_id(self.pod)
+        if id not in PortForwardHandler.connections:
+            PortForwardHandler.connections[id] = 1
+        else:
+            PortForwardHandler.connections[id] += 1
+
+        return PortForwardHandler.connections[id]
+
+    def dec_connection_cnt(self):
+        id = self.connection_id(self.pod)
+        if id not in PortForwardHandler.connections:
+            PortForwardHandler.connections[id] = 0
+        elif PortForwardHandler.connections[id] > 0:
+            PortForwardHandler.connections[id] -= 1
+
+        return PortForwardHandler.connections[id]
+
+    def connection_id(self, pod: str):
+        return f'{self.local_port}:{pod}:{self.target_port}'
+
+def port_forwarding(state: ReplState, local_port: int, svc_or_pod: Callable[[bool],str], target_port: int):
+    return PortForwardHandler(state, local_port, svc_or_pod, target_port)
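The new port_forwarding() helper is meant to be used as a context manager: __enter__ returns a (url, description) pair, either the in-cluster service DNS name or a localhost endpoint backed by a reference-counted portforward tunnel. A hedged usage sketch follows; the function name, service/pod names, and port numbers are illustrative assumptions, not code from the package.

# Illustrative usage only; names and ports below are assumptions.
from adam.repl_state import ReplState
from adam.utils_k8s.k8s import port_forwarding

def ping_reaper(state: ReplState):
    # Selector: return a service-name suffix when running in-cluster,
    # otherwise a pod name to open a local port-forward against.
    def svc_or_pod(in_cluster: bool) -> str:
        return 'reaper-service' if in_cluster else 'cs-demo-default-sts-0'

    with port_forwarding(state, 9001, svc_or_pod, 8080) as (url, target):
        print(f'talking to {target} via http://{url}')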
|