kaqing 2.0.98__py3-none-any.whl → 2.0.171__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- adam/__init__.py +0 -2
- adam/app_session.py +9 -7
- adam/batch.py +4 -18
- adam/checks/check_utils.py +14 -46
- adam/checks/cpu.py +7 -1
- adam/checks/cpu_metrics.py +52 -0
- adam/columns/columns.py +3 -1
- adam/columns/cpu.py +3 -1
- adam/columns/cpu_metrics.py +22 -0
- adam/commands/__init__.py +15 -0
- adam/commands/alter_tables.py +50 -61
- adam/commands/app_cmd.py +38 -0
- adam/commands/app_ping.py +8 -14
- adam/commands/audit/audit.py +43 -30
- adam/commands/audit/audit_repair_tables.py +26 -46
- adam/commands/audit/audit_run.py +50 -0
- adam/commands/audit/show_last10.py +48 -0
- adam/commands/audit/show_slow10.py +47 -0
- adam/commands/audit/show_top10.py +45 -0
- adam/commands/audit/utils_show_top10.py +59 -0
- adam/commands/bash/__init__.py +5 -0
- adam/commands/bash/bash.py +36 -0
- adam/commands/bash/bash_completer.py +93 -0
- adam/commands/bash/utils_bash.py +16 -0
- adam/commands/cat.py +50 -0
- adam/commands/cd.py +15 -91
- adam/commands/check.py +23 -18
- adam/commands/cli_commands.py +2 -3
- adam/commands/code.py +57 -0
- adam/commands/command.py +96 -40
- adam/commands/commands_utils.py +9 -19
- adam/commands/cp.py +33 -39
- adam/commands/cql/cql_completions.py +30 -8
- adam/commands/cql/cqlsh.py +12 -27
- adam/commands/cql/utils_cql.py +343 -0
- adam/commands/deploy/code_start.py +7 -10
- adam/commands/deploy/code_stop.py +4 -21
- adam/commands/deploy/code_utils.py +3 -3
- adam/commands/deploy/deploy.py +4 -21
- adam/commands/deploy/deploy_frontend.py +14 -17
- adam/commands/deploy/deploy_pg_agent.py +3 -6
- adam/commands/deploy/deploy_pod.py +67 -73
- adam/commands/deploy/deploy_utils.py +14 -24
- adam/commands/deploy/undeploy.py +4 -21
- adam/commands/deploy/undeploy_frontend.py +4 -7
- adam/commands/deploy/undeploy_pg_agent.py +6 -8
- adam/commands/deploy/undeploy_pod.py +11 -12
- adam/commands/devices/device.py +118 -0
- adam/commands/devices/device_app.py +173 -0
- adam/commands/devices/device_auit_log.py +49 -0
- adam/commands/devices/device_cass.py +185 -0
- adam/commands/devices/device_export.py +86 -0
- adam/commands/devices/device_postgres.py +144 -0
- adam/commands/devices/devices.py +25 -0
- adam/commands/exit.py +1 -4
- adam/commands/export/__init__.py +0 -0
- adam/commands/export/clean_up_all_export_sessions.py +37 -0
- adam/commands/export/clean_up_export_sessions.py +51 -0
- adam/commands/export/drop_export_database.py +55 -0
- adam/commands/export/drop_export_databases.py +43 -0
- adam/commands/export/export.py +53 -0
- adam/commands/export/export_databases.py +170 -0
- adam/commands/export/export_handlers.py +71 -0
- adam/commands/export/export_select.py +81 -0
- adam/commands/export/export_select_x.py +54 -0
- adam/commands/export/export_use.py +52 -0
- adam/commands/export/exporter.py +352 -0
- adam/commands/export/import_session.py +40 -0
- adam/commands/export/importer.py +67 -0
- adam/commands/export/importer_athena.py +80 -0
- adam/commands/export/importer_sqlite.py +47 -0
- adam/commands/export/show_column_counts.py +54 -0
- adam/commands/export/show_export_databases.py +36 -0
- adam/commands/export/show_export_session.py +48 -0
- adam/commands/export/show_export_sessions.py +44 -0
- adam/commands/export/utils_export.py +314 -0
- adam/commands/help.py +10 -6
- adam/commands/intermediate_command.py +49 -0
- adam/commands/issues.py +14 -40
- adam/commands/kubectl.py +38 -0
- adam/commands/login.py +28 -24
- adam/commands/logs.py +4 -6
- adam/commands/ls.py +11 -116
- adam/commands/medusa/medusa.py +4 -22
- adam/commands/medusa/medusa_backup.py +20 -24
- adam/commands/medusa/medusa_restore.py +30 -32
- adam/commands/medusa/medusa_show_backupjobs.py +16 -17
- adam/commands/medusa/medusa_show_restorejobs.py +12 -17
- adam/commands/nodetool.py +11 -17
- adam/commands/param_get.py +11 -12
- adam/commands/param_set.py +9 -10
- adam/commands/postgres/postgres.py +43 -36
- adam/commands/postgres/{postgres_session.py → postgres_context.py} +80 -46
- adam/commands/postgres/postgres_ls.py +4 -8
- adam/commands/postgres/postgres_preview.py +5 -9
- adam/commands/postgres/psql_completions.py +2 -2
- adam/commands/postgres/utils_postgres.py +66 -0
- adam/commands/preview_table.py +8 -61
- adam/commands/pwd.py +14 -44
- adam/commands/reaper/reaper.py +4 -24
- adam/commands/reaper/reaper_forward.py +48 -55
- adam/commands/reaper/reaper_forward_session.py +6 -0
- adam/commands/reaper/reaper_forward_stop.py +10 -16
- adam/commands/reaper/reaper_restart.py +7 -14
- adam/commands/reaper/reaper_run_abort.py +11 -30
- adam/commands/reaper/reaper_runs.py +42 -57
- adam/commands/reaper/reaper_runs_abort.py +29 -49
- adam/commands/reaper/reaper_schedule_activate.py +11 -30
- adam/commands/reaper/reaper_schedule_start.py +10 -29
- adam/commands/reaper/reaper_schedule_stop.py +10 -29
- adam/commands/reaper/reaper_schedules.py +4 -14
- adam/commands/reaper/reaper_status.py +8 -16
- adam/commands/reaper/utils_reaper.py +196 -0
- adam/commands/repair/repair.py +4 -22
- adam/commands/repair/repair_log.py +4 -7
- adam/commands/repair/repair_run.py +27 -29
- adam/commands/repair/repair_scan.py +31 -34
- adam/commands/repair/repair_stop.py +4 -7
- adam/commands/report.py +25 -21
- adam/commands/restart.py +25 -26
- adam/commands/rollout.py +19 -24
- adam/commands/shell.py +5 -4
- adam/commands/show/show.py +6 -19
- adam/commands/show/show_app_actions.py +26 -22
- adam/commands/show/show_app_id.py +8 -11
- adam/commands/show/show_app_queues.py +7 -10
- adam/commands/show/{show_repairs.py → show_cassandra_repairs.py} +8 -17
- adam/commands/show/show_cassandra_status.py +29 -33
- adam/commands/show/show_cassandra_version.py +4 -14
- adam/commands/show/show_commands.py +19 -21
- adam/commands/show/show_host.py +1 -1
- adam/commands/show/show_login.py +26 -24
- adam/commands/show/show_processes.py +16 -18
- adam/commands/show/show_storage.py +10 -20
- adam/commands/watch.py +26 -29
- adam/config.py +5 -14
- adam/embedded_params.py +1 -1
- adam/pod_exec_result.py +7 -1
- adam/repl.py +95 -131
- adam/repl_commands.py +48 -20
- adam/repl_state.py +270 -61
- adam/sql/sql_completer.py +105 -63
- adam/sql/sql_state_machine.py +618 -0
- adam/sql/term_completer.py +3 -0
- adam/sso/authn_ad.py +6 -5
- adam/sso/authn_okta.py +3 -3
- adam/sso/cred_cache.py +3 -2
- adam/sso/idp.py +3 -3
- adam/utils.py +439 -3
- adam/utils_app.py +98 -0
- adam/utils_athena.py +140 -87
- adam/utils_audits.py +106 -0
- adam/utils_issues.py +32 -0
- adam/utils_k8s/app_clusters.py +28 -0
- adam/utils_k8s/app_pods.py +33 -0
- adam/utils_k8s/cassandra_clusters.py +22 -20
- adam/utils_k8s/cassandra_nodes.py +4 -4
- adam/utils_k8s/custom_resources.py +5 -0
- adam/utils_k8s/ingresses.py +2 -2
- adam/utils_k8s/k8s.py +87 -0
- adam/utils_k8s/pods.py +77 -68
- adam/utils_k8s/secrets.py +4 -4
- adam/utils_k8s/service_accounts.py +5 -4
- adam/utils_k8s/services.py +2 -2
- adam/utils_k8s/statefulsets.py +1 -12
- adam/utils_net.py +4 -4
- adam/utils_repl/__init__.py +0 -0
- adam/utils_repl/automata_completer.py +48 -0
- adam/utils_repl/repl_completer.py +46 -0
- adam/utils_repl/state_machine.py +173 -0
- adam/utils_sqlite.py +109 -0
- adam/version.py +1 -1
- {kaqing-2.0.98.dist-info → kaqing-2.0.171.dist-info}/METADATA +1 -1
- kaqing-2.0.171.dist-info/RECORD +236 -0
- adam/commands/app.py +0 -67
- adam/commands/bash.py +0 -92
- adam/commands/cql/cql_table_completer.py +0 -8
- adam/commands/cql/cql_utils.py +0 -115
- adam/commands/describe/describe.py +0 -47
- adam/commands/describe/describe_keyspace.py +0 -60
- adam/commands/describe/describe_keyspaces.py +0 -49
- adam/commands/describe/describe_schema.py +0 -49
- adam/commands/describe/describe_table.py +0 -60
- adam/commands/describe/describe_tables.py +0 -49
- adam/commands/devices.py +0 -118
- adam/commands/postgres/postgres_utils.py +0 -31
- adam/commands/postgres/psql_table_completer.py +0 -11
- adam/commands/reaper/reaper_session.py +0 -159
- adam/sql/state_machine.py +0 -460
- kaqing-2.0.98.dist-info/RECORD +0 -191
- /adam/commands/{describe → devices}/__init__.py +0 -0
- {kaqing-2.0.98.dist-info → kaqing-2.0.171.dist-info}/WHEEL +0 -0
- {kaqing-2.0.98.dist-info → kaqing-2.0.171.dist-info}/entry_points.txt +0 -0
- {kaqing-2.0.98.dist-info → kaqing-2.0.171.dist-info}/top_level.txt +0 -0
adam/utils_app.py
ADDED
@@ -0,0 +1,98 @@
+import json
+from typing import Union
+
+from adam.app_session import AppSession
+from adam.apps import Apps
+from adam.pod_exec_result import PodExecResult
+from adam.repl_state import ReplState
+from adam.utils import log2
+from adam.utils_k8s.app_clusters import AppClusters
+from adam.utils_k8s.app_pods import AppPods
+
+class AppRestHandler:
+    def __init__(self, state: ReplState, forced = False):
+        self.state = state
+        self.forced = forced
+
+    def __enter__(self):
+        return self.post
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        return False
+
+    def post(self, args: list[str]) -> Union[ReplState, str]:
+        if not args:
+            return 'arg missing'
+
+        t_f = args[0].split('.')
+        if len(t_f) < 2:
+            return 'arg missing'
+
+        state = self.state
+
+        payload, valid = Apps().payload(t_f[0], t_f[1], args[1:] if len(args) > 1 else [])
+        if not valid:
+            log2('Missing one or more action arguments.')
+            return state
+
+        if payload:
+            try:
+                payload = json.loads(payload)
+            except json.decoder.JSONDecodeError as e:
+                log2(f'Invalid json argument: {e}')
+                return state
+
+        AppSession.run(state.app_env, state.app_app, state.namespace, t_f[0], t_f[1], payload=payload, forced=self.forced)
+
+        return state
+
+class AppPodService:
+    def __init__(self, handler: 'AppHandler'):
+        self.handler = handler
+
+    def exec(self, command: str, show_out = True) -> Union[PodExecResult, list[PodExecResult]]:
+        state = self.handler.state
+
+        if state.app_pod:
+            return AppPods.exec(state.app_pod, state.namespace, command, show_out=show_out, shell='bash')
+        elif state.app_app:
+            pods = AppPods.pod_names(state.namespace, state.app_env, state.app_app)
+            return AppClusters.exec(pods, state.namespace, command, action='bash', show_out=show_out, shell='bash')
+
+        return []
+
+    def post(self, args: list[str], forced=False) -> Union[ReplState, str]:
+        state = self.handler.state
+
+        if not args:
+            return 'arg missing'
+
+        t_f = args[0].split('.')
+        if len(t_f) < 2:
+            return 'arg missing'
+
+        payload, valid = Apps().payload(t_f[0], t_f[1], args[1:] if len(args) > 1 else [])
+        if not valid:
+            log2('Missing one or more action arguments.')
+            return state
+
+        if payload:
+            try:
+                payload = json.loads(payload)
+            except json.decoder.JSONDecodeError as e:
+                log2(f'Invalid json argument: {e}')
+                return state
+
+        AppSession.run(state.app_env, state.app_app, state.namespace, t_f[0], t_f[1], payload=payload, forced=forced)
+
+        return state
+
+class AppHandler:
+    def __init__(self, state: ReplState):
+        self.state = state
+
+    def __enter__(self):
+        return AppPodService(self)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        return False
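A hedged usage sketch, not part of the diff: how a REPL command might drive these new context managers, assuming a ReplState whose namespace, app_env, app_app (or app_pod) fields were populated elsewhere. The command string, type.action pair, and JSON payload below are illustrative only.

from adam.repl_state import ReplState
from adam.utils_app import AppHandler, AppRestHandler

state = ReplState()  # assumption: namespace/app_env/app_app already set by the REPL

# run a bash command on the selected app pod(s); AppPodService.exec picks single-pod vs. cluster fan-out
with AppHandler(state) as app:
    results = app.exec('uptime')

# post an app action; the first arg is a '<type>.<action>' pair, the rest feeds Apps().payload
with AppRestHandler(state, forced=True) as post:
    post(['SomeType.someAction', '{"key": "value"}'])  # hypothetical type/action and payload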
adam/utils_athena.py
CHANGED
@@ -1,92 +1,145 @@
 import functools
 import time
 import boto3
+import botocore
 
 from adam.config import Config
-from adam.utils import lines_to_tabular, log, log2
[old lines 7-92, all removed, are collapsed in the diff view]
+from adam.utils import lines_to_tabular, log, log2, wait_log
+
+# no state utility class
+class Athena:
+    @functools.lru_cache()
+    def database_names(like: str = None):
+        # this function is called only from export currently
+        wait_log(f'Inspecting export database schema...')
+
+        query = f"SELECT schema_name FROM information_schema.schemata WHERE schema_name <> 'information_schema'"
+        if like:
+            query = f"{query} AND schema_name like '{like}'"
+
+        try:
+            state, reason, rs = Athena.query(query)
+            if rs:
+                names = []
+                for row in rs[1:]:
+                    row_data = [col.get('VarCharValue') if col else '' for col in row['Data']]
+                    names.append(row_data[0])
+
+                return names
+        except:
+            pass
+
+        return []
+
+    def clear_cache(cache: str = None):
+        if not cache or cache == 'databases':
+            Athena.database_names.cache_clear()
+        if not cache or cache == 'tables':
+            Athena.table_names.cache_clear()
+        if not cache or cache == 'columns':
+            Athena.column_names.cache_clear()
+
+    @functools.lru_cache()
+    def table_names(database: str = 'audit', function: str = 'audit'):
+        table_names = []
+        try:
+            region_name = Config().get(f'{function}.athena.region', 'us-west-2')
+            database_name = Config().get(f'{function}.athena.database', database)
+            catalog_name = Config().get(f'{function}.athena.catalog', 'AwsDataCatalog')
+
+            athena_client = boto3.client('athena', region_name=region_name)
+            paginator = athena_client.get_paginator('list_table_metadata')
+
+            for page in paginator.paginate(CatalogName=catalog_name, DatabaseName=database_name):
+                for table_metadata in page.get('TableMetadataList', []):
+                    table_names.append(table_metadata['Name'])
+        except botocore.exceptions.NoCredentialsError as e:
+            # aws credentials not found
+            if function == 'audit':
+                log2(f'Please configure AWS credentials to Audit Log Database.')
+        except:
+            pass
+
+        return table_names
+
+    @functools.lru_cache()
+    def column_names(tables: list[str] = [], database: str = None, function: str = 'audit', partition_cols_only = False):
+        try:
+            if not database:
+                database = Config().get(f'{function}.athena.database', 'audit')
+
+            if not tables:
+                tables = Config().get(f'{function}.athena.tables', 'audit').split(',')
+
+            table_names = "'" + "','".join([table.strip() for table in tables]) + "'"
+
+            query = f"select column_name from information_schema.columns where table_name in ({table_names}) and table_schema = '{database}'"
+            if partition_cols_only:
+                query = f"{query} and extra_info = 'partition key'"
+
+            _, _, rs = Athena.query(query)
+            if rs:
+                return [row['Data'][0].get('VarCharValue') for row in rs[1:]]
+        except:
+            # aws credentials not found
+            pass
+
+        return []
+
+    def run_query(sql: str, database: str = None):
+        state, reason, rs = Athena.query(sql, database)
+
+        if state == 'SUCCEEDED':
+            if rs:
+                column_info = rs[0]['Data']
+                columns = [col.get('VarCharValue') for col in column_info]
+                lines = []
+                for row in rs[1:]:
+                    row_data = [col.get('VarCharValue') if col else '' for col in row['Data']]
+                    lines.append('\t'.join(row_data))
+
+                log(lines_to_tabular(lines, header='\t'.join(columns), separator='\t'))
+
+                return len(lines)
+        else:
+            log2(f"Query failed or was cancelled. State: {state}")
+            log2(f"Reason: {reason}")
+
+        return 0
+
+    def query(sql: str, database: str = None, function: str = 'audit') -> tuple[str, str, list]:
+        region_name = Config().get(f'{function}.athena.region', 'us-west-2')
+        athena_client = boto3.client('athena', region_name=region_name)
+
+        if not database:
+            database = Config().get(f'{function}.athena.database', 'audit')
+
+        s3_output_location = Config().get(f'{function}.athena.output', f's3://s3.ops--{function}/ddl/results')
+
+        response = athena_client.start_query_execution(
+            QueryString=sql,
+            QueryExecutionContext={
+                'Database': database
+            },
+            ResultConfiguration={
+                'OutputLocation': s3_output_location
+            }
+        )
+
+        query_execution_id = response['QueryExecutionId']
+
+        while True:
+            query_status = athena_client.get_query_execution(QueryExecutionId=query_execution_id)
+            state = query_status['QueryExecution']['Status']['State']
+            if state in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
+                break
+            time.sleep(1)
+
+        if state == 'SUCCEEDED':
+            results_response = athena_client.get_query_results(QueryExecutionId=query_execution_id)
+            if results_response['ResultSet']['Rows']:
+                return (state, None, results_response['ResultSet']['Rows'])
+
+            return (state, None, [])
+        else:
+            return (state, query_status['QueryExecution']['Status'].get('StateChangeReason'), [])
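A hedged usage sketch of the reworked Athena helper, not from the package: it assumes AWS credentials plus the audit.athena.* config defaults shown above, and the SQL strings are illustrative only.

from adam.utils_athena import Athena

# low-level: returns (state, failure reason, raw result rows); row 0 carries the column headers
state, reason, rows = Athena.query("select count(*) from audit", database='audit')
if state == 'SUCCEEDED':
    print(len(rows) - 1)

# high-level: prints a tab-separated table via lines_to_tabular and returns the data row count
Athena.run_query("select c, count(*) from audit group by c")

# schema lookups are memoized with functools.lru_cache; drop them when the schema changes
print(Athena.table_names(database='audit'))
Athena.clear_cache('tables')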
adam/utils_audits.py
ADDED
@@ -0,0 +1,106 @@
+from datetime import datetime
+import getpass
+import time
+import requests
+
+from adam.config import Config
+from adam.utils import OffloadHandler, debug, log2, offload
+from adam.utils_athena import Athena
+from adam.utils_net import get_my_host
+
+class AuditMeta:
+    def __init__(self, partitions_last_checked: float, cluster_last_checked: float):
+        self.partitions_last_checked = partitions_last_checked
+        self.cluster_last_checked = cluster_last_checked
+
+# no state utility class
+class Audits:
+    PARTITIONS_ADDED = 'partitions-added'
+    ADD_CLUSTERS = 'add-clusters'
+
+    def log(cmd: str, cluster = 'NA', drive: str = 'NA', duration: float = 0.0, audit_extra = None):
+        payload = {
+            'cluster': cluster if cluster else 'NA',
+            'ts': time.time(),
+            'host': get_my_host(),
+            'user': getpass.getuser(),
+            'line': cmd.replace('"', '""').replace('\n', ' '),
+            'drive': drive,
+            'duration': duration,
+            'audit_extra': audit_extra if audit_extra else '',
+        }
+        audit_endpoint = Config().get("audit.endpoint", "https://4psvtaxlcb.execute-api.us-west-2.amazonaws.com/prod/")
+        try:
+            response = requests.post(audit_endpoint, json=payload, timeout=Config().get("audit.timeout", 10))
+            if response.status_code in [200, 201]:
+                debug(response.text)
+            else:
+                log2(f"Error: {response.status_code} {response.text}")
+        except requests.exceptions.Timeout as e:
+            log2(f"Timeout occurred: {e}")
+
+    def get_meta() -> AuditMeta:
+        checked_in = 0.0
+        cluster_last_checked = 0.0
+
+        state, _, rs = Athena.query(f'select partitions_last_checked, clusters_last_checked from meta')
+        if state == 'SUCCEEDED':
+            if len(rs) > 1:
+                try:
+                    row = rs[1]['Data']
+                    checked_in = float(row[0]['VarCharValue'])
+                    cluster_last_checked = float(row[1]['VarCharValue'])
+                except:
+                    pass
+
+        return AuditMeta(checked_in, cluster_last_checked)
+
+    def put_meta(action: str, meta: AuditMeta, clusters: list[str] = None):
+        payload = {
+            'action': action,
+            'partitions-last-checked': meta.partitions_last_checked,
+            'clusters-last-checked': meta.cluster_last_checked
+        }
+        if clusters:
+            payload['clusters'] = clusters
+
+        audit_endpoint = Config().get("audit.endpoint", "https://4psvtaxlcb.execute-api.us-west-2.amazonaws.com/prod/")
+        try:
+            response = requests.post(audit_endpoint, json=payload, timeout=Config().get("audit.timeout", 10))
+            if response.status_code in [200, 201]:
+                debug(response.text)
+            else:
+                log2(f"Error: {response.status_code} {response.text}")
+        except requests.exceptions.Timeout as e:
+            log2(f"Timeout occurred: {e}")
+
+    def find_new_clusters(cluster_last_checked: float) -> list[str]:
+        dt_object = datetime.fromtimestamp(cluster_last_checked)
+
+        # select distinct c2.name from cluster as c1 right outer join
+        #   (select distinct c as name from audit where y = '1969' and m = '12' and d >= '31' or y = '1969' and m > '12' or y > '1969') as c2
+        #   on c1.name = c2.name where c1.name is null
+        query = '\n '.join([
+            'select distinct c2.name from cluster as c1 right outer join',
+            f'(select distinct c as name from audit where {Audits.date_from(dt_object)}) as c2',
+            'on c1.name = c2.name where c1.name is null'])
+        log2(query)
+        state, _, rs = Athena.query(query)
+        if state == 'SUCCEEDED':
+            if len(rs) > 1:
+                try:
+                    return [r['Data'][0]['VarCharValue'] for r in rs[1:]]
+                except:
+                    pass
+
+        return []
+
+    def date_from(dt_object: datetime):
+        y = dt_object.strftime("%Y")
+        m = dt_object.strftime("%m")
+        d = dt_object.strftime("%d")
+
+        return f"y = '{y}' and m = '{m}' and d >= '{d}' or y = '{y}' and m > '{m}' or y > '{y}'"
+
+    def offload() -> OffloadHandler:
+        return offload(max_workers=Config().get('audit.workers', 3))
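A hedged usage sketch of the new audit helpers, not from the package; the command text and cluster name are placeholders, and Audits.log posts to the audit.endpoint configured above.

from datetime import datetime
from adam.utils_audits import Audits

# record one audit row for a command execution (cluster/drive/duration are optional)
Audits.log('show cassandra status', cluster='example-cluster', duration=1.2)

# read the bookkeeping row and build the y/m/d partition predicate used by find_new_clusters
meta = Audits.get_meta()
print(Audits.date_from(datetime.fromtimestamp(meta.cluster_last_checked)))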
adam/utils_issues.py
ADDED
@@ -0,0 +1,32 @@
+from adam.checks.check_result import CheckResult
+from adam.checks.issue import Issue
+from adam.repl_session import ReplSession
+from adam.utils import lines_to_tabular, log, log2
+
+class IssuesUtils:
+    def show(check_results: list[CheckResult], in_repl = False):
+        IssuesUtils.show_issues(CheckResult.collect_issues(check_results), in_repl=in_repl)
+
+    def show_issues(issues: list[Issue], in_repl = False):
+        if not issues:
+            log2('No issues found.')
+        else:
+            suggested = 0
+            log2(f'* {len(issues)} issues found.')
+            lines = []
+            for i, issue in enumerate(issues, start=1):
+                lines.append(f"{i}||{issue.category}||{issue.desc}")
+                lines.append(f"||statefulset||{issue.statefulset}@{issue.namespace}")
+                lines.append(f"||pod||{issue.pod}@{issue.namespace}")
+                if issue.details:
+                    lines.append(f"||details||{issue.details}")
+
+                if issue.suggestion:
+                    lines.append(f'||suggestion||{issue.suggestion}')
+                    if in_repl:
+                        ReplSession().prompt_session.history.append_string(issue.suggestion)
+                        suggested += 1
+            log(lines_to_tabular(lines, separator='||'))
+            if suggested:
+                log2()
+                log2(f'* {suggested} suggested commands are added to history. Press <Up> arrow to access them.')
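A hedged usage sketch of IssuesUtils, not from the package. The Issue constructor arguments are assumed from the attributes read above (category, desc, statefulset, namespace, pod, details, suggestion) and may not match its real signature.

from adam.checks.issue import Issue
from adam.utils_issues import IssuesUtils

# assumption: Issue accepts these keyword arguments; all values are placeholders
issue = Issue(category='storage', desc='disk usage above threshold',
              statefulset='example-sts', namespace='example-ns', pod='example-sts-0',
              details=None, suggestion='show storage')
IssuesUtils.show_issues([issue], in_repl=False)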
adam/utils_k8s/app_clusters.py
ADDED
@@ -0,0 +1,28 @@
+import sys
+from typing import TypeVar
+
+from adam.utils_k8s.app_pods import AppPods
+from adam.pod_exec_result import PodExecResult
+from adam.utils import log, log2
+from adam.utils_k8s.pods import Pods
+from .kube_context import KubeContext
+
+T = TypeVar('T')
+
+# utility collection on app clusters; methods are all static
+class AppClusters:
+    def exec(pods: list[str], namespace: str, command: str, action: str = 'action',
+             max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', background = False) -> list[PodExecResult]:
+        samples = 1 if on_any else sys.maxsize
+        msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
+        with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
+            results: list[PodExecResult] = exec.map(lambda pod: AppPods.exec(pod, namespace, command, False, False, shell, background))
+            for result in results:
+                if KubeContext.show_out(show_out):
+                    log(result.command)
+                    if result.stdout:
+                        log(result.stdout)
+                    if result.stderr:
+                        log2(result.stderr, file=sys.stderr)
+
+            return results
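A hedged usage sketch of the new AppClusters fan-out, not from the diff; pod names and namespace are placeholders, and a configured kubernetes client context is assumed.

from adam.utils_k8s.app_clusters import AppClusters

pods = ['example-app-0', 'example-app-1']  # hypothetical pod names
results = AppClusters.exec(pods, 'example-ns', 'hostname', action='bash', shell='bash')
for r in results:
    print(r.pod, r.stdout)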
adam/utils_k8s/app_pods.py
ADDED
@@ -0,0 +1,33 @@
+import functools
+from typing import List
+from kubernetes import client
+
+from adam.config import Config
+from adam.utils_k8s.pods import Pods
+from adam.pod_exec_result import PodExecResult
+from adam.repl_session import ReplSession
+
+# utility collection on app pods; methods are all static
+class AppPods:
+    @functools.lru_cache()
+    def pod_names(namespace: str, env: str, app: str):
+        return [pod.metadata.name for pod in AppPods.app_pods(namespace, env, app)]
+
+    def app_pods(namespace: str, env: str, app: str) -> List[client.V1Pod]:
+        v1 = client.CoreV1Api()
+
+        env_key = Config().get('app.env', 'c3__env-0')
+        app_key = Config().get('app.app', 'c3__app-0')
+        label_selector = f'applicationGroup=c3,{env_key}=0{env}0,{app_key}=0{app}0'
+
+        return v1.list_namespaced_pod(namespace, label_selector=label_selector).items
+
+    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh', background = False) -> PodExecResult:
+        container = Config().get('app.container-name', 'c3-server')
+        r = Pods.exec(pod_name, container, namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, background = background)
+
+        if r and Config().get('repl.history.push-cat-remote-log-file', True):
+            if r.log_file and ReplSession().prompt_session:
+                ReplSession().prompt_session.history.append_string(f'@{r.pod} cat {r.log_file}')
+
+        return r
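A hedged usage sketch of AppPods, not from the diff; it assumes a loaded kubernetes config and uses placeholder namespace/env/app values that feed the label selector shown above.

from adam.utils_k8s.app_pods import AppPods

names = AppPods.pod_names('example-ns', 'dev', 'example-app')  # memoized via functools.lru_cache
if names:
    result = AppPods.exec(names[0], 'example-ns', 'ls /tmp', shell='bash')
    print(result.stdout)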
adam/utils_k8s/cassandra_clusters.py
CHANGED
@@ -1,34 +1,36 @@
-from concurrent.futures import ThreadPoolExecutor
 import sys
 from typing import TypeVar
 
+from adam.config import Config
 from adam.utils_k8s.cassandra_nodes import CassandraNodes
 from adam.pod_exec_result import PodExecResult
-from adam.utils import log2
-from .
-from .
-from .kube_context import KubeContext
+from adam.utils import log, log2
+from adam.utils_k8s.pods import Pods
+from adam.utils_k8s.statefulsets import StatefulSets
 
 T = TypeVar('T')
 
 # utility collection on cassandra clusters; methods are all static
 class CassandraClusters:
-    def exec(
-        max_workers=0, show_out=True, on_any = False, shell = '/bin/sh') -> list[PodExecResult]:
-        def body(executor: ThreadPoolExecutor, pod: str, namespace: str, show_out: bool):
-            if executor:
-                return executor.submit(CassandraNodes.exec, pod, namespace, command, False, False, shell)
+    def exec(sts: str, namespace: str, command: str, action: str = 'action',
+             max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', background = False, log_file = None) -> list[PodExecResult]:
 
-
+        pods = StatefulSets.pod_names(sts, namespace)
+        samples = 1 if on_any else sys.maxsize
+        msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
+        with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
+            results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, background, log_file))
+            for result in results:
+                if show_out and not Config().is_debug():
+                    log(result.command)
+                    if result.stdout:
+                        log(result.stdout)
+                    if result.stderr:
+                        log2(result.stderr, file=sys.stderr)
 
-
-        if KubeContext.show_out(show_out):
-            print(result.command)
-            if result.stdout:
-                print(result.stdout)
-            if result.stderr:
-                log2(result.stderr, file=sys.stderr)
+            return results
 
-
+    def pod_names_by_host_id(sts: str, ns: str):
+        pods = StatefulSets.pods(sts, ns)
 
-        return
+        return {CassandraNodes.get_host_id(pod.metadata.name, ns): pod.metadata.name for pod in pods}
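A hedged usage sketch of the reworked CassandraClusters.exec, not from the diff: it now targets a statefulset name rather than an explicit pod list and fans out through Pods.parallelize; the statefulset and namespace below are placeholders.

from adam.utils_k8s.cassandra_clusters import CassandraClusters

results = CassandraClusters.exec('example-sts', 'example-ns', 'nodetool status', action='nodetool')
for r in results:
    print(r.pod, r.stdout)

# map Cassandra host ids back to pod names, e.g. when matching repair/reaper output to pods
by_host = CassandraClusters.pod_names_by_host_id('example-sts', 'example-ns')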
adam/utils_k8s/cassandra_nodes.py
CHANGED
@@ -6,12 +6,12 @@ from adam.repl_session import ReplSession
 
 # utility collection on cassandra nodes; methods are all static
 class CassandraNodes:
-    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh') -> PodExecResult:
-        r = Pods.exec(pod_name, "cassandra", namespace, command, show_out = show_out, throw_err = throw_err, shell = shell)
+    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh', background = False, log_file = None) -> PodExecResult:
+        r = Pods.exec(pod_name, "cassandra", namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, background = background, log_file=log_file)
 
         if r and Config().get('repl.history.push-cat-remote-log-file', True):
-            if r.log_file:
-                ReplSession().prompt_session.history.append_string(f'
+            if r.log_file and ReplSession().prompt_session:
+                ReplSession().prompt_session.history.append_string(f'@{r.pod} cat {r.log_file}')
 
         return r
 
adam/utils_k8s/custom_resources.py
CHANGED
@@ -1,3 +1,4 @@
+import functools
 import re
 import time
 from kubernetes import client
@@ -171,6 +172,10 @@ class CustomResources:
 
         return None
 
+    def clear_caches():
+        CustomResources.medusa_show_backupjobs.cache_clear()
+
+    @functools.lru_cache()
     def medusa_show_backupjobs(dc: str, ns: str) -> list[dict]:
         api_instance = client.CustomObjectsApi()
         group = 'medusa.k8ssandra.io'
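A hedged usage sketch of the newly memoized Medusa lookup, not from the diff; the datacenter and namespace values are placeholders.

from adam.utils_k8s.custom_resources import CustomResources

jobs = CustomResources.medusa_show_backupjobs('example-dc', 'example-ns')  # cached by functools.lru_cache
CustomResources.clear_caches()  # invalidate the cached list, e.g. after creating a new backup job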
adam/utils_k8s/ingresses.py
CHANGED
@@ -1,7 +1,7 @@
 from kubernetes import client
 
 from adam.config import Config
-from adam.utils import log2
+from adam.utils import debug, log2
 
 # utility collection on ingresses; methods are all static
 class Ingresses:
@@ -63,7 +63,7 @@ class Ingresses:
                 log2(f"200 Ingress '{name}' in namespace '{namespace}' deleted successfully.")
             else:
                 api.delete_namespaced_ingress(name=name, namespace=namespace)
-
+                debug(f"200 Ingress '{name}' in namespace '{namespace}' deleted successfully.")
         except client.ApiException as e:
             log2(f"Error deleting Ingress: {e}")
 