kaqing 2.0.93-py3-none-any.whl → 2.0.115-py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of kaqing might be problematic.
- adam/apps.py +2 -2
- adam/batch.py +2 -16
- adam/checks/check_utils.py +4 -4
- adam/checks/compactionstats.py +1 -1
- adam/checks/cpu.py +2 -2
- adam/checks/disk.py +1 -1
- adam/checks/gossip.py +1 -1
- adam/checks/memory.py +3 -3
- adam/checks/status.py +1 -1
- adam/commands/alter_tables.py +3 -14
- adam/commands/app.py +3 -3
- adam/commands/app_ping.py +2 -2
- adam/commands/audit/audit.py +26 -11
- adam/commands/audit/audit_repair_tables.py +39 -4
- adam/commands/audit/audit_run.py +58 -0
- adam/commands/audit/show_last10.py +51 -0
- adam/commands/audit/show_slow10.py +50 -0
- adam/commands/audit/show_top10.py +49 -0
- adam/commands/audit/utils_show_top10.py +59 -0
- adam/commands/bash/bash.py +124 -0
- adam/commands/bash/bash_completer.py +93 -0
- adam/commands/cat.py +55 -0
- adam/commands/cd.py +26 -14
- adam/commands/check.py +6 -0
- adam/commands/cli_commands.py +3 -3
- adam/commands/code.py +60 -0
- adam/commands/command.py +9 -4
- adam/commands/commands_utils.py +4 -5
- adam/commands/cql/cql_completions.py +7 -3
- adam/commands/cql/cql_utils.py +103 -11
- adam/commands/cql/cqlsh.py +10 -5
- adam/commands/deploy/code_utils.py +2 -2
- adam/commands/deploy/deploy.py +7 -1
- adam/commands/deploy/deploy_pg_agent.py +2 -2
- adam/commands/deploy/deploy_pod.py +6 -6
- adam/commands/deploy/deploy_utils.py +2 -2
- adam/commands/deploy/undeploy.py +7 -1
- adam/commands/deploy/undeploy_pg_agent.py +2 -2
- adam/commands/deploy/undeploy_pod.py +4 -4
- adam/commands/devices.py +29 -0
- adam/commands/export/export.py +60 -0
- adam/commands/export/export_on_x.py +76 -0
- adam/commands/export/export_rmdbs.py +65 -0
- adam/commands/export/export_select.py +68 -0
- adam/commands/export/export_use.py +56 -0
- adam/commands/export/utils_export.py +253 -0
- adam/commands/help.py +9 -5
- adam/commands/issues.py +6 -0
- adam/commands/kubectl.py +41 -0
- adam/commands/login.py +6 -3
- adam/commands/logs.py +2 -1
- adam/commands/ls.py +43 -31
- adam/commands/medusa/medusa_backup.py +2 -2
- adam/commands/medusa/medusa_restore.py +2 -2
- adam/commands/medusa/medusa_show_backupjobs.py +3 -2
- adam/commands/medusa/medusa_show_restorejobs.py +2 -2
- adam/commands/nodetool.py +11 -16
- adam/commands/postgres/postgres.py +4 -4
- adam/commands/postgres/{postgres_session.py → postgres_context.py} +29 -30
- adam/commands/postgres/postgres_utils.py +5 -5
- adam/commands/postgres/psql_completions.py +1 -1
- adam/commands/preview_table.py +18 -32
- adam/commands/pwd.py +4 -3
- adam/commands/reaper/reaper.py +3 -0
- adam/commands/reaper/reaper_restart.py +1 -1
- adam/commands/reaper/reaper_session.py +1 -1
- adam/commands/repair/repair.py +3 -3
- adam/commands/repair/repair_log.py +1 -1
- adam/commands/repair/repair_run.py +2 -2
- adam/commands/repair/repair_scan.py +1 -1
- adam/commands/repair/repair_stop.py +1 -1
- adam/commands/report.py +6 -0
- adam/commands/restart.py +2 -2
- adam/commands/rollout.py +1 -1
- adam/commands/show/show.py +3 -1
- adam/commands/show/show_app_actions.py +3 -0
- adam/commands/show/show_app_id.py +1 -1
- adam/commands/show/show_app_queues.py +3 -2
- adam/commands/show/show_cassandra_status.py +3 -3
- adam/commands/show/show_cassandra_version.py +3 -3
- adam/commands/show/show_login.py +3 -0
- adam/commands/show/show_processes.py +1 -1
- adam/commands/show/show_repairs.py +2 -2
- adam/commands/show/show_storage.py +1 -1
- adam/commands/watch.py +1 -1
- adam/config.py +2 -1
- adam/embedded_params.py +1 -1
- adam/pod_exec_result.py +7 -1
- adam/repl.py +125 -99
- adam/repl_commands.py +29 -17
- adam/repl_state.py +229 -49
- adam/sql/sql_completer.py +86 -62
- adam/sql/sql_state_machine.py +563 -0
- adam/sql/term_completer.py +3 -0
- adam/sso/cred_cache.py +1 -1
- adam/sso/idp.py +1 -1
- adam/utils_athena.py +108 -74
- adam/utils_audits.py +104 -0
- adam/utils_export.py +42 -0
- adam/utils_k8s/__init__.py +0 -0
- adam/utils_k8s/app_clusters.py +33 -0
- adam/utils_k8s/app_pods.py +31 -0
- adam/{k8s_utils → utils_k8s}/cassandra_clusters.py +5 -6
- adam/{k8s_utils → utils_k8s}/cassandra_nodes.py +11 -4
- adam/{k8s_utils → utils_k8s}/deployment.py +2 -2
- adam/{k8s_utils → utils_k8s}/pods.py +54 -11
- adam/{k8s_utils → utils_k8s}/statefulsets.py +2 -2
- adam/version.py +1 -1
- {kaqing-2.0.93.dist-info → kaqing-2.0.115.dist-info}/METADATA +1 -1
- kaqing-2.0.115.dist-info/RECORD +203 -0
- adam/commands/bash.py +0 -91
- adam/commands/cql/cql_table_completer.py +0 -8
- adam/commands/describe/describe.py +0 -46
- adam/commands/describe/describe_keyspace.py +0 -60
- adam/commands/describe/describe_keyspaces.py +0 -50
- adam/commands/describe/describe_table.py +0 -60
- adam/commands/describe/describe_tables.py +0 -50
- adam/commands/postgres/psql_table_completer.py +0 -11
- adam/sql/state_machine.py +0 -460
- kaqing-2.0.93.dist-info/RECORD +0 -190
- /adam/commands/{describe → bash}/__init__.py +0 -0
- /adam/{k8s_utils → commands/export}/__init__.py +0 -0
- /adam/{k8s_utils → utils_k8s}/config_maps.py +0 -0
- /adam/{k8s_utils → utils_k8s}/custom_resources.py +0 -0
- /adam/{k8s_utils → utils_k8s}/ingresses.py +0 -0
- /adam/{k8s_utils → utils_k8s}/jobs.py +0 -0
- /adam/{k8s_utils → utils_k8s}/kube_context.py +0 -0
- /adam/{k8s_utils → utils_k8s}/secrets.py +0 -0
- /adam/{k8s_utils → utils_k8s}/service_accounts.py +0 -0
- /adam/{k8s_utils → utils_k8s}/services.py +0 -0
- /adam/{k8s_utils → utils_k8s}/volumes.py +0 -0
- {kaqing-2.0.93.dist-info → kaqing-2.0.115.dist-info}/WHEEL +0 -0
- {kaqing-2.0.93.dist-info → kaqing-2.0.115.dist-info}/entry_points.txt +0 -0
- {kaqing-2.0.93.dist-info → kaqing-2.0.115.dist-info}/top_level.txt +0 -0
adam/utils_athena.py
CHANGED
@@ -1,3 +1,4 @@
+from datetime import datetime
 import functools
 import time
 import boto3
@@ -5,88 +6,121 @@ import boto3
 from adam.config import Config
 from adam.utils import lines_to_tabular, log, log2
 
+# no state utility class
+class Athena:
+    @functools.lru_cache()
+    def database_names(prefix: str = 'export_'):
+        # this function is called only from export currently
+        Config().wait_log(f'Inspecting export database schema...')
 
+        query = f"SELECT schema_name FROM information_schema.schemata WHERE schema_name <> 'information_schema'"
+        if prefix:
+            query = f"{query} AND schema_name like '{prefix}%'"
 
+        state, reason, rs = Athena.query(query)
+        if rs:
+            names = []
+            for row in rs[1:]:
+                row_data = [col.get('VarCharValue') if col else '' for col in row['Data']]
+                names.append(row_data[0])
+
+            return names
+
+        return []
 
+    def clear_cache(cache: str = None):
+        if not cache or cache == 'databases':
+            Athena.database_names.cache_clear()
+        if not cache or cache == 'tables':
+            Athena.table_names.cache_clear()
+        if not cache or cache == 'columns':
+            Athena.column_names.cache_clear()
 
-@functools.lru_cache()
-def
+    @functools.lru_cache()
+    def table_names(database: str = 'audit', function: str = 'audit'):
+        region_name = Config().get(f'{function}.athena.region', 'us-west-2')
+        database_name = Config().get(f'{function}.athena.database', database)
+        catalog_name = Config().get(f'{function}.athena.catalog', 'AwsDataCatalog')
 
+        athena_client = boto3.client('athena', region_name=region_name)
+        paginator = athena_client.get_paginator('list_table_metadata')
 
+        table_names = []
+        for page in paginator.paginate(CatalogName=catalog_name, DatabaseName=database_name):
+            for table_metadata in page.get('TableMetadataList', []):
+                table_names.append(table_metadata['Name'])
 
-        _, _, rs = audit_query(query)
-        if rs:
-            return [row['Data'][0].get('VarCharValue') for row in rs[1:]]
+        return table_names
 
+    @functools.lru_cache()
+    def column_names(tables: list[str] = [], database: str = None, function: str = 'audit', partition_cols_only = False):
+        if not database:
+            database = Config().get(f'{function}.athena.database', 'audit')
 
+        if not tables:
+            tables = Config().get(f'{function}.athena.tables', 'audit').split(',')
 
+        table_names = "'" + "','".join([table.strip() for table in tables]) + "'"
+
+        query = f"select column_name from information_schema.columns where table_name in ({table_names}) and table_schema = '{database}'"
+        if partition_cols_only:
+            query = f"{query} and extra_info = 'partition key'"
+
+        _, _, rs = Athena.query(query)
         if rs:
+            return [row['Data'][0].get('VarCharValue') for row in rs[1:]]
+
+        return []
+
+    def run_query(sql: str, database: str = None):
+        state, reason, rs = Athena.query(sql, database)
+
+        if state == 'SUCCEEDED':
+            if rs:
+                column_info = rs[0]['Data']
+                columns = [col.get('VarCharValue') for col in column_info]
+                lines = []
+                for row in rs[1:]:
+                    row_data = [col.get('VarCharValue') if col else '' for col in row['Data']]
+                    lines.append('\t'.join(row_data))
+
+                log(lines_to_tabular(lines, header='\t'.join(columns), separator='\t'))
+        else:
+            log2(f"Query failed or was cancelled. State: {state}")
+            log2(f"Reason: {reason}")
+
+    def query(sql: str, database: str = None, function: str = 'audit') -> tuple[str, str, list]:
+        athena_client = boto3.client('athena')
+
+        if not database:
+            database = Config().get(f'{function}.athena.database', 'audit')
+
+        s3_output_location = Config().get(f'{function}.athena.output', f's3://s3.ops--{function}/ddl/results')
+
+        response = athena_client.start_query_execution(
+            QueryString=sql,
+            QueryExecutionContext={
+                'Database': database
+            },
+            ResultConfiguration={
+                'OutputLocation': s3_output_location
+            }
+        )
+
+        query_execution_id = response['QueryExecutionId']
+
+        while True:
+            query_status = athena_client.get_query_execution(QueryExecutionId=query_execution_id)
+            state = query_status['QueryExecution']['Status']['State']
+            if state in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
+                break
+            time.sleep(1)
+
+        if state == 'SUCCEEDED':
+            results_response = athena_client.get_query_results(QueryExecutionId=query_execution_id)
+            if results_response['ResultSet']['Rows']:
+                return (state, None, results_response['ResultSet']['Rows'])
+
+            return (state, None, [])
+        else:
+            return (state, query_status['QueryExecution']['Status'].get('StateChangeReason'), [])
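The rewrite above folds the old module-level helpers into a single `Athena` utility class. A minimal usage sketch, not code from the package, assuming AWS credentials and an `audit` Athena database are available:

```python
# Hypothetical sketch only. Athena.query polls start_query_execution until the state
# becomes SUCCEEDED/FAILED/CANCELLED and returns (state, reason, rows).
from adam.utils_athena import Athena

print(Athena.database_names(prefix='export_'))  # cached list of export_* schemas
print(Athena.table_names(database='audit'))     # cached table list from the Glue catalog

state, reason, rows = Athena.query("select count(*) from audit")  # table name is an assumption
if state == 'SUCCEEDED':
    print(rows)        # raw Athena ResultSet rows; the first row is the header
else:
    print(state, reason)

Athena.clear_cache()   # drop all lru_cache'd schema lookups
```

`Athena.run_query` is the same flow but prints the result as a tab-separated table via `lines_to_tabular` instead of returning it.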
adam/utils_audits.py
ADDED
@@ -0,0 +1,104 @@
+from datetime import datetime
+import functools
+import getpass
+import time
+import boto3
+import requests
+
+from adam.config import Config
+from adam.utils import lines_to_tabular, log, log2
+from adam.utils_net import get_my_host
+
+class AuditMeta:
+    def __init__(self, partitions_last_checked: float, cluster_last_checked: float):
+        self.partitions_last_checked = partitions_last_checked
+        self.cluster_last_checked = cluster_last_checked
+
+# no state utility class
+class Audits:
+    PARTITIONS_ADDED = 'partitions-added'
+    ADD_CLUSTERS = 'add-clusters'
+
+    def log(cmd: str, cluster = 'NA', drive: str = 'NA', duration: float = 0.0, audit_extra = None):
+        payload = {
+            'cluster': cluster if cluster else 'NA',
+            'ts': time.time(),
+            'host': get_my_host(),
+            'user': getpass.getuser(),
+            'line': cmd.replace('"', '""').replace('\n', ' '),
+            'drive': drive,
+            'duration': duration,
+            'audit_extra': audit_extra if audit_extra else '',
+        }
+        audit_endpoint = Config().get("audit.endpoint", "https://4psvtaxlcb.execute-api.us-west-2.amazonaws.com/prod/")
+        try:
+            response = requests.post(audit_endpoint, json=payload, timeout=Config().get("audit.timeout", 10))
+            if response.status_code in [200, 201]:
+                Config().debug(response.text)
+            else:
+                log2(f"Error: {response.status_code} {response.text}")
+        except requests.exceptions.Timeout as e:
+            log2(f"Timeout occurred: {e}")
+
+    def get_meta() -> AuditMeta:
+        checked_in = 0.0
+        cluster_last_checked = 0.0
+
+        state, _, rs = Audits.audit_query(f'select partitions_last_checked, clusters_last_checked from meta')
+        if state == 'SUCCEEDED':
+            if len(rs) > 1:
+                try:
+                    row = rs[1]['Data']
+                    checked_in = float(row[0]['VarCharValue'])
+                    cluster_last_checked = float(row[1]['VarCharValue'])
+                except:
+                    pass
+
+        return AuditMeta(checked_in, cluster_last_checked)
+
+    def put_meta(action: str, meta: AuditMeta, clusters: list[str] = None):
+        payload = {
+            'action': action,
+            'partitions-last-checked': meta.partitions_last_checked,
+            'clusters-last-checked': meta.cluster_last_checked
+        }
+        if clusters:
+            payload['clusters'] = clusters
+
+        audit_endpoint = Config().get("audit.endpoint", "https://4psvtaxlcb.execute-api.us-west-2.amazonaws.com/prod/")
+        try:
+            response = requests.post(audit_endpoint, json=payload, timeout=Config().get("audit.timeout", 10))
+            if response.status_code in [200, 201]:
+                Config().debug(response.text)
+            else:
+                log2(f"Error: {response.status_code} {response.text}")
+        except requests.exceptions.Timeout as e:
+            log2(f"Timeout occurred: {e}")
+
+    def find_new_clusters(cluster_last_checked: float) -> list[str]:
+        dt_object = datetime.fromtimestamp(cluster_last_checked)
+
+        # select distinct c2.name from cluster as c1 right outer join
+        # (select distinct c as name from audit where y = '1969' and m = '12' and d >= '31' or y = '1969' and m > '12' or y > '1969') as c2
+        # on c1.name = c2.name where c1.name is null
+        query = '\n '.join([
+            'select distinct c2.name from cluster as c1 right outer join',
+            f'(select distinct c as name from audit where {Audits.date_from(dt_object)}) as c2',
+            'on c1.name = c2.name where c1.name is null'])
+        log2(query)
+        state, _, rs = Audits.audit_query(query)
+        if state == 'SUCCEEDED':
+            if len(rs) > 1:
+                try:
+                    return [r['Data'][0]['VarCharValue'] for r in rs[1:]]
+                except:
+                    pass
+
+        return []
+
+    def date_from(dt_object: datetime):
+        y = dt_object.strftime("%Y")
+        m = dt_object.strftime("%m")
+        d = dt_object.strftime("%d")
+
+        return f"y = '{y}' and m = '{m}' and d >= '{d}' or y = '{y}' and m > '{m}' or y > '{y}'"
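The new `Audits` helper posts audit records to the HTTP endpoint configured under `audit.endpoint`. A hedged sketch of how a caller might record a command, illustrative only and not code from the package; the cluster name and drive value are assumptions:

```python
import time

from adam.utils_audits import Audits

start = time.time()
# ... run a REPL command here ...
Audits.log('nodetool status',          # command line; quotes and newlines are escaped
           cluster='cass-prod',        # assumed cluster name
           drive='manual',             # assumed drive tag
           duration=time.time() - start)
```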
adam/utils_export.py
ADDED
@@ -0,0 +1,42 @@
+import functools
+import boto3
+
+from adam.config import Config
+
+# no state utility class
+class Exports:
+    @functools.lru_cache()
+    def table_names(db: str):
+        region_name = Config().get('audit.athena.region', 'us-west-2')
+        database_name = Config().get('audit.athena.database', db)
+        catalog_name = Config().get('audit.athena.catalog', 'AwsDataCatalog')
+
+        athena_client = boto3.client('athena', region_name=region_name)
+        paginator = athena_client.get_paginator('list_table_metadata')
+
+        table_names = []
+        for page in paginator.paginate(CatalogName=catalog_name, DatabaseName=database_name):
+            for table_metadata in page.get('TableMetadataList', []):
+                table_names.append(table_metadata['Name'])
+
+        return table_names
+
+    @functools.lru_cache()
+    def column_names(tables: list[str] = [], database: str = None, partition_cols_only = False):
+        if not database:
+            database = Config().get('audit.athena.database', 'audit')
+
+        if not tables:
+            tables = Config().get('audit.athena.tables', 'audit').split(',')
+
+        table_names = "'" + "','".join([table.strip() for table in tables]) + "'"
+
+        query = f"select column_name from information_schema.columns where table_name in ({table_names}) and table_schema = '{database}'"
+        if partition_cols_only:
+            query = f"{query} and extra_info = 'partition key'"
+
+        _, _, rs = Exports.audit_query(query)
+        if rs:
+            return [row['Data'][0].get('VarCharValue') for row in rs[1:]]
+
+        return []
adam/utils_k8s/__init__.py
ADDED
File without changes

adam/utils_k8s/app_clusters.py
ADDED
@@ -0,0 +1,33 @@
+from concurrent.futures import ThreadPoolExecutor
+import sys
+from typing import TypeVar
+
+from adam.utils_k8s.app_pods import AppPods
+from adam.pod_exec_result import PodExecResult
+from adam.utils import log2
+from adam.utils_k8s.pods import Pods
+from .kube_context import KubeContext
+
+T = TypeVar('T')
+
+# utility collection on app clusters; methods are all static
+class AppClusters:
+    def exec(pods: list[str], namespace: str, command: str, action: str = 'action',
+             max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', background = False) -> list[PodExecResult]:
+        def body(executor: ThreadPoolExecutor, pod: str, namespace: str, show_out: bool):
+            if executor:
+                return executor.submit(AppPods.exec, pod, namespace, command, False, False, shell, background)
+
+            return AppPods.exec(pod, namespace, command, show_out=show_out, background=background)
+
+        def post(result, show_out: bool):
+            if KubeContext.show_out(show_out):
+                print(result.command)
+                if result.stdout:
+                    print(result.stdout)
+                if result.stderr:
+                    log2(result.stderr, file=sys.stderr)
+
+            return result
+
+        return Pods.on_pods(pods, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out, on_any=on_any, background=background)
adam/utils_k8s/app_pods.py
ADDED
@@ -0,0 +1,31 @@
+from typing import List
+from kubernetes import client
+
+from adam.config import Config
+from adam.utils_k8s.pods import Pods
+from adam.pod_exec_result import PodExecResult
+from adam.repl_session import ReplSession
+
+# utility collection on app pods; methods are all static
+class AppPods:
+    def pod_names(namespace: str, env: str, app: str):
+        return [pod.metadata.name for pod in AppPods.app_pods(namespace, env, app)]
+
+    def app_pods(namespace: str, env: str, app: str) -> List[client.V1Pod]:
+        v1 = client.CoreV1Api()
+
+        env_key = Config().get('app.env', 'c3__env-0')
+        app_key = Config().get('app.app', 'c3__app-0')
+        label_selector = f'applicationGroup=c3,{env_key}=0{env}0,{app_key}=0{app}0'
+
+        return v1.list_namespaced_pod(namespace, label_selector=label_selector).items
+
+    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh', background = False) -> PodExecResult:
+        container = Config().get('app.container-name', 'c3-server')
+        r = Pods.exec(pod_name, container, namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, background = background)
+
+        if r and Config().get('repl.history.push-cat-remote-log-file', True):
+            if r.log_file and ReplSession().prompt_session:
+                ReplSession().prompt_session.history.append_string(f'@{r.pod} cat {r.log_file}')
+
+        return r
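`AppPods` selects application pods by label and runs commands in the configured app container, and `AppClusters` (shown above) fans a command out across those pods. A hypothetical sketch, assuming a kube config is already loadable and that the namespace, env, and app names exist:

```python
from kubernetes import config

from adam.utils_k8s.app_pods import AppPods
from adam.utils_k8s.app_clusters import AppClusters

config.load_kube_config()                                 # standard kubeconfig lookup
pods = AppPods.pod_names('c3', env='prod', app='server')  # all names here are assumptions
results = AppClusters.exec(pods, 'c3', 'hostname', action='hostname')
for r in results:
    print(r.pod, r.stdout.strip())
```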
adam/{k8s_utils → utils_k8s}/cassandra_clusters.py
CHANGED
@@ -2,11 +2,10 @@ from concurrent.futures import ThreadPoolExecutor
 import sys
 from typing import TypeVar
 
-from adam.
+from adam.utils_k8s.cassandra_nodes import CassandraNodes
 from adam.pod_exec_result import PodExecResult
 from adam.utils import log2
 from .statefulsets import StatefulSets
-from .pods import Pods
 from .kube_context import KubeContext
 
 T = TypeVar('T')
@@ -14,12 +13,12 @@ T = TypeVar('T')
 # utility collection on cassandra clusters; methods are all static
 class CassandraClusters:
     def exec(statefulset: str, namespace: str, command: str, action: str = 'action',
-             max_workers=0, show_out=True, on_any = False, shell = '/bin/sh') -> list[PodExecResult]:
+             max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', background = False) -> list[PodExecResult]:
         def body(executor: ThreadPoolExecutor, pod: str, namespace: str, show_out: bool):
            if executor:
-                return executor.submit(CassandraNodes.exec, pod, namespace, command, False, False, shell)
+                return executor.submit(CassandraNodes.exec, pod, namespace, command, False, False, shell, background)
 
-            return CassandraNodes.exec(pod, namespace, command, show_out=show_out)
+            return CassandraNodes.exec(pod, namespace, command, show_out=show_out, background=background)
 
         def post(result, show_out: bool):
             if KubeContext.show_out(show_out):
@@ -31,4 +30,4 @@ class CassandraClusters:
 
             return result
 
-        return StatefulSets.on_cluster(statefulset, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out, on_any=on_any)
+        return StatefulSets.on_cluster(statefulset, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out, on_any=on_any, background=background)
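The change to `CassandraClusters` (and to `StatefulSets.on_cluster` further below) is mostly about threading the new `background` flag down to `Pods.exec`. A hypothetical background fan-out, with assumed statefulset and namespace names:

```python
from adam.utils_k8s.cassandra_clusters import CassandraClusters

# Run a long nodetool command on every node without holding the session open; with the
# default auto-nohup setting each pod writes its output to a /tmp/qing-*.log file
# (see the Pods.exec change below).
results = CassandraClusters.exec('cassandra-dc1', 'cass-ns',
                                 'nodetool repair', action='repair',
                                 background=True)
for r in results:
    print(r.pod, r.log_file)
```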
adam/{k8s_utils → utils_k8s}/cassandra_nodes.py
CHANGED
@@ -1,12 +1,19 @@
 from adam.config import Config
-from adam.
-from adam.
+from adam.utils_k8s.pods import Pods
+from adam.utils_k8s.secrets import Secrets
 from adam.pod_exec_result import PodExecResult
+from adam.repl_session import ReplSession
 
 # utility collection on cassandra nodes; methods are all static
 class CassandraNodes:
-    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh') -> PodExecResult:
+    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh', background = False) -> PodExecResult:
+        r = Pods.exec(pod_name, "cassandra", namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, background = background)
+
+        if r and Config().get('repl.history.push-cat-remote-log-file', True):
+            if r.log_file and ReplSession().prompt_session:
+                ReplSession().prompt_session.history.append_string(f'@{r.pod} cat {r.log_file}')
+
+        return r
 
     def get_host_id(pod_name: str, ns: str):
         try:
adam/{k8s_utils → utils_k8s}/deployment.py
CHANGED
@@ -1,7 +1,7 @@
 from kubernetes import client
 
-from adam.
-from adam.
+from adam.utils_k8s.pods import Pods
+from adam.utils_k8s.volumes import ConfigMapMount
 
 # utility collection on deployments; methods are all static
 class Deployments:
adam/{k8s_utils → utils_k8s}/pods.py
CHANGED
@@ -9,9 +9,9 @@ from kubernetes.stream import stream
 from kubernetes.stream.ws_client import ERROR_CHANNEL
 
 from adam.config import Config
-from adam.
+from adam.utils_k8s.volumes import ConfigMapMount
 from adam.pod_exec_result import PodExecResult
-from adam.utils import elapsed_time, log2
+from adam.utils import elapsed_time, log2
 from .kube_context import KubeContext
 
 T = TypeVar('T')
@@ -28,7 +28,7 @@ class Pods:
     def delete(pod_name: str, namespace: str, grace_period_seconds: int = None):
         try:
             v1 = client.CoreV1Api()
+            v1.delete_namespaced_pod(pod_name, namespace, grace_period_seconds=grace_period_seconds)
         except Exception as e:
             log2("Exception when calling CoreV1Api->delete_namespaced_pod: %s\n" % e)
 
@@ -43,7 +43,11 @@ class Pods:
                 namespace: str,
                 body: Callable[[ThreadPoolExecutor, str, str, bool], T],
                 post: Callable[[T], T] = None,
-                action: str = 'action',
+                action: str = 'action',
+                max_workers=0,
+                show_out=True,
+                on_any = False,
+                background = False) -> list[T]:
         show_out = KubeContext.show_out(show_out)
 
         if not max_workers:
@@ -94,7 +98,9 @@ class Pods:
 
         return results
 
-    def exec(pod_name: str, container: str, namespace: str, command: str,
+    def exec(pod_name: str, container: str, namespace: str, command: str,
+             show_out = True, throw_err = False, shell = '/bin/sh',
+             background = False,
             interaction: Callable[[any, list[str]], any] = None):
         if _TEST_POD_EXEC_OUTS:
             return _TEST_POD_EXEC_OUTS
@@ -102,17 +108,24 @@ class Pods:
         show_out = KubeContext.show_out(show_out)
 
         api = client.CoreV1Api()
-        log_file = None
 
+        log_file = None
         tty = True
         exec_command = [shell, '-c', command]
-        if command.endswith(' &'):
-            command = f"nohup {command.strip(' &')} > {log_file} 2>&1 &"
-            exec_command = [shell, '-c', command]
-            # should be false for starting a backgroud process
+        if background or command.endswith(' &'):
+            # should be false for starting a background process
            tty = False
 
+            if Config().get('repl.background-process.auto-nohup', True):
+                command = command.strip(' &')
+                cmd_name = ''
+                if command.startswith('nodetool '):
+                    cmd_name = f".{'_'.join(command.split(' ')[5:])}"
+
+                log_file = f'/tmp/qing-{datetime.now().strftime("%d%H%M%S")}{cmd_name}.log'
+                command = f"nohup {command} > {log_file} 2>&1 &"
+                exec_command = [shell, '-c', command]
+
         k_command = f'kubectl exec {pod_name} -c {container} -n {namespace} -- {shell} -c "{command}"'
         if show_out:
             print(k_command)
@@ -163,6 +176,36 @@
 
         return PodExecResult("".join(stdout), "".join(stderr), k_command, error_output, pod=pod_name, log_file=log_file)
 
+    def read_file(pod_name: str, container: str, namespace: str, file_path: str):
+        v1 = client.CoreV1Api()
+
+        resp = stream(
+            v1.connect_get_namespaced_pod_exec,
+            name=pod_name,
+            namespace=namespace,
+            container=container,
+            command=["cat", file_path],
+            stderr=True, stdin=False,
+            stdout=True, tty=False,
+            _preload_content=False # Important for streaming
+        )
+
+        try:
+            while resp.is_open():
+                resp.update(timeout=1)
+                if resp.peek_stdout():
+                    yield resp.read_stdout()
+
+            try:
+                # get the exit code from server
+                error_output = resp.read_channel(ERROR_CHANNEL)
+            except Exception as e:
+                pass
+        except Exception as e:
+            raise e
+        finally:
+            resp.close()
+
     def get_container(namespace: str, pod_name: str, container_name: str):
         pod = Pods.get(namespace, pod_name)
         if not pod:
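For background execution, `Pods.exec` now disables the TTY and, when `repl.background-process.auto-nohup` is enabled (the default), rewrites the command to run under `nohup` with a timestamped log file. An illustrative sketch of that rewriting, mirroring the diff above; the sample command is an assumption:

```python
from datetime import datetime

command = 'nodetool -u cassandra -pw secret -h 10.0.0.1 repair &'

command = command.strip(' &')
cmd_name = ''
if command.startswith('nodetool '):
    # the tail of the nodetool invocation ends up in the log file name
    cmd_name = f".{'_'.join(command.split(' ')[5:])}"

log_file = f'/tmp/qing-{datetime.now().strftime("%d%H%M%S")}{cmd_name}.log'
print(f"nohup {command} > {log_file} 2>&1 &")
```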
adam/{k8s_utils → utils_k8s}/statefulsets.py
CHANGED
@@ -62,10 +62,10 @@ class StatefulSets:
                    namespace: str,
                    body: Callable[[ThreadPoolExecutor, str, str, bool], T],
                    post: Callable[[T], T] = None,
-                   action: str = 'action', max_workers=0, show_out=True, on_any = False) -> list[T]:
+                   action: str = 'action', max_workers=0, show_out=True, on_any = False, background = False) -> list[T]:
         pods = StatefulSets.pod_names(statefulset, namespace)
 
-        return Pods.on_pods(pods, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out, on_any=on_any)
+        return Pods.on_pods(pods, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out, on_any=on_any, background=background)
 
     @functools.lru_cache()
     def pod_names(ss: str, ns: str):
adam/version.py
CHANGED