kaqing 2.0.95__py3-none-any.whl → 2.0.115__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kaqing might be problematic. Click here for more details.
- adam/batch.py +1 -15
- adam/commands/alter_tables.py +3 -14
- adam/commands/app.py +3 -3
- adam/commands/app_ping.py +2 -2
- adam/commands/audit/audit.py +26 -11
- adam/commands/audit/audit_repair_tables.py +20 -37
- adam/commands/audit/audit_run.py +58 -0
- adam/commands/audit/show_last10.py +51 -0
- adam/commands/audit/show_slow10.py +50 -0
- adam/commands/audit/show_top10.py +49 -0
- adam/commands/audit/utils_show_top10.py +59 -0
- adam/commands/bash/bash.py +124 -0
- adam/commands/bash/bash_completer.py +93 -0
- adam/commands/cat.py +55 -0
- adam/commands/cd.py +23 -11
- adam/commands/check.py +6 -0
- adam/commands/code.py +60 -0
- adam/commands/command.py +9 -4
- adam/commands/commands_utils.py +1 -2
- adam/commands/cql/cql_completions.py +7 -3
- adam/commands/cql/cql_utils.py +100 -8
- adam/commands/cql/cqlsh.py +10 -5
- adam/commands/deploy/deploy.py +7 -1
- adam/commands/deploy/deploy_pg_agent.py +2 -2
- adam/commands/deploy/undeploy.py +7 -1
- adam/commands/deploy/undeploy_pg_agent.py +2 -2
- adam/commands/devices.py +29 -0
- adam/commands/export/__init__.py +0 -0
- adam/commands/export/export.py +60 -0
- adam/commands/export/export_on_x.py +76 -0
- adam/commands/export/export_rmdbs.py +65 -0
- adam/commands/export/export_select.py +68 -0
- adam/commands/export/export_use.py +56 -0
- adam/commands/export/utils_export.py +253 -0
- adam/commands/help.py +9 -5
- adam/commands/issues.py +6 -0
- adam/commands/kubectl.py +41 -0
- adam/commands/login.py +6 -3
- adam/commands/logs.py +1 -0
- adam/commands/ls.py +39 -27
- adam/commands/medusa/medusa_show_backupjobs.py +1 -0
- adam/commands/nodetool.py +5 -2
- adam/commands/postgres/postgres.py +4 -4
- adam/commands/postgres/{postgres_session.py → postgres_context.py} +26 -27
- adam/commands/postgres/postgres_utils.py +5 -5
- adam/commands/postgres/psql_completions.py +1 -1
- adam/commands/preview_table.py +18 -32
- adam/commands/pwd.py +4 -3
- adam/commands/reaper/reaper.py +3 -0
- adam/commands/repair/repair.py +3 -3
- adam/commands/report.py +6 -0
- adam/commands/show/show.py +3 -1
- adam/commands/show/show_app_actions.py +3 -0
- adam/commands/show/show_app_queues.py +3 -2
- adam/commands/show/show_login.py +3 -0
- adam/config.py +1 -1
- adam/embedded_params.py +1 -1
- adam/pod_exec_result.py +7 -1
- adam/repl.py +121 -97
- adam/repl_commands.py +29 -17
- adam/repl_state.py +224 -44
- adam/sql/sql_completer.py +86 -62
- adam/sql/sql_state_machine.py +563 -0
- adam/sql/term_completer.py +3 -0
- adam/utils_athena.py +108 -74
- adam/utils_audits.py +104 -0
- adam/utils_export.py +42 -0
- adam/utils_k8s/app_clusters.py +33 -0
- adam/utils_k8s/app_pods.py +31 -0
- adam/utils_k8s/cassandra_clusters.py +4 -5
- adam/utils_k8s/cassandra_nodes.py +4 -4
- adam/utils_k8s/pods.py +42 -6
- adam/utils_k8s/statefulsets.py +2 -2
- adam/version.py +1 -1
- {kaqing-2.0.95.dist-info → kaqing-2.0.115.dist-info}/METADATA +1 -1
- {kaqing-2.0.95.dist-info → kaqing-2.0.115.dist-info}/RECORD +80 -67
- adam/commands/bash.py +0 -92
- adam/commands/cql/cql_table_completer.py +0 -8
- adam/commands/describe/describe.py +0 -46
- adam/commands/describe/describe_keyspace.py +0 -60
- adam/commands/describe/describe_keyspaces.py +0 -50
- adam/commands/describe/describe_table.py +0 -60
- adam/commands/describe/describe_tables.py +0 -50
- adam/commands/postgres/psql_table_completer.py +0 -11
- adam/sql/state_machine.py +0 -460
- /adam/commands/{describe → bash}/__init__.py +0 -0
- {kaqing-2.0.95.dist-info → kaqing-2.0.115.dist-info}/WHEEL +0 -0
- {kaqing-2.0.95.dist-info → kaqing-2.0.115.dist-info}/entry_points.txt +0 -0
- {kaqing-2.0.95.dist-info → kaqing-2.0.115.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
2
|
+
from datetime import datetime
|
|
3
|
+
import io
|
|
4
|
+
import re
|
|
5
|
+
import time
|
|
6
|
+
from typing import Callable
|
|
7
|
+
import boto3
|
|
8
|
+
|
|
9
|
+
from adam.commands.cql.cql_utils import cassandra_table_names, run_cql, table_spec
|
|
10
|
+
from adam.config import Config
|
|
11
|
+
from adam.repl_state import ReplState
|
|
12
|
+
from adam.utils import elapsed_time, log2
|
|
13
|
+
from adam.utils_athena import Athena
|
|
14
|
+
from adam.utils_k8s.cassandra_nodes import CassandraNodes
|
|
15
|
+
from adam.utils_k8s.pods import Pods
|
|
16
|
+
|
|
17
|
+
def export_tables(args: list[str], state: ReplState, max_workers = 0):
    """Export one or more Cassandra tables, in parallel when it pays off.

    With no args, every table of the '<namespace>_db' keyspace is exported.
    Returns the list of exported table names (order unspecified when parallel).
    """
    consistency = None
    specs = None

    if args:
        consistency, specs = ExportSpec.parse_multiple(' '.join(args))

    if not specs:
        # No explicit specs: default to all tables of the namespace keyspace.
        specs = [ExportSpec(name) for name in cassandra_table_names(state, keyspace=f'{state.namespace}_db')]

    if not max_workers:
        max_workers = Config().action_workers('export', 8)

    if max_workers <= 1 or len(specs) <= 1:
        # Sequential path: a single worker or a single table.
        return [export_table(spec, state, multi_tables=len(specs) > 1, consistency=consistency) for spec in specs]

    log2(f'Executing on {len(specs)} Cassandra tables in parallel...')
    start_time = time.time()
    try:
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [executor.submit(export_table, spec, state, True, consistency=consistency) for spec in specs]
            if not futures:
                return []

            return [f.result() for f in as_completed(futures)]
    finally:
        log2(f"{len(specs)} parallel table export elapsed time: {elapsed_time(start_time)} with {max_workers} workers")
|
|
44
|
+
|
|
45
|
+
class ExportSpec:
    """Specification of a single table export.

    Holds the source table name, an optional comma-separated column list
    and an optional target (Athena) table name.  Spec strings look like
    'tbl', 'tbl(c1,c2)' or 'tbl as target', optionally suffixed with
    'with consistency <level>' when parsed via parse_multiple().
    """

    def __init__(self, table: str, columns: str = None, target_table: str = None):
        self.table = table                # source Cassandra table name
        self.columns = columns            # comma-separated columns, or None for defaults
        self.target_table = target_table  # target table name, or None to reuse `table`

    @staticmethod
    def parse(spec_str: str) -> 'ExportSpec':
        """Parse one spec string into an ExportSpec."""
        target = None

        # Split off an optional trailing '... as <target>' clause.
        p = re.compile(r"(.*?)\s+as\s+(.*)", re.IGNORECASE)
        match = p.match(spec_str)
        if match:
            spec_str = match.group(1)
            target = match.group(2)

        table = spec_str
        columns = None

        # Extract an optional parenthesized column list: 'tbl(c1,c2)'.
        p = re.compile(r'(.*?)\((.*)\)')
        match = p.match(spec_str)
        if match:
            table = match.group(1)
            columns = match.group(2)

        return ExportSpec(table, columns, target)

    def __eq__(self, other):
        if isinstance(other, ExportSpec):
            return self.table == other.table and self.columns == other.columns and self.target_table == other.target_table

        return False

    def __str__(self):
        return f'{self.table}, {self.columns}, {self.target_table}'

    @staticmethod
    def parse_multiple(spec_str: str) -> tuple[str, list['ExportSpec']]:
        """Parse a comma-separated list of specs.

        Returns (consistency, specs); consistency is None unless the string
        ends with 'with consistency <level>'.
        """
        consistency = None

        p = re.compile(r"(.*?)with\s+consistency\s+(.*)", re.IGNORECASE)
        match = p.match(spec_str)
        if match:
            spec_str = match.group(1).strip(' ')
            consistency = match.group(2)

        if spec_str:
            # Split on commas that are NOT inside a parenthesized column list.
            p = r",\s*(?![^()]*\))"
            specs = re.split(p, spec_str)

            return consistency, [ExportSpec.parse(spec) for spec in specs]

        return consistency, []
|
|
96
|
+
|
|
97
|
+
def export_table(spec: ExportSpec, state: ReplState, multi_tables = True, consistency: str = None):
    """Export one Cassandra table to CSV and register it as an Athena external table.

    Pipeline: COPY the table to a CSV file on the Cassandra pod, stream the
    file to S3, then create the Athena database/table over the S3 location.
    The temporary CSV on the pod is removed even when a step fails.

    spec         -- table name plus optional column list / target table name
    multi_tables -- True when part of a multi-table export (quieter logging)
    consistency  -- optional Cassandra consistency level for the COPY

    Returns the source table name (also on failure; errors are only logged).
    """
    table = spec.table
    columns = spec.columns
    if not columns:
        # Fall back to the configured column selection; '<keys>' is the default.
        columns = Config().get('export.columns', f'<keys>')

    # Resolve column placeholders against the table schema.
    if columns == '<keys>':
        columns = ','.join(table_spec(state, table, on_any=True).keys())
    elif columns == '<row-key>':
        columns = table_spec(state, table, on_any=True).row_key()
    elif columns == '*':
        columns = ','.join([c.name for c in table_spec(state, table, on_any=True).columns])

    if not columns:
        log2(f'ERROR: Empty columns on {table}.')
        return table

    # Athena table name defaults to the source table, keyspace prefix stripped.
    athena_table = spec.target_table if spec.target_table else table
    if '.' in athena_table:
        athena_table = athena_table.split('.')[-1]

    temp_dir = Config().get('export.temp_dir', '/c3/cassandra/tmp')
    # One export session is shared across tables; the first table of a session
    # also creates the session's Athena database.
    session = state.export_session
    create_db = not session
    if create_db:
        session = datetime.now().strftime("%Y%m%d%H%M%S")
        state.export_session = session
    db = f'export_{session}'

    CassandraNodes.exec(state.pod, state.namespace, f'mkdir -p {temp_dir}/{session}', show_out=not multi_tables, shell='bash')
    csv_file = f'{temp_dir}/{session}/{table}.csv'
    succeeded = False
    try:
        # Per-step progress lines are suppressed in debug or multi-table mode.
        suppress_ing_log = Config().is_debug() or multi_tables
        queries = []
        if consistency:
            queries.append(f'CONSISTENCY {consistency}')
        queries.append(f"COPY {table}({columns}) TO '{csv_file}' WITH HEADER = TRUE")
        ing(f'Dumping table {table}{f" with consistency {consistency}" if consistency else ""}',
            lambda: run_cql(state, ';'.join(queries), show_out=False),
            suppress_log=suppress_ing_log)

        def upload_to_s3():
            # Stream the CSV off the pod; read_file appears to yield chunks,
            # which GeneratorStream adapts to a file-like object for boto3.
            bytes = Pods.read_file(state.pod, 'cassandra', state.namespace, csv_file)

            s3 = boto3.client('s3')
            s3.upload_fileobj(GeneratorStream(bytes), 'c3.ops--qing', f'export/{session}/{athena_table}/{table}.csv')

        ing(f'Uploading to S3', upload_to_s3, suppress_log=suppress_ing_log)

        def create_schema():
            # Create the per-session database, then (re)create the external
            # table over the S3 prefix written by upload_to_s3().
            query = f'CREATE DATABASE IF NOT EXISTS {db};'
            if Config().is_debug():
                log2(query)
            Athena.query(query, 'default')

            query = f'DROP TABLE IF EXISTS {athena_table};'
            if Config().is_debug():
                log2(query)
            Athena.query(query, db)

            # columns = ', '.join([f'{h.strip(" ")} string' for h in header[0].split(',')])
            # All columns are declared as string; OpenCSVSerde only reads strings.
            athena_columns = ', '.join([f'{c} string' for c in columns.split(',')])
            query = f'CREATE EXTERNAL TABLE IF NOT EXISTS {athena_table}(\n' + \
                f'    {athena_columns})\n' + \
                "ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'\n" + \
                'WITH SERDEPROPERTIES (\n' + \
                '    "separatorChar" = ",",\n' + \
                '    "quoteChar" = "\\"")\n' + \
                f"LOCATION 's3://c3.ops--qing/export/{session}/{athena_table}'\n" + \
                'TBLPROPERTIES ("skip.header.line.count"="1");'
            if Config().is_debug():
                log2(query)
            try:
                Athena.query(query, db)
            except Exception as e:
                # Surface the failing DDL before propagating; the outer handler logs the error.
                log2(f'*** Failed query:\n{query}')
                raise e

        ing(f"Creating database {db}" if create_db else f"Creating table {athena_table}", create_schema, suppress_log=suppress_ing_log)

        succeeded = True
    except Exception as e:
        # Best-effort export: log and fall through so cleanup still runs.
        log2(e)
    finally:
        ing('Cleaning up temporary files',
            lambda: CassandraNodes.exec(state.pod, state.namespace, f'rm -rf {csv_file}', show_out=False, shell='bash'),
            suppress_log=suppress_ing_log)

    if succeeded:
        # New table exists now; drop any cached Athena metadata.
        Athena.clear_cache()

        if not suppress_ing_log:
            # Single-table interactive export: show a small sample as confirmation.
            query = f'select * from {athena_table} limit 10'
            log2(query)
            Athena.run_query(query, db)

    return table
|
|
195
|
+
|
|
196
|
+
def ing(msg: str, body: Callable[[], None], suppress_log=False):
    """Run *body*, logging '<msg>... OK' around it unless suppressed.

    The trailing ' OK' is only printed when body() returns normally.
    """
    announce = not suppress_log
    if announce:
        log2(f'{msg}...', nl=False)
    body()
    if announce:
        log2(' OK')
|
|
202
|
+
|
|
203
|
+
class GeneratorStream(io.RawIOBase):
    """Read-only file-like stream over a generator of bytes/str chunks.

    Adapts a chunk generator to the readable binary file interface that
    APIs such as boto3's upload_fileobj expect, pulling chunks lazily.
    str chunks are encoded as UTF-8.
    """

    def __init__(self, generator):
        self._generator = generator
        self._buffer = b''  # leftover bytes from generator chunks not yet consumed

    def readable(self):
        return True

    def _read_from_generator(self):
        """Pull one chunk from the generator into the buffer; no-op once exhausted."""
        try:
            chunk = next(self._generator)
        except StopIteration:
            return
        if isinstance(chunk, str):
            chunk = chunk.encode('utf-8')
        self._buffer += chunk

    def _fill(self, size):
        """Grow the buffer to at least *size* bytes, or until the generator ends."""
        while len(self._buffer) < size:
            before = len(self._buffer)
            self._read_from_generator()
            if len(self._buffer) == before:  # generator exhausted
                break

    def readinto(self, b):
        """Fill *b* with up to len(b) bytes; return the count (0 at EOF)."""
        self._fill(len(b))
        n = min(len(b), len(self._buffer))
        b[:n] = self._buffer[:n]
        self._buffer = self._buffer[n:]
        return n

    def read(self, size=-1):
        """Read up to *size* bytes; size None or negative reads everything.

        Fix: size=None previously raised TypeError (`len(buf) < None`),
        but RawIOBase.read(None) must behave like readall().
        """
        if size is None or size < 0:
            # Drain the generator completely, then hand back the whole buffer.
            while True:
                before = len(self._buffer)
                self._read_from_generator()
                if len(self._buffer) == before:
                    break
            data, self._buffer = self._buffer, b''
            return data

        self._fill(size)
        data = self._buffer[:size]
        self._buffer = self._buffer[size:]
        return data
adam/commands/help.py
CHANGED
|
@@ -30,14 +30,18 @@ class Help(Command):
|
|
|
30
30
|
lines.append('NAVIGATION')
|
|
31
31
|
lines.append(' a: | c: | l: | p:\t switch to another operational device: App, Cassandra, Audit or Postgres')
|
|
32
32
|
lines.extend(section(ReplCommands.navigation()))
|
|
33
|
-
lines.append('
|
|
34
|
-
lines.extend(section(ReplCommands.cassandra_check()))
|
|
35
|
-
lines.append('CASSANDRA OPERATIONS')
|
|
33
|
+
lines.append('CASSANDRA')
|
|
36
34
|
lines.extend(section(ReplCommands.cassandra_ops()))
|
|
35
|
+
lines.append('POSTGRES')
|
|
36
|
+
lines.extend(section(ReplCommands.postgres_ops()))
|
|
37
|
+
lines.append('APP')
|
|
38
|
+
lines.extend(section(ReplCommands.app_ops()))
|
|
39
|
+
lines.append('EXPORT DB')
|
|
40
|
+
lines.extend(section(ReplCommands.export_ops()))
|
|
41
|
+
lines.append('AUDIT')
|
|
42
|
+
lines.extend(section(ReplCommands.audit_ops()))
|
|
37
43
|
lines.append('TOOLS')
|
|
38
44
|
lines.extend(section(ReplCommands.tools()))
|
|
39
|
-
lines.append('APP')
|
|
40
|
-
lines.extend(section(ReplCommands.app()))
|
|
41
45
|
lines.append('')
|
|
42
46
|
lines.extend(section(ReplCommands.exit()))
|
|
43
47
|
|
adam/commands/issues.py
CHANGED
|
@@ -21,11 +21,17 @@ class Issues(Command):
|
|
|
21
21
|
def command(self):
|
|
22
22
|
return Issues.COMMAND
|
|
23
23
|
|
|
24
|
+
def required(self):
|
|
25
|
+
return ReplState.NON_L
|
|
26
|
+
|
|
24
27
|
def run(self, cmd: str, state: ReplState):
|
|
25
28
|
if not(args := self.args(cmd)):
|
|
26
29
|
return super().run(cmd, state)
|
|
27
30
|
|
|
28
31
|
state, args = self.apply_state(args, state)
|
|
32
|
+
if not self.validate_state(state):
|
|
33
|
+
return state
|
|
34
|
+
|
|
29
35
|
args, show = Command.extract_options(args, ['-s', '--show'])
|
|
30
36
|
|
|
31
37
|
results = run_checks(state.sts, state.namespace, state.pod, show_output=show)
|
adam/commands/kubectl.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
import subprocess
|
|
2
|
+
|
|
3
|
+
from adam.commands.command import Command
|
|
4
|
+
from adam.repl_state import ReplState, RequiredState
|
|
5
|
+
|
|
6
|
+
class Kubectl(Command):
    """REPL command that forwards its arguments to the local kubectl binary."""

    COMMAND = 'k'

    # Singleton: every construction returns the one shared instance.
    def __new__(cls, *args, **kwargs):
        if not hasattr(cls, 'instance'):
            cls.instance = super(Kubectl, cls).__new__(cls)
        return cls.instance

    def __init__(self, successor: Command=None):
        super().__init__(successor)

    def command(self):
        return Kubectl.COMMAND

    def required(self):
        # A namespace must be selected in the REPL state before running kubectl.
        return RequiredState.NAMESPACE

    def run(self, cmd: str, state: ReplState):
        args = self.args(cmd)
        if not args:
            # Not our command: pass it down the chain.
            return super().run(cmd, state)

        state, args = self.apply_state(args, state)
        if not self.validate_state(state):
            return state

        # argv list with shell=False: arguments reach kubectl untouched.
        subprocess.run(["kubectl"] + args)

        return state

    def completion(self, state: ReplState):
        return super().completion(state)

    def help(self, _: ReplState):
        return f'{Kubectl.COMMAND} \t run a kubectl command'
adam/commands/login.py
CHANGED
|
@@ -8,7 +8,7 @@ from adam.config import Config
|
|
|
8
8
|
from adam.sso.idp import Idp
|
|
9
9
|
from adam.sso.idp_login import IdpLogin
|
|
10
10
|
from adam.commands.command import Command
|
|
11
|
-
from adam.repl_state import ReplState
|
|
11
|
+
from adam.repl_state import ReplState, RequiredState
|
|
12
12
|
from adam.utils import log, log2
|
|
13
13
|
|
|
14
14
|
class Login(Command):
|
|
@@ -26,6 +26,9 @@ class Login(Command):
|
|
|
26
26
|
def command(self):
|
|
27
27
|
return Login.COMMAND
|
|
28
28
|
|
|
29
|
+
def required(self):
|
|
30
|
+
return ReplState.NON_L
|
|
31
|
+
|
|
29
32
|
def run(self, cmd: str, state: ReplState):
|
|
30
33
|
def custom_handler(signum, frame):
|
|
31
34
|
AppSession.ctrl_c_entered = True
|
|
@@ -59,8 +62,8 @@ class Login(Command):
|
|
|
59
62
|
|
|
60
63
|
return state
|
|
61
64
|
|
|
62
|
-
def completion(self,
|
|
63
|
-
return
|
|
65
|
+
def completion(self, state: ReplState):
|
|
66
|
+
return super().completion(state)
|
|
64
67
|
|
|
65
68
|
def help(self, _: ReplState):
|
|
66
69
|
return f'{Login.COMMAND}\t SSO login'
|
adam/commands/logs.py
CHANGED
adam/commands/ls.py
CHANGED
|
@@ -1,20 +1,21 @@
|
|
|
1
1
|
import copy
|
|
2
2
|
|
|
3
|
+
from adam.commands.bash.bash import Bash
|
|
3
4
|
from adam.commands.command import Command
|
|
4
5
|
from adam.commands.commands_utils import show_pods, show_rollout
|
|
5
|
-
from adam.commands.cql.cqlsh import Cqlsh
|
|
6
6
|
from adam.commands.postgres.postgres_utils import pg_database_names, pg_table_names
|
|
7
|
-
from adam.commands.postgres.
|
|
7
|
+
from adam.commands.postgres.postgres_context import PostgresContext
|
|
8
8
|
from adam.config import Config
|
|
9
|
+
from adam.utils_athena import Athena
|
|
10
|
+
from adam.utils_k8s.app_pods import AppPods
|
|
9
11
|
from adam.utils_k8s.custom_resources import CustomResources
|
|
10
12
|
from adam.utils_k8s.ingresses import Ingresses
|
|
11
13
|
from adam.utils_k8s.kube_context import KubeContext
|
|
12
14
|
from adam.utils_k8s.statefulsets import StatefulSets
|
|
13
|
-
from adam.pod_exec_result import PodExecResult
|
|
14
15
|
from adam.repl_state import ReplState
|
|
15
16
|
from adam.utils import lines_to_tabular, log, log2
|
|
16
17
|
from adam.apps import Apps
|
|
17
|
-
from adam.
|
|
18
|
+
from adam.utils_audits import Audits
|
|
18
19
|
|
|
19
20
|
class Ls(Command):
|
|
20
21
|
COMMAND = 'ls'
|
|
@@ -45,7 +46,7 @@ class Ls(Command):
|
|
|
45
46
|
|
|
46
47
|
if state.device == ReplState.P:
|
|
47
48
|
if state.pg_path:
|
|
48
|
-
pg =
|
|
49
|
+
pg: PostgresContext = PostgresContext.apply(state.namespace, state.pg_path)
|
|
49
50
|
if pg.db:
|
|
50
51
|
self.show_pg_tables(pg)
|
|
51
52
|
else:
|
|
@@ -53,7 +54,13 @@ class Ls(Command):
|
|
|
53
54
|
else:
|
|
54
55
|
self.show_pg_hosts(state)
|
|
55
56
|
elif state.device == ReplState.A:
|
|
56
|
-
if state.
|
|
57
|
+
if state.app_pod:
|
|
58
|
+
return Bash().run('bash ' + cmd, state)
|
|
59
|
+
elif state.app_app:
|
|
60
|
+
pods = AppPods.pod_names(state.namespace, state.app_env, state.app_app)
|
|
61
|
+
|
|
62
|
+
log(lines_to_tabular(pods, 'POD_NAME'))
|
|
63
|
+
elif state.app_env:
|
|
57
64
|
def line(n: str, ns: str):
|
|
58
65
|
host = Ingresses.get_host(Config().get('app.login.ingress', '{app_id}-k8singr-appleader-001').replace('{app_id}', f'{ns}-{n}'), ns)
|
|
59
66
|
if not host:
|
|
@@ -76,10 +83,7 @@ class Ls(Command):
|
|
|
76
83
|
self.show_audit_log_tables()
|
|
77
84
|
else:
|
|
78
85
|
if state.pod:
|
|
79
|
-
|
|
80
|
-
if r.stderr:
|
|
81
|
-
log(r.stderr)
|
|
82
|
-
log(r.stdout)
|
|
86
|
+
return Bash().run('bash ' + cmd, state)
|
|
83
87
|
elif state.sts and state.namespace:
|
|
84
88
|
show_pods(StatefulSets.pods(state.sts, state.namespace), state.namespace, show_namespace=not KubeContext.in_cluster_namespace())
|
|
85
89
|
show_rollout(state.sts, state.namespace)
|
|
@@ -91,7 +95,7 @@ class Ls(Command):
|
|
|
91
95
|
def show_statefulsets(self):
|
|
92
96
|
ss = StatefulSets.list_sts_names()
|
|
93
97
|
if len(ss) == 0:
|
|
94
|
-
log2('No
|
|
98
|
+
log2('No Cassandra clusters found.')
|
|
95
99
|
return
|
|
96
100
|
|
|
97
101
|
app_ids = CustomResources.get_app_ids()
|
|
@@ -110,37 +114,45 @@ class Ls(Command):
|
|
|
110
114
|
|
|
111
115
|
def show_pg_hosts(self, state: ReplState):
|
|
112
116
|
if state.namespace:
|
|
113
|
-
def line(pg:
|
|
114
|
-
return f'{pg.
|
|
117
|
+
def line(pg: PostgresContext):
|
|
118
|
+
return f'{pg.path()},{pg.endpoint()}:{pg.port()},{pg.username()},{pg.password()}'
|
|
115
119
|
|
|
116
|
-
lines = [line(
|
|
120
|
+
lines = [line(PostgresContext.apply(state.namespace, pg)) for pg in PostgresContext.hosts(state.namespace)]
|
|
117
121
|
|
|
118
122
|
log(lines_to_tabular(lines, 'NAME,ENDPOINT,USERNAME,PASSWORD', separator=','))
|
|
119
123
|
else:
|
|
120
|
-
def line(pg:
|
|
121
|
-
return f'{pg.
|
|
124
|
+
def line(pg: PostgresContext):
|
|
125
|
+
return f'{pg.path()},{pg.namespace},{pg.endpoint()}:{pg.port()},{pg.username()},{pg.password()}'
|
|
122
126
|
|
|
123
|
-
lines = [line(
|
|
127
|
+
lines = [line(PostgresContext.apply(state.namespace, pg)) for pg in PostgresContext.hosts(state.namespace)]
|
|
124
128
|
|
|
125
129
|
log(lines_to_tabular(lines, 'NAME,NAMESPACE,ENDPOINT,USERNAME,PASSWORD', separator=','))
|
|
126
130
|
|
|
127
|
-
def show_pg_databases(self, pg:
|
|
128
|
-
log(lines_to_tabular(pg_database_names(pg.namespace, pg.
|
|
131
|
+
def show_pg_databases(self, pg: PostgresContext):
|
|
132
|
+
log(lines_to_tabular(pg_database_names(pg.namespace, pg.path()), 'DATABASE', separator=','))
|
|
129
133
|
|
|
130
|
-
def show_pg_tables(self, pg:
|
|
131
|
-
log(lines_to_tabular(pg_table_names(pg.namespace, pg.
|
|
134
|
+
def show_pg_tables(self, pg: PostgresContext):
|
|
135
|
+
log(lines_to_tabular(pg_table_names(pg.namespace, pg.path()), 'NAME', separator=','))
|
|
132
136
|
|
|
133
137
|
def show_audit_log_tables(self):
|
|
134
|
-
log(lines_to_tabular(
|
|
138
|
+
log(lines_to_tabular(Athena.table_names(), 'NAME', separator=','))
|
|
135
139
|
|
|
136
140
|
def completion(self, state: ReplState):
|
|
137
|
-
if state.
|
|
138
|
-
|
|
141
|
+
if state.device == ReplState.C:
|
|
142
|
+
def pod_names():
|
|
143
|
+
return [p for p in StatefulSets.pod_names(state.sts, state.namespace)]
|
|
144
|
+
|
|
145
|
+
if state.sts:
|
|
146
|
+
return super().completion(state) | {f'@{p}': {'ls': None} for p in pod_names()}
|
|
147
|
+
else:
|
|
148
|
+
return {Ls.COMMAND: {n: None for n in StatefulSets.list_sts_names()}}
|
|
149
|
+
elif state.device == ReplState.A and state.app_app:
|
|
150
|
+
def pod_names():
|
|
151
|
+
return [p for p in AppPods.pod_names(state.namespace, state.app_env, state.app_app)]
|
|
139
152
|
|
|
140
|
-
|
|
141
|
-
return {Ls.COMMAND: {n: None for n in StatefulSets.list_sts_names()}}
|
|
153
|
+
return super().completion(state) | {f'@{p}': {'ls': None} for p in pod_names()}
|
|
142
154
|
|
|
143
|
-
return
|
|
155
|
+
return super().completion(state)
|
|
144
156
|
|
|
145
157
|
def help(self, _: ReplState):
|
|
146
158
|
return f'{Ls.COMMAND} [device:]\t list apps, envs, clusters, nodes, pg hosts or pg databases'
|
adam/commands/nodetool.py
CHANGED
|
@@ -8,6 +8,7 @@ from adam.utils_k8s.cassandra_clusters import CassandraClusters
|
|
|
8
8
|
from adam.utils_k8s.cassandra_nodes import CassandraNodes
|
|
9
9
|
from adam.repl_state import ReplState, RequiredState
|
|
10
10
|
from adam.utils import log
|
|
11
|
+
from adam.utils_k8s.statefulsets import StatefulSets
|
|
11
12
|
|
|
12
13
|
class NodeTool(Command):
|
|
13
14
|
COMMAND = 'nodetool'
|
|
@@ -47,12 +48,14 @@ class NodeTool(Command):
|
|
|
47
48
|
|
|
48
49
|
def completion(self, state: ReplState):
|
|
49
50
|
if state.pod or state.sts:
|
|
50
|
-
|
|
51
|
+
d = {c: {'&': None} for c in NODETOOL_COMMANDS}
|
|
52
|
+
return {NodeTool.COMMAND: {'help': None} | d} | \
|
|
53
|
+
{f'@{p}': {NodeTool.COMMAND: d} for p in StatefulSets.pod_names(state.sts, state.namespace)}
|
|
51
54
|
|
|
52
55
|
return {}
|
|
53
56
|
|
|
54
57
|
def help(self, _: ReplState):
|
|
55
|
-
return f'{NodeTool.COMMAND} <sub-command
|
|
58
|
+
return f'{NodeTool.COMMAND} <sub-command> [&]\t run nodetool with arguments'
|
|
56
59
|
|
|
57
60
|
class NodeToolCommandHelper(click.Command):
|
|
58
61
|
def get_help(self, ctx: click.Context):
|
|
@@ -5,7 +5,7 @@ from adam.commands.postgres.psql_completions import psql_completions
|
|
|
5
5
|
from adam.commands.postgres.postgres_utils import pg_table_names
|
|
6
6
|
from .postgres_ls import PostgresLs
|
|
7
7
|
from .postgres_preview import PostgresPreview
|
|
8
|
-
from .
|
|
8
|
+
from .postgres_context import PostgresContext
|
|
9
9
|
from adam.repl_state import ReplState
|
|
10
10
|
from adam.utils import log, log2
|
|
11
11
|
|
|
@@ -62,7 +62,7 @@ class Postgres(Command):
|
|
|
62
62
|
|
|
63
63
|
return state
|
|
64
64
|
|
|
65
|
-
|
|
65
|
+
PostgresContext.apply(state.namespace, state.pg_path).run_sql(' '.join(args))
|
|
66
66
|
|
|
67
67
|
def completion(self, state: ReplState):
|
|
68
68
|
if state.device != state.P:
|
|
@@ -70,7 +70,7 @@ class Postgres(Command):
|
|
|
70
70
|
return {}
|
|
71
71
|
|
|
72
72
|
leaf = {}
|
|
73
|
-
session =
|
|
73
|
+
session = PostgresContext.apply(state.namespace, state.pg_path)
|
|
74
74
|
if session.db:
|
|
75
75
|
if pg_table_names(state.namespace, state.pg_path):
|
|
76
76
|
leaf = psql_completions(state.namespace, state.pg_path)
|
|
@@ -86,7 +86,7 @@ class Postgres(Command):
|
|
|
86
86
|
return {}
|
|
87
87
|
|
|
88
88
|
def help(self, _: ReplState):
|
|
89
|
-
return f'
|
|
89
|
+
return f'<sql-statements>\t run queries on Postgres databases'
|
|
90
90
|
|
|
91
91
|
class PostgresCommandHelper(click.Command):
|
|
92
92
|
def get_help(self, ctx: click.Context):
|
|
@@ -8,7 +8,28 @@ from adam.utils_k8s.pods import Pods
|
|
|
8
8
|
from adam.utils_k8s.secrets import Secrets
|
|
9
9
|
from adam.utils import log2
|
|
10
10
|
|
|
11
|
-
class
|
|
11
|
+
class PostgresContext:
|
|
12
|
+
def apply(namespace: str, path: str, arg: str = None) -> 'PostgresContext':
|
|
13
|
+
context = PostgresContext(namespace, path)
|
|
14
|
+
|
|
15
|
+
if arg:
|
|
16
|
+
if arg == '..':
|
|
17
|
+
if context.db:
|
|
18
|
+
context.db = None
|
|
19
|
+
else:
|
|
20
|
+
context.host = None
|
|
21
|
+
else:
|
|
22
|
+
tks = arg.split('@')
|
|
23
|
+
if not context.host:
|
|
24
|
+
context.host = tks[0]
|
|
25
|
+
else:
|
|
26
|
+
context.db = tks[0]
|
|
27
|
+
|
|
28
|
+
if not namespace and tks[1]:
|
|
29
|
+
context.namespace = tks[1]
|
|
30
|
+
|
|
31
|
+
return context
|
|
32
|
+
|
|
12
33
|
def __init__(self, ns: str, path: str):
|
|
13
34
|
self.namespace = ns
|
|
14
35
|
self.conn_details = None
|
|
@@ -25,29 +46,7 @@ class PostgresSession:
|
|
|
25
46
|
if len(tks) > 1:
|
|
26
47
|
self.db = tks[1]
|
|
27
48
|
|
|
28
|
-
def
|
|
29
|
-
if arg:
|
|
30
|
-
tks = arg.split('@')
|
|
31
|
-
if len(tks) > 1:
|
|
32
|
-
return tks[1]
|
|
33
|
-
|
|
34
|
-
return None
|
|
35
|
-
|
|
36
|
-
def directory(self, arg: str = None):
|
|
37
|
-
if arg:
|
|
38
|
-
if arg == '..':
|
|
39
|
-
if self.db:
|
|
40
|
-
self.db = None
|
|
41
|
-
else:
|
|
42
|
-
self.host = None
|
|
43
|
-
else:
|
|
44
|
-
tks = arg.split('@')
|
|
45
|
-
arg = tks[0]
|
|
46
|
-
if not self.host:
|
|
47
|
-
self.host = arg
|
|
48
|
-
else:
|
|
49
|
-
self.db = arg
|
|
50
|
-
|
|
49
|
+
def path(self):
|
|
51
50
|
if not self.host:
|
|
52
51
|
return None
|
|
53
52
|
|
|
@@ -58,7 +57,7 @@ class PostgresSession:
|
|
|
58
57
|
return f'{self.host}/{self.db}'
|
|
59
58
|
|
|
60
59
|
def hosts(ns: str):
|
|
61
|
-
return
|
|
60
|
+
return PostgresContext.hosts_for_namespace(ns)
|
|
62
61
|
|
|
63
62
|
@functools.lru_cache()
|
|
64
63
|
def hosts_for_namespace(ns: str):
|
|
@@ -133,7 +132,7 @@ class PostgresSession:
|
|
|
133
132
|
return dbs
|
|
134
133
|
|
|
135
134
|
def run_sql(self, sql: str, show_out = True):
|
|
136
|
-
db = self.db if self.db else
|
|
135
|
+
db = self.db if self.db else PostgresContext.default_db()
|
|
137
136
|
|
|
138
137
|
if KubeContext.in_cluster():
|
|
139
138
|
cmd1 = f'env PGPASSWORD={self.password()} psql -h {self.endpoint()} -p {self.port()} -U {self.username()} {db} --pset pager=off -c'
|
|
@@ -151,7 +150,7 @@ class PostgresSession:
|
|
|
151
150
|
pod_name = Config().get('pg.agent.name', 'ops-pg-agent')
|
|
152
151
|
|
|
153
152
|
if Config().get('pg.agent.just-in-time', False):
|
|
154
|
-
if not
|
|
153
|
+
if not PostgresContext.deploy_pg_agent(pod_name, ns):
|
|
155
154
|
return
|
|
156
155
|
|
|
157
156
|
real_pod_name = pod_name
|