kaqing 2.0.93-py3-none-any.whl → 2.0.115-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kaqing might be problematic.

Files changed (134)
  1. adam/apps.py +2 -2
  2. adam/batch.py +2 -16
  3. adam/checks/check_utils.py +4 -4
  4. adam/checks/compactionstats.py +1 -1
  5. adam/checks/cpu.py +2 -2
  6. adam/checks/disk.py +1 -1
  7. adam/checks/gossip.py +1 -1
  8. adam/checks/memory.py +3 -3
  9. adam/checks/status.py +1 -1
  10. adam/commands/alter_tables.py +3 -14
  11. adam/commands/app.py +3 -3
  12. adam/commands/app_ping.py +2 -2
  13. adam/commands/audit/audit.py +26 -11
  14. adam/commands/audit/audit_repair_tables.py +39 -4
  15. adam/commands/audit/audit_run.py +58 -0
  16. adam/commands/audit/show_last10.py +51 -0
  17. adam/commands/audit/show_slow10.py +50 -0
  18. adam/commands/audit/show_top10.py +49 -0
  19. adam/commands/audit/utils_show_top10.py +59 -0
  20. adam/commands/bash/bash.py +124 -0
  21. adam/commands/bash/bash_completer.py +93 -0
  22. adam/commands/cat.py +55 -0
  23. adam/commands/cd.py +26 -14
  24. adam/commands/check.py +6 -0
  25. adam/commands/cli_commands.py +3 -3
  26. adam/commands/code.py +60 -0
  27. adam/commands/command.py +9 -4
  28. adam/commands/commands_utils.py +4 -5
  29. adam/commands/cql/cql_completions.py +7 -3
  30. adam/commands/cql/cql_utils.py +103 -11
  31. adam/commands/cql/cqlsh.py +10 -5
  32. adam/commands/deploy/code_utils.py +2 -2
  33. adam/commands/deploy/deploy.py +7 -1
  34. adam/commands/deploy/deploy_pg_agent.py +2 -2
  35. adam/commands/deploy/deploy_pod.py +6 -6
  36. adam/commands/deploy/deploy_utils.py +2 -2
  37. adam/commands/deploy/undeploy.py +7 -1
  38. adam/commands/deploy/undeploy_pg_agent.py +2 -2
  39. adam/commands/deploy/undeploy_pod.py +4 -4
  40. adam/commands/devices.py +29 -0
  41. adam/commands/export/export.py +60 -0
  42. adam/commands/export/export_on_x.py +76 -0
  43. adam/commands/export/export_rmdbs.py +65 -0
  44. adam/commands/export/export_select.py +68 -0
  45. adam/commands/export/export_use.py +56 -0
  46. adam/commands/export/utils_export.py +253 -0
  47. adam/commands/help.py +9 -5
  48. adam/commands/issues.py +6 -0
  49. adam/commands/kubectl.py +41 -0
  50. adam/commands/login.py +6 -3
  51. adam/commands/logs.py +2 -1
  52. adam/commands/ls.py +43 -31
  53. adam/commands/medusa/medusa_backup.py +2 -2
  54. adam/commands/medusa/medusa_restore.py +2 -2
  55. adam/commands/medusa/medusa_show_backupjobs.py +3 -2
  56. adam/commands/medusa/medusa_show_restorejobs.py +2 -2
  57. adam/commands/nodetool.py +11 -16
  58. adam/commands/postgres/postgres.py +4 -4
  59. adam/commands/postgres/{postgres_session.py → postgres_context.py} +29 -30
  60. adam/commands/postgres/postgres_utils.py +5 -5
  61. adam/commands/postgres/psql_completions.py +1 -1
  62. adam/commands/preview_table.py +18 -32
  63. adam/commands/pwd.py +4 -3
  64. adam/commands/reaper/reaper.py +3 -0
  65. adam/commands/reaper/reaper_restart.py +1 -1
  66. adam/commands/reaper/reaper_session.py +1 -1
  67. adam/commands/repair/repair.py +3 -3
  68. adam/commands/repair/repair_log.py +1 -1
  69. adam/commands/repair/repair_run.py +2 -2
  70. adam/commands/repair/repair_scan.py +1 -1
  71. adam/commands/repair/repair_stop.py +1 -1
  72. adam/commands/report.py +6 -0
  73. adam/commands/restart.py +2 -2
  74. adam/commands/rollout.py +1 -1
  75. adam/commands/show/show.py +3 -1
  76. adam/commands/show/show_app_actions.py +3 -0
  77. adam/commands/show/show_app_id.py +1 -1
  78. adam/commands/show/show_app_queues.py +3 -2
  79. adam/commands/show/show_cassandra_status.py +3 -3
  80. adam/commands/show/show_cassandra_version.py +3 -3
  81. adam/commands/show/show_login.py +3 -0
  82. adam/commands/show/show_processes.py +1 -1
  83. adam/commands/show/show_repairs.py +2 -2
  84. adam/commands/show/show_storage.py +1 -1
  85. adam/commands/watch.py +1 -1
  86. adam/config.py +2 -1
  87. adam/embedded_params.py +1 -1
  88. adam/pod_exec_result.py +7 -1
  89. adam/repl.py +125 -99
  90. adam/repl_commands.py +29 -17
  91. adam/repl_state.py +229 -49
  92. adam/sql/sql_completer.py +86 -62
  93. adam/sql/sql_state_machine.py +563 -0
  94. adam/sql/term_completer.py +3 -0
  95. adam/sso/cred_cache.py +1 -1
  96. adam/sso/idp.py +1 -1
  97. adam/utils_athena.py +108 -74
  98. adam/utils_audits.py +104 -0
  99. adam/utils_export.py +42 -0
  100. adam/utils_k8s/__init__.py +0 -0
  101. adam/utils_k8s/app_clusters.py +33 -0
  102. adam/utils_k8s/app_pods.py +31 -0
  103. adam/{k8s_utils → utils_k8s}/cassandra_clusters.py +5 -6
  104. adam/{k8s_utils → utils_k8s}/cassandra_nodes.py +11 -4
  105. adam/{k8s_utils → utils_k8s}/deployment.py +2 -2
  106. adam/{k8s_utils → utils_k8s}/pods.py +54 -11
  107. adam/{k8s_utils → utils_k8s}/statefulsets.py +2 -2
  108. adam/version.py +1 -1
  109. {kaqing-2.0.93.dist-info → kaqing-2.0.115.dist-info}/METADATA +1 -1
  110. kaqing-2.0.115.dist-info/RECORD +203 -0
  111. adam/commands/bash.py +0 -91
  112. adam/commands/cql/cql_table_completer.py +0 -8
  113. adam/commands/describe/describe.py +0 -46
  114. adam/commands/describe/describe_keyspace.py +0 -60
  115. adam/commands/describe/describe_keyspaces.py +0 -50
  116. adam/commands/describe/describe_table.py +0 -60
  117. adam/commands/describe/describe_tables.py +0 -50
  118. adam/commands/postgres/psql_table_completer.py +0 -11
  119. adam/sql/state_machine.py +0 -460
  120. kaqing-2.0.93.dist-info/RECORD +0 -190
  121. /adam/commands/{describe → bash}/__init__.py +0 -0
  122. /adam/{k8s_utils → commands/export}/__init__.py +0 -0
  123. /adam/{k8s_utils → utils_k8s}/config_maps.py +0 -0
  124. /adam/{k8s_utils → utils_k8s}/custom_resources.py +0 -0
  125. /adam/{k8s_utils → utils_k8s}/ingresses.py +0 -0
  126. /adam/{k8s_utils → utils_k8s}/jobs.py +0 -0
  127. /adam/{k8s_utils → utils_k8s}/kube_context.py +0 -0
  128. /adam/{k8s_utils → utils_k8s}/secrets.py +0 -0
  129. /adam/{k8s_utils → utils_k8s}/service_accounts.py +0 -0
  130. /adam/{k8s_utils → utils_k8s}/services.py +0 -0
  131. /adam/{k8s_utils → utils_k8s}/volumes.py +0 -0
  132. {kaqing-2.0.93.dist-info → kaqing-2.0.115.dist-info}/WHEEL +0 -0
  133. {kaqing-2.0.93.dist-info → kaqing-2.0.115.dist-info}/entry_points.txt +0 -0
  134. {kaqing-2.0.93.dist-info → kaqing-2.0.115.dist-info}/top_level.txt +0 -0
adam/commands/export/utils_export.py ADDED
@@ -0,0 +1,253 @@
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from datetime import datetime
+ import io
+ import re
+ import time
+ from typing import Callable
+ import boto3
+
+ from adam.commands.cql.cql_utils import cassandra_table_names, run_cql, table_spec
+ from adam.config import Config
+ from adam.repl_state import ReplState
+ from adam.utils import elapsed_time, log2
+ from adam.utils_athena import Athena
+ from adam.utils_k8s.cassandra_nodes import CassandraNodes
+ from adam.utils_k8s.pods import Pods
+
+ def export_tables(args: list[str], state: ReplState, max_workers = 0):
+     consistency = None
+     specs = None
+
+     if args:
+         consistency, specs = ExportSpec.parse_multiple(' '.join(args))
+
+     if not specs:
+         specs = [ExportSpec(t) for t in cassandra_table_names(state, keyspace=f'{state.namespace}_db')]
+
+     if not max_workers:
+         max_workers = Config().action_workers('export', 8)
+
+     if max_workers > 1 and len(specs) > 1:
+         log2(f'Executing on {len(specs)} Cassandra tables in parallel...')
+         start_time = time.time()
+         try:
+             with ThreadPoolExecutor(max_workers=max_workers) as executor:
+                 futures = [executor.submit(export_table, spec, state, True, consistency=consistency) for spec in specs]
+                 if len(futures) == 0:
+                     return []
+
+                 return [future.result() for future in as_completed(futures)]
+         finally:
+             log2(f"{len(specs)} parallel table export elapsed time: {elapsed_time(start_time)} with {max_workers} workers")
+     else:
+         return [export_table(spec, state, multi_tables=len(specs) > 1, consistency=consistency) for spec in specs]
+
+ class ExportSpec:
+     def __init__(self, table: str, columns: str = None, target_table: str = None):
+         self.table = table
+         self.columns = columns
+         self.target_table = target_table
+
+     def parse(spec_str: str) -> 'ExportSpec':
+         target = None
+
+         p = re.compile(r"(.*?)\s+as\s+(.*)", re.IGNORECASE)
+         match = p.match(spec_str)
+         if match:
+             spec_str = match.group(1)
+             target = match.group(2)
+
+         table = spec_str
+         columns = None
+
+         p = re.compile('(.*?)\((.*)\)')
+         match = p.match(spec_str)
+         if match:
+             table = match.group(1)
+             columns = match.group(2)
+
+         return ExportSpec(table, columns, target)
+
+     def __eq__(self, other):
+         if isinstance(other, ExportSpec):
+             return self.table == other.table and self.columns == other.columns and self.target_table == other.target_table
+
+         return False
+
+     def __str__(self):
+         return f'{self.table}, {self.columns}, {self.target_table}'
+
+     def parse_multiple(spec_str: str) -> tuple[str, list['ExportSpec']]:
+         consistency = None
+
+         p = re.compile(r"(.*?)with\s+consistency\s+(.*)", re.IGNORECASE)
+         match = p.match(spec_str)
+         if match:
+             spec_str = match.group(1).strip(' ')
+             consistency = match.group(2)
+
+         if spec_str:
+             p = r",\s*(?![^()]*\))"
+             specs = re.split(p, spec_str)
+
+             return consistency, [ExportSpec.parse(spec) for spec in specs]
+
+         return consistency, []
+
+ def export_table(spec: ExportSpec, state: ReplState, multi_tables = True, consistency: str = None):
+     table = spec.table
+     columns = spec.columns
+     if not columns:
+         columns = Config().get('export.columns', f'<keys>')
+
+     if columns == '<keys>':
+         columns = ','.join(table_spec(state, table, on_any=True).keys())
+     elif columns == '<row-key>':
+         columns = table_spec(state, table, on_any=True).row_key()
+     elif columns == '*':
+         columns = ','.join([c.name for c in table_spec(state, table, on_any=True).columns])
+
+     if not columns:
+         log2(f'ERROR: Empty columns on {table}.')
+         return table
+
+     athena_table = spec.target_table if spec.target_table else table
+     if '.' in athena_table:
+         athena_table = athena_table.split('.')[-1]
+
+     temp_dir = Config().get('export.temp_dir', '/c3/cassandra/tmp')
+     session = state.export_session
+     create_db = not session
+     if create_db:
+         session = datetime.now().strftime("%Y%m%d%H%M%S")
+         state.export_session = session
+     db = f'export_{session}'
+
+     CassandraNodes.exec(state.pod, state.namespace, f'mkdir -p {temp_dir}/{session}', show_out=not multi_tables, shell='bash')
+     csv_file = f'{temp_dir}/{session}/{table}.csv'
+     succeeded = False
+     try:
+         suppress_ing_log = Config().is_debug() or multi_tables
+         queries = []
+         if consistency:
+             queries.append(f'CONSISTENCY {consistency}')
+         queries.append(f"COPY {table}({columns}) TO '{csv_file}' WITH HEADER = TRUE")
+         ing(f'Dumping table {table}{f" with consistency {consistency}" if consistency else ""}',
+             lambda: run_cql(state, ';'.join(queries), show_out=False),
+             suppress_log=suppress_ing_log)
+
+         def upload_to_s3():
+             bytes = Pods.read_file(state.pod, 'cassandra', state.namespace, csv_file)
+
+             s3 = boto3.client('s3')
+             s3.upload_fileobj(GeneratorStream(bytes), 'c3.ops--qing', f'export/{session}/{athena_table}/{table}.csv')
+
+         ing(f'Uploading to S3', upload_to_s3, suppress_log=suppress_ing_log)
+
+         def create_schema():
+             query = f'CREATE DATABASE IF NOT EXISTS {db};'
+             if Config().is_debug():
+                 log2(query)
+             Athena.query(query, 'default')
+
+             query = f'DROP TABLE IF EXISTS {athena_table};'
+             if Config().is_debug():
+                 log2(query)
+             Athena.query(query, db)
+
+             # columns = ', '.join([f'{h.strip(" ")} string' for h in header[0].split(',')])
+             athena_columns = ', '.join([f'{c} string' for c in columns.split(',')])
+             query = f'CREATE EXTERNAL TABLE IF NOT EXISTS {athena_table}(\n' + \
+                 f' {athena_columns})\n' + \
+                 "ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'\n" + \
+                 'WITH SERDEPROPERTIES (\n' + \
+                 ' "separatorChar" = ",",\n' + \
+                 ' "quoteChar" = "\\"")\n' + \
+                 f"LOCATION 's3://c3.ops--qing/export/{session}/{athena_table}'\n" + \
+                 'TBLPROPERTIES ("skip.header.line.count"="1");'
+             if Config().is_debug():
+                 log2(query)
+             try:
+                 Athena.query(query, db)
+             except Exception as e:
+                 log2(f'*** Failed query:\n{query}')
+                 raise e
+
+         ing(f"Creating database {db}" if create_db else f"Creating table {athena_table}", create_schema, suppress_log=suppress_ing_log)
+
+         succeeded = True
+     except Exception as e:
+         log2(e)
+     finally:
+         ing('Cleaning up temporary files',
+             lambda: CassandraNodes.exec(state.pod, state.namespace, f'rm -rf {csv_file}', show_out=False, shell='bash'),
+             suppress_log=suppress_ing_log)
+
+     if succeeded:
+         Athena.clear_cache()
+
+         if not suppress_ing_log:
+             query = f'select * from {athena_table} limit 10'
+             log2(query)
+             Athena.run_query(query, db)
+
+     return table
+
+ def ing(msg: str, body: Callable[[], None], suppress_log=False):
+     if not suppress_log:
+         log2(f'{msg}...', nl=False)
+     body()
+     if not suppress_log:
+         log2(' OK')
+
+ class GeneratorStream(io.RawIOBase):
+     def __init__(self, generator):
+         self._generator = generator
+         self._buffer = b''  # Buffer to store leftover bytes from generator yields
+
+     def readable(self):
+         return True
+
+     def _read_from_generator(self):
+         try:
+             chunk = next(self._generator)
+             if isinstance(chunk, str):
+                 chunk = chunk.encode('utf-8')  # Encode if generator yields strings
+             self._buffer += chunk
+         except StopIteration:
+             pass  # Generator exhausted
+
+     def readinto(self, b):
+         # Fill the buffer if necessary
+         while len(self._buffer) < len(b):
+             old_buffer_len = len(self._buffer)
+             self._read_from_generator()
+             if len(self._buffer) == old_buffer_len:  # Generator exhausted and buffer empty
+                 break
+
+         bytes_to_read = min(len(b), len(self._buffer))
+         b[:bytes_to_read] = self._buffer[:bytes_to_read]
+         self._buffer = self._buffer[bytes_to_read:]
+         return bytes_to_read
+
+     def read(self, size=-1):
+         if size == -1:  # Read all remaining data
+             while True:
+                 old_buffer_len = len(self._buffer)
+                 self._read_from_generator()
+                 if len(self._buffer) == old_buffer_len:
+                     break
+             data = self._buffer
+             self._buffer = b''
+             return data
+         else:
+             # Ensure enough data in buffer
+             while len(self._buffer) < size:
+                 old_buffer_len = len(self._buffer)
+                 self._read_from_generator()
+                 if len(self._buffer) == old_buffer_len:
+                     break
+
+             data = self._buffer[:size]
+             self._buffer = self._buffer[size:]
+             return data
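
As an illustrative sketch (not part of the diff), the spec grammar accepted by the new ExportSpec above appears to be "table[(columns)] [as target][, ...] [with consistency LEVEL]"; the snippet below exercises parse_multiple and GeneratorStream, assuming the released package is importable (table names and values here are made up):

from adam.commands.export.utils_export import ExportSpec, GeneratorStream

# "(...)" selects columns, "as" renames the Athena target table, and a trailing
# "with consistency ..." applies to the whole export.
consistency, specs = ExportSpec.parse_multiple(
    'orders(id,created) as orders_export, customers with consistency LOCAL_QUORUM')
assert consistency == 'LOCAL_QUORUM'
assert specs[0].table == 'orders' and specs[0].columns == 'id,created'
assert specs[0].target_table == 'orders_export'
assert specs[1].table == 'customers' and specs[1].columns is None

# GeneratorStream adapts a chunk generator (such as the one returned by Pods.read_file)
# to the file-like object that boto3's upload_fileobj expects.
stream = GeneratorStream(iter([b'id,created\n', '1,2024-01-01\n']))
assert stream.read() == b'id,created\n1,2024-01-01\n'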
adam/commands/help.py CHANGED
@@ -30,14 +30,18 @@ class Help(Command):
          lines.append('NAVIGATION')
          lines.append(' a: | c: | l: | p:\t switch to another operational device: App, Cassandra, Audit or Postgres')
          lines.extend(section(ReplCommands.navigation()))
-         lines.append('CHECK CASSANDRA')
-         lines.extend(section(ReplCommands.cassandra_check()))
-         lines.append('CASSANDRA OPERATIONS')
+         lines.append('CASSANDRA')
          lines.extend(section(ReplCommands.cassandra_ops()))
+         lines.append('POSTGRES')
+         lines.extend(section(ReplCommands.postgres_ops()))
+         lines.append('APP')
+         lines.extend(section(ReplCommands.app_ops()))
+         lines.append('EXPORT DB')
+         lines.extend(section(ReplCommands.export_ops()))
+         lines.append('AUDIT')
+         lines.extend(section(ReplCommands.audit_ops()))
          lines.append('TOOLS')
          lines.extend(section(ReplCommands.tools()))
-         lines.append('APP')
-         lines.extend(section(ReplCommands.app()))
          lines.append('')
          lines.extend(section(ReplCommands.exit()))

adam/commands/issues.py CHANGED
@@ -21,11 +21,17 @@ class Issues(Command):
      def command(self):
          return Issues.COMMAND

+     def required(self):
+         return ReplState.NON_L
+
      def run(self, cmd: str, state: ReplState):
          if not(args := self.args(cmd)):
              return super().run(cmd, state)

          state, args = self.apply_state(args, state)
+         if not self.validate_state(state):
+             return state
+
          args, show = Command.extract_options(args, ['-s', '--show'])

          results = run_checks(state.sts, state.namespace, state.pod, show_output=show)
adam/commands/kubectl.py ADDED
@@ -0,0 +1,41 @@
+ import subprocess
+
+ from adam.commands.command import Command
+ from adam.repl_state import ReplState, RequiredState
+
+ class Kubectl(Command):
+     COMMAND = 'k'
+
+     # the singleton pattern
+     def __new__(cls, *args, **kwargs):
+         if not hasattr(cls, 'instance'): cls.instance = super(Kubectl, cls).__new__(cls)
+
+         return cls.instance
+
+     def __init__(self, successor: Command=None):
+         super().__init__(successor)
+
+     def command(self):
+         return Kubectl.COMMAND
+
+     def required(self):
+         return RequiredState.NAMESPACE
+
+     def run(self, cmd: str, state: ReplState):
+         if not(args := self.args(cmd)):
+             return super().run(cmd, state)
+
+         state, args = self.apply_state(args, state)
+         if not self.validate_state(state):
+             return state
+
+         subprocess.run(["kubectl"] + args)
+
+         return state
+
+     def completion(self, state: ReplState):
+         return super().completion(state)
+
+
+     def help(self, _: ReplState):
+         return f'{Kubectl.COMMAND} \t run a kubectl command'
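
A minimal sketch (not part of the diff) of what the new `k` command does with its arguments, judging from the subprocess.run call above: it is a plain pass-through to the local kubectl binary, so typing `k get pods -o wide` in the kaqing REPL behaves roughly like the following (the real command first resolves the target namespace from ReplState via apply_state/validate_state):

import subprocess

# hypothetical arguments, as they would arrive after the leading 'k' is stripped
args = ["get", "pods", "-o", "wide"]
subprocess.run(["kubectl"] + args)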
adam/commands/login.py CHANGED
@@ -8,7 +8,7 @@ from adam.config import Config
  from adam.sso.idp import Idp
  from adam.sso.idp_login import IdpLogin
  from adam.commands.command import Command
- from adam.repl_state import ReplState
+ from adam.repl_state import ReplState, RequiredState
  from adam.utils import log, log2

  class Login(Command):
@@ -26,6 +26,9 @@ class Login(Command):
      def command(self):
          return Login.COMMAND

+     def required(self):
+         return ReplState.NON_L
+
      def run(self, cmd: str, state: ReplState):
          def custom_handler(signum, frame):
              AppSession.ctrl_c_entered = True
@@ -59,8 +62,8 @@ class Login(Command):

          return state

-     def completion(self, _: ReplState):
-         return {}
+     def completion(self, state: ReplState):
+         return super().completion(state)

      def help(self, _: ReplState):
          return f'{Login.COMMAND}\t SSO login'
adam/commands/logs.py CHANGED
@@ -1,6 +1,6 @@
  from adam.commands.command import Command
  from adam.config import Config
- from adam.k8s_utils.cassandra_nodes import CassandraNodes
+ from adam.utils_k8s.cassandra_nodes import CassandraNodes
  from adam.repl_state import ReplState, RequiredState

  class Logs(Command):
@@ -33,6 +33,7 @@ class Logs(Command):
          return CassandraNodes.exec(state.pod, state.namespace, f'cat {path}')

      def completion(self, _: ReplState):
+         # available only on cli
          return {}

      def help(self, _: ReplState):
adam/commands/ls.py CHANGED
@@ -1,20 +1,21 @@
  import copy

+ from adam.commands.bash.bash import Bash
  from adam.commands.command import Command
  from adam.commands.commands_utils import show_pods, show_rollout
- from adam.commands.cql.cqlsh import Cqlsh
  from adam.commands.postgres.postgres_utils import pg_database_names, pg_table_names
- from adam.commands.postgres.postgres_session import PostgresSession
+ from adam.commands.postgres.postgres_context import PostgresContext
  from adam.config import Config
- from adam.k8s_utils.custom_resources import CustomResources
- from adam.k8s_utils.ingresses import Ingresses
- from adam.k8s_utils.kube_context import KubeContext
- from adam.k8s_utils.statefulsets import StatefulSets
- from adam.pod_exec_result import PodExecResult
+ from adam.utils_athena import Athena
+ from adam.utils_k8s.app_pods import AppPods
+ from adam.utils_k8s.custom_resources import CustomResources
+ from adam.utils_k8s.ingresses import Ingresses
+ from adam.utils_k8s.kube_context import KubeContext
+ from adam.utils_k8s.statefulsets import StatefulSets
  from adam.repl_state import ReplState
  from adam.utils import lines_to_tabular, log, log2
  from adam.apps import Apps
- from adam.utils_athena import audit_table_names
+ from adam.utils_audits import Audits

  class Ls(Command):
      COMMAND = 'ls'
@@ -45,7 +46,7 @@ class Ls(Command):

          if state.device == ReplState.P:
              if state.pg_path:
-                 pg = PostgresSession(state.namespace, state.pg_path)
+                 pg: PostgresContext = PostgresContext.apply(state.namespace, state.pg_path)
                  if pg.db:
                      self.show_pg_tables(pg)
                  else:
@@ -53,7 +54,13 @@ class Ls(Command):
              else:
                  self.show_pg_hosts(state)
          elif state.device == ReplState.A:
-             if state.app_env:
+             if state.app_pod:
+                 return Bash().run('bash ' + cmd, state)
+             elif state.app_app:
+                 pods = AppPods.pod_names(state.namespace, state.app_env, state.app_app)
+
+                 log(lines_to_tabular(pods, 'POD_NAME'))
+             elif state.app_env:
                  def line(n: str, ns: str):
                      host = Ingresses.get_host(Config().get('app.login.ingress', '{app_id}-k8singr-appleader-001').replace('{app_id}', f'{ns}-{n}'), ns)
                      if not host:
@@ -76,10 +83,7 @@ class Ls(Command):
              self.show_audit_log_tables()
          else:
              if state.pod:
-                 r: PodExecResult = Cqlsh().run(f'cql describe tables', state)
-                 if r.stderr:
-                     log(r.stderr)
-                 log(r.stdout)
+                 return Bash().run('bash ' + cmd, state)
              elif state.sts and state.namespace:
                  show_pods(StatefulSets.pods(state.sts, state.namespace), state.namespace, show_namespace=not KubeContext.in_cluster_namespace())
                  show_rollout(state.sts, state.namespace)
@@ -91,7 +95,7 @@ class Ls(Command):
      def show_statefulsets(self):
          ss = StatefulSets.list_sts_names()
          if len(ss) == 0:
-             log2('No cassandra statefulsets found.')
+             log2('No Cassandra clusters found.')
              return

          app_ids = CustomResources.get_app_ids()
@@ -110,37 +114,45 @@ class Ls(Command):

      def show_pg_hosts(self, state: ReplState):
          if state.namespace:
-             def line(pg: PostgresSession):
-                 return f'{pg.directory()},{pg.endpoint()}:{pg.port()},{pg.username()},{pg.password()}'
+             def line(pg: PostgresContext):
+                 return f'{pg.path()},{pg.endpoint()}:{pg.port()},{pg.username()},{pg.password()}'

-             lines = [line(PostgresSession(state.namespace, pg)) for pg in PostgresSession.hosts(state.namespace)]
+             lines = [line(PostgresContext.apply(state.namespace, pg)) for pg in PostgresContext.hosts(state.namespace)]

              log(lines_to_tabular(lines, 'NAME,ENDPOINT,USERNAME,PASSWORD', separator=','))
          else:
-             def line(pg: PostgresSession):
-                 return f'{pg.directory()},{pg.namespace},{pg.endpoint()}:{pg.port()},{pg.username()},{pg.password()}'
+             def line(pg: PostgresContext):
+                 return f'{pg.path()},{pg.namespace},{pg.endpoint()}:{pg.port()},{pg.username()},{pg.password()}'

-             lines = [line(PostgresSession(state.namespace, pg)) for pg in PostgresSession.hosts(state.namespace)]
+             lines = [line(PostgresContext.apply(state.namespace, pg)) for pg in PostgresContext.hosts(state.namespace)]

              log(lines_to_tabular(lines, 'NAME,NAMESPACE,ENDPOINT,USERNAME,PASSWORD', separator=','))

-     def show_pg_databases(self, pg: PostgresSession):
-         log(lines_to_tabular(pg_database_names(pg.namespace, pg.directory()), 'DATABASE', separator=','))
+     def show_pg_databases(self, pg: PostgresContext):
+         log(lines_to_tabular(pg_database_names(pg.namespace, pg.path()), 'DATABASE', separator=','))

-     def show_pg_tables(self, pg: PostgresSession):
-         log(lines_to_tabular(pg_table_names(pg.namespace, pg.directory()), 'NAME', separator=','))
+     def show_pg_tables(self, pg: PostgresContext):
+         log(lines_to_tabular(pg_table_names(pg.namespace, pg.path()), 'NAME', separator=','))

      def show_audit_log_tables(self):
-         log(lines_to_tabular(audit_table_names(), 'NAME', separator=','))
+         log(lines_to_tabular(Athena.table_names(), 'NAME', separator=','))

      def completion(self, state: ReplState):
-         if state.pod:
-             return {}
+         if state.device == ReplState.C:
+             def pod_names():
+                 return [p for p in StatefulSets.pod_names(state.sts, state.namespace)]
+
+             if state.sts:
+                 return super().completion(state) | {f'@{p}': {'ls': None} for p in pod_names()}
+             else:
+                 return {Ls.COMMAND: {n: None for n in StatefulSets.list_sts_names()}}
+         elif state.device == ReplState.A and state.app_app:
+             def pod_names():
+                 return [p for p in AppPods.pod_names(state.namespace, state.app_env, state.app_app)]

-         if not state.sts:
-             return {Ls.COMMAND: {n: None for n in StatefulSets.list_sts_names()}}
+             return super().completion(state) | {f'@{p}': {'ls': None} for p in pod_names()}

-         return {Ls.COMMAND: None}
+         return super().completion(state)

      def help(self, _: ReplState):
          return f'{Ls.COMMAND} [device:]\t list apps, envs, clusters, nodes, pg hosts or pg databases'
@@ -2,9 +2,9 @@ from datetime import datetime
  import re

  from adam.commands.command import Command
- from adam.k8s_utils.statefulsets import StatefulSets
+ from adam.utils_k8s.statefulsets import StatefulSets
  from adam.repl_state import ReplState, RequiredState
- from adam.k8s_utils.custom_resources import CustomResources
+ from adam.utils_k8s.custom_resources import CustomResources
  from adam.utils import log2


@@ -1,9 +1,9 @@
  from datetime import datetime

  from adam.commands.command import Command
- from adam.k8s_utils.statefulsets import StatefulSets
+ from adam.utils_k8s.statefulsets import StatefulSets
  from adam.repl_state import ReplState, RequiredState
- from adam.k8s_utils.custom_resources import CustomResources
+ from adam.utils_k8s.custom_resources import CustomResources
  from adam.config import Config
  from adam.utils import lines_to_tabular, log2

@@ -1,7 +1,7 @@
  from adam.commands.command import Command
- from adam.k8s_utils.statefulsets import StatefulSets
+ from adam.utils_k8s.statefulsets import StatefulSets
  from adam.repl_state import ReplState, RequiredState
- from adam.k8s_utils.custom_resources import CustomResources
+ from adam.utils_k8s.custom_resources import CustomResources
  from adam.utils import lines_to_tabular, log2


@@ -29,6 +29,7 @@ class MedusaShowBackupJobs(Command):
          state, args = self.apply_state(args, state)
          if not self.validate_state(state):
              return state
+
          ns = state.namespace
          dc = StatefulSets.get_datacenter(state.sts, ns)
          if not dc:
@@ -1,7 +1,7 @@
  from adam.commands.command import Command
- from adam.k8s_utils.statefulsets import StatefulSets
+ from adam.utils_k8s.statefulsets import StatefulSets
  from adam.repl_state import ReplState, RequiredState
- from adam.k8s_utils.custom_resources import CustomResources
+ from adam.utils_k8s.custom_resources import CustomResources
  from adam.utils import lines_to_tabular, log2

  class MedusaShowRestoreJobs(Command):
adam/commands/nodetool.py CHANGED
@@ -4,12 +4,11 @@ from adam.commands.command import Command
  from adam.commands.command_helpers import ClusterOrPodCommandHelper
  from adam.commands.nodetool_commands import NODETOOL_COMMANDS
  from adam.config import Config
- from adam.k8s_utils.cassandra_clusters import CassandraClusters
- from adam.k8s_utils.cassandra_nodes import CassandraNodes
- from adam.pod_exec_result import PodExecResult
- from adam.repl_session import ReplSession
+ from adam.utils_k8s.cassandra_clusters import CassandraClusters
+ from adam.utils_k8s.cassandra_nodes import CassandraNodes
  from adam.repl_state import ReplState, RequiredState
- from adam.utils import log, random_alphanumeric
+ from adam.utils import log
+ from adam.utils_k8s.statefulsets import StatefulSets

  class NodeTool(Command):
      COMMAND = 'nodetool'
@@ -41,26 +40,22 @@ class NodeTool(Command):
          command = f"nodetool -u {user} -pw {pw} {' '.join(args)}"

          if state.pod:
-             results: PodExecResult = CassandraNodes.exec(state.pod, state.namespace, command, show_out=True)
-             if results and results.log_file and Config().get('repl.history.push-cat-remote-log-file', True):
-                 ReplSession().prompt_session.history.append_string(f'bash cat {results.log_file}')
+             return CassandraNodes.exec(state.pod, state.namespace, command, show_out=True)
          elif state.sts:
-             results: list[PodExecResult] = CassandraClusters.exec(state.sts, state.namespace, command, action='nodetool', show_out=True)
-             if results and Config().get('repl.history.push-cat-remote-log-file', True):
-                 for result in results:
-                     if result.log_file:
-                         ReplSession().prompt_session.history.append_string(f'bash {result.pod} cat {result.log_file}')
+             return CassandraClusters.exec(state.sts, state.namespace, command, action='nodetool', show_out=True)

-         return results
+         return state

      def completion(self, state: ReplState):
          if state.pod or state.sts:
-             return {NodeTool.COMMAND: {'help': None} | {c: None for c in NODETOOL_COMMANDS}}
+             d = {c: {'&': None} for c in NODETOOL_COMMANDS}
+             return {NodeTool.COMMAND: {'help': None} | d} | \
+                 {f'@{p}': {NodeTool.COMMAND: d} for p in StatefulSets.pod_names(state.sts, state.namespace)}

          return {}

      def help(self, _: ReplState):
-         return f'{NodeTool.COMMAND} <sub-command>\t run nodetool with arguments'
+         return f'{NodeTool.COMMAND} <sub-command> [&]\t run nodetool with arguments'

  class NodeToolCommandHelper(click.Command):
      def get_help(self, ctx: click.Context):
@@ -5,7 +5,7 @@ from adam.commands.postgres.psql_completions import psql_completions
  from adam.commands.postgres.postgres_utils import pg_table_names
  from .postgres_ls import PostgresLs
  from .postgres_preview import PostgresPreview
- from .postgres_session import PostgresSession
+ from .postgres_context import PostgresContext
  from adam.repl_state import ReplState
  from adam.utils import log, log2

@@ -62,7 +62,7 @@ class Postgres(Command):

              return state

-         PostgresSession(state.namespace, state.pg_path).run_sql(' '.join(args))
+         PostgresContext.apply(state.namespace, state.pg_path).run_sql(' '.join(args))

      def completion(self, state: ReplState):
          if state.device != state.P:
@@ -70,7 +70,7 @@ class Postgres(Command):
              return {}

          leaf = {}
-         session = PostgresSession(state.namespace, state.pg_path)
+         session = PostgresContext.apply(state.namespace, state.pg_path)
          if session.db:
              if pg_table_names(state.namespace, state.pg_path):
                  leaf = psql_completions(state.namespace, state.pg_path)
@@ -86,7 +86,7 @@ class Postgres(Command):
          return {}

      def help(self, _: ReplState):
-         return f'[{Postgres.COMMAND}] <sql-statements>\t run psql with queries'
+         return f'<sql-statements>\t run queries on Postgres databases'

  class PostgresCommandHelper(click.Command):
      def get_help(self, ctx: click.Context):