kaqing 2.0.145__py3-none-any.whl → 2.0.174__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of kaqing might be problematic.

Files changed (174)
  1. adam/__init__.py +0 -2
  2. adam/app_session.py +8 -11
  3. adam/batch.py +3 -3
  4. adam/checks/check_utils.py +14 -46
  5. adam/checks/cpu.py +7 -1
  6. adam/checks/cpu_metrics.py +52 -0
  7. adam/checks/disk.py +2 -3
  8. adam/columns/columns.py +3 -1
  9. adam/columns/cpu.py +3 -1
  10. adam/columns/cpu_metrics.py +22 -0
  11. adam/columns/memory.py +3 -4
  12. adam/commands/__init__.py +22 -0
  13. adam/commands/alter_tables.py +33 -48
  14. adam/commands/audit/audit.py +22 -23
  15. adam/commands/audit/audit_repair_tables.py +14 -17
  16. adam/commands/audit/audit_run.py +15 -23
  17. adam/commands/audit/show_last10.py +10 -13
  18. adam/commands/audit/show_slow10.py +10 -13
  19. adam/commands/audit/show_top10.py +10 -13
  20. adam/commands/audit/utils_show_top10.py +2 -3
  21. adam/commands/bash/__init__.py +5 -0
  22. adam/commands/bash/bash.py +7 -104
  23. adam/commands/bash/utils_bash.py +16 -0
  24. adam/commands/cat.py +7 -23
  25. adam/commands/cd.py +7 -11
  26. adam/commands/check.py +14 -23
  27. adam/commands/cli_commands.py +2 -3
  28. adam/commands/code.py +20 -23
  29. adam/commands/command.py +152 -37
  30. adam/commands/commands_utils.py +8 -17
  31. adam/commands/cp.py +18 -32
  32. adam/commands/cql/cql_completions.py +11 -7
  33. adam/commands/cql/cqlsh.py +10 -30
  34. adam/commands/cql/{cql_utils.py → utils_cql.py} +147 -15
  35. adam/commands/deploy/code_start.py +7 -10
  36. adam/commands/deploy/code_stop.py +4 -21
  37. adam/commands/deploy/code_utils.py +3 -3
  38. adam/commands/deploy/deploy.py +4 -27
  39. adam/commands/deploy/deploy_frontend.py +14 -17
  40. adam/commands/deploy/deploy_pg_agent.py +2 -5
  41. adam/commands/deploy/deploy_pod.py +64 -68
  42. adam/commands/deploy/undeploy.py +4 -27
  43. adam/commands/deploy/undeploy_frontend.py +4 -7
  44. adam/commands/deploy/undeploy_pg_agent.py +4 -7
  45. adam/commands/deploy/undeploy_pod.py +9 -12
  46. adam/commands/devices/device.py +93 -2
  47. adam/commands/devices/device_app.py +37 -10
  48. adam/commands/devices/device_auit_log.py +8 -2
  49. adam/commands/devices/device_cass.py +47 -7
  50. adam/commands/devices/device_export.py +9 -11
  51. adam/commands/devices/device_postgres.py +41 -6
  52. adam/commands/exit.py +1 -4
  53. adam/commands/export/clean_up_all_export_sessions.py +37 -0
  54. adam/commands/export/clean_up_export_sessions.py +12 -8
  55. adam/commands/export/drop_export_database.py +7 -26
  56. adam/commands/export/drop_export_databases.py +5 -14
  57. adam/commands/export/export.py +8 -38
  58. adam/commands/export/export_databases.py +86 -27
  59. adam/commands/export/export_select.py +25 -27
  60. adam/commands/export/export_select_x.py +3 -3
  61. adam/commands/export/export_sessions.py +124 -0
  62. adam/commands/export/export_use.py +8 -17
  63. adam/commands/export/exporter.py +88 -158
  64. adam/commands/export/import_session.py +7 -35
  65. adam/commands/export/importer.py +12 -5
  66. adam/commands/export/importer_athena.py +21 -20
  67. adam/commands/export/importer_sqlite.py +16 -21
  68. adam/commands/export/show_column_counts.py +7 -25
  69. adam/commands/export/show_export_databases.py +4 -6
  70. adam/commands/export/show_export_session.py +7 -18
  71. adam/commands/export/show_export_sessions.py +9 -12
  72. adam/commands/export/utils_export.py +26 -1
  73. adam/commands/intermediate_command.py +49 -0
  74. adam/commands/issues.py +11 -43
  75. adam/commands/kubectl.py +3 -6
  76. adam/commands/login.py +22 -24
  77. adam/commands/logs.py +3 -6
  78. adam/commands/ls.py +8 -9
  79. adam/commands/medusa/medusa.py +4 -22
  80. adam/commands/medusa/medusa_backup.py +20 -25
  81. adam/commands/medusa/medusa_restore.py +34 -36
  82. adam/commands/medusa/medusa_show_backupjobs.py +14 -18
  83. adam/commands/medusa/medusa_show_restorejobs.py +11 -18
  84. adam/commands/nodetool.py +6 -15
  85. adam/commands/param_get.py +11 -13
  86. adam/commands/param_set.py +8 -12
  87. adam/commands/postgres/postgres.py +22 -38
  88. adam/commands/postgres/postgres_context.py +47 -23
  89. adam/commands/postgres/postgres_ls.py +4 -8
  90. adam/commands/postgres/postgres_preview.py +5 -9
  91. adam/commands/postgres/psql_completions.py +1 -1
  92. adam/commands/postgres/utils_postgres.py +70 -0
  93. adam/commands/preview_table.py +6 -45
  94. adam/commands/pwd.py +13 -16
  95. adam/commands/reaper/reaper.py +4 -27
  96. adam/commands/reaper/reaper_forward.py +48 -55
  97. adam/commands/reaper/reaper_forward_session.py +6 -0
  98. adam/commands/reaper/reaper_forward_stop.py +10 -16
  99. adam/commands/reaper/reaper_restart.py +7 -14
  100. adam/commands/reaper/reaper_run_abort.py +8 -33
  101. adam/commands/reaper/reaper_runs.py +42 -57
  102. adam/commands/reaper/reaper_runs_abort.py +29 -49
  103. adam/commands/reaper/reaper_schedule_activate.py +9 -32
  104. adam/commands/reaper/reaper_schedule_start.py +9 -32
  105. adam/commands/reaper/reaper_schedule_stop.py +9 -32
  106. adam/commands/reaper/reaper_schedules.py +4 -14
  107. adam/commands/reaper/reaper_status.py +8 -16
  108. adam/commands/reaper/utils_reaper.py +196 -0
  109. adam/commands/repair/repair.py +4 -22
  110. adam/commands/repair/repair_log.py +5 -11
  111. adam/commands/repair/repair_run.py +27 -34
  112. adam/commands/repair/repair_scan.py +32 -38
  113. adam/commands/repair/repair_stop.py +5 -11
  114. adam/commands/report.py +27 -29
  115. adam/commands/restart.py +25 -26
  116. adam/commands/rollout.py +19 -24
  117. adam/commands/shell.py +10 -4
  118. adam/commands/show/show.py +10 -26
  119. adam/commands/show/show_cassandra_repairs.py +35 -0
  120. adam/commands/show/show_cassandra_status.py +32 -43
  121. adam/commands/show/show_cassandra_version.py +5 -18
  122. adam/commands/show/show_commands.py +19 -24
  123. adam/commands/show/show_host.py +1 -1
  124. adam/commands/show/show_login.py +20 -27
  125. adam/commands/show/show_processes.py +15 -19
  126. adam/commands/show/show_storage.py +10 -20
  127. adam/commands/watch.py +26 -29
  128. adam/config.py +4 -16
  129. adam/embedded_params.py +1 -1
  130. adam/log.py +4 -4
  131. adam/pod_exec_result.py +3 -3
  132. adam/repl.py +31 -32
  133. adam/repl_commands.py +11 -11
  134. adam/repl_state.py +52 -26
  135. adam/sql/sql_completer.py +4 -6
  136. adam/sql/sql_state_machine.py +21 -14
  137. adam/sso/authn_ad.py +6 -8
  138. adam/sso/authn_okta.py +4 -6
  139. adam/sso/cred_cache.py +3 -5
  140. adam/sso/idp.py +9 -12
  141. adam/utils.py +393 -33
  142. adam/utils_athena.py +14 -13
  143. adam/utils_audits.py +12 -12
  144. adam/utils_issues.py +32 -0
  145. adam/utils_k8s/app_clusters.py +13 -18
  146. adam/utils_k8s/app_pods.py +2 -0
  147. adam/utils_k8s/cassandra_clusters.py +21 -18
  148. adam/utils_k8s/custom_resources.py +16 -17
  149. adam/utils_k8s/ingresses.py +2 -2
  150. adam/utils_k8s/jobs.py +7 -11
  151. adam/utils_k8s/k8s.py +87 -0
  152. adam/utils_k8s/pods.py +14 -76
  153. adam/utils_k8s/secrets.py +4 -4
  154. adam/utils_k8s/service_accounts.py +5 -4
  155. adam/utils_k8s/services.py +2 -2
  156. adam/utils_k8s/statefulsets.py +1 -12
  157. adam/utils_repl/state_machine.py +3 -3
  158. adam/utils_sqlite.py +78 -42
  159. adam/version.py +1 -1
  160. {kaqing-2.0.145.dist-info → kaqing-2.0.174.dist-info}/METADATA +1 -1
  161. kaqing-2.0.174.dist-info/RECORD +230 -0
  162. adam/commands/app.py +0 -67
  163. adam/commands/app_ping.py +0 -44
  164. adam/commands/export/clean_up_export_session.py +0 -53
  165. adam/commands/postgres/postgres_utils.py +0 -31
  166. adam/commands/reaper/reaper_session.py +0 -159
  167. adam/commands/show/show_app_actions.py +0 -56
  168. adam/commands/show/show_app_id.py +0 -47
  169. adam/commands/show/show_app_queues.py +0 -45
  170. adam/commands/show/show_repairs.py +0 -47
  171. kaqing-2.0.145.dist-info/RECORD +0 -227
  172. {kaqing-2.0.145.dist-info → kaqing-2.0.174.dist-info}/WHEEL +0 -0
  173. {kaqing-2.0.145.dist-info → kaqing-2.0.174.dist-info}/entry_points.txt +0 -0
  174. {kaqing-2.0.145.dist-info → kaqing-2.0.174.dist-info}/top_level.txt +0 -0
adam/commands/reaper/utils_reaper.py ADDED
@@ -0,0 +1,196 @@
+ from collections.abc import Callable
+ from functools import partial
+ from typing import List, cast
+ from kubernetes import client
+ import re
+
+ import requests
+ from adam.config import Config
+ from adam.repl_state import ReplState
+ from adam.utils import lines_to_tabular, log2, wait_log
+ from adam.utils_k8s.k8s import port_forwarding
+
+ class ReaperService:
+     def __init__(self, state: ReplState, local_addr: str, remote_addr: str, show_out = True):
+         self.state = state
+         self.local_addr = local_addr
+         self.remote_addr = remote_addr
+         self.show_out = show_out
+         self.headers = None
+
+     def get(self, path: str, params: dict[str, any] = {}):
+         with logging(self, 'GET', path) as (url, headers):
+             return requests.get(url, headers=headers, params=params)
+
+     def put(self, path: str, params: dict[str, any] = {}):
+         with logging(self, 'PUT', path) as (url, headers):
+             return requests.put(url, headers=headers, params=params)
+
+     def post(self, path: str, params: dict[str, any] = {}):
+         with logging(self, 'POST', path) as (url, headers):
+             return requests.post(url, headers=headers, params=params)
+
+ class ReaperLogginHandler:
+     def __init__(self, svc: ReaperService, method: str, path: str):
+         self.svc = svc
+         self.method = method
+         self.path = path
+
+     def __enter__(self) -> tuple[str, dict[str, any]]:
+         if not self.svc.headers:
+             self.svc.headers = Reapers.cookie_header(self.svc.state, self.svc.local_addr, self.svc.remote_addr, show_output=self.svc.show_out)
+
+         if self.svc.show_out and self.method:
+             log2(f'{self.method} {self.svc.remote_addr}/{self.path}')
+
+         return (f'http://{self.svc.local_addr}/{self.path}', self.svc.headers)
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         if exc_val and isinstance(exc_val, requests.Response):
+             if int(exc_val.status_code / 100) != 2:
+                 if self.svc.show_out:
+                     log2(exc_val.status_code)
+
+         return False
+
+ def logging(svc: ReaperService, method: str, path: str):
+     return ReaperLogginHandler(svc, method, path)
+
+ class ReaperHandler:
+     def __init__(self, state: ReplState, show_out = True):
+         self.state = state
+         self.show_out = show_out
+         self.headers = None
+         self.forwarding = None
+
+     def __enter__(self):
+         self.forwarding = port_forwarding(self.state, Reapers.local_port(), partial(Reapers.svc_or_pod, self.state), Reapers.target_port())
+         (local_addr, remote_addr) = self.forwarding.__enter__()
+
+         return ReaperService(self.state, local_addr, remote_addr, show_out=self.show_out)
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         if self.forwarding:
+             return self.forwarding.__exit__(exc_type, exc_val, exc_tb)
+
+         return False
+
+ def reaper(state: ReplState, show_out = True):
+     return ReaperHandler(state, show_out=show_out)
+
+ class Reapers:
+     schedules_ids_by_cluster: dict[str, list[str]] = {}
+
+     def pod_name(state: ReplState):
+         pods = Reapers.list_reaper_pods(state.sts if state.sts else state.pod, state.namespace)
+         if pods:
+             return pods[0].metadata.name
+
+         return None
+
+     def show_schedule(state: ReplState, schedule_id: str):
+         def filter(schedules: list[dict]):
+             return [schedule for schedule in schedules if schedule['id'] == schedule_id]
+
+         Reapers.show_schedules(state, filter)
+
+     def show_schedules(state: ReplState, filter: Callable[[list[dict]], dict] = None):
+         schedules = Reapers.list_schedules(state, filter=filter)
+
+         # forced refresh of schedule list
+         if not filter:
+             Reapers.schedules_ids_by_cluster[state.sts] = [schedule['id'] for schedule in schedules]
+         Reapers.show_schedules_tabular(schedules)
+
+     def schedule_ids(state: ReplState, show_output = True, filter: Callable[[list[dict]], dict] = None):
+         schedules = Reapers.list_schedules(state, show_output=show_output, filter=filter)
+         return [schedule['id'] for schedule in schedules]
+
+     def list_schedules(state: ReplState, show_output = True, filter: Callable[[list[dict]], dict] = None) -> list[dict]:
+         with reaper(state, show_out=show_output) as requests:
+             if not (response := requests.get('repair_schedule')):
+                 return
+
+             res = response.json()
+             if filter:
+                 res = filter(res)
+
+             return res
+
+     def show_schedules_tabular(schedules: list[dict]):
+         log2(lines_to_tabular([f"{schedule['id']} {schedule['state']} {schedule['cluster_name']} {schedule['keyspace_name']}" for schedule in schedules], 'ID STATE CLUSTER KEYSPACE'))
+
+     def list_reaper_pods(sts_name: str, namespace: str) -> List[client.V1Pod]:
+         v1 = client.CoreV1Api()
+
+         # k8ssandra.io/reaper: cs-d0767a536f-cs-d0767a536f-reaper
+         groups = re.match(Config().get('reaper.pod.cluster-regex', r'(.*?-.*?-.*?-.*?)-.*'), sts_name)
+         label_selector = Config().get('reaper.pod.label-selector', 'k8ssandra.io/reaper={cluster}-reaper').replace('{cluster}', groups[1])
+
+         return cast(List[client.V1Pod], v1.list_namespaced_pod(namespace, label_selector=label_selector).items)
+
+     def cookie_header(state: ReplState, local_addr, remote_addr, show_output = True):
+         return {'Cookie': Reapers.login(state, local_addr, remote_addr, show_output=show_output)}
+
+     def login(state: ReplState, local_addr: str, remote_addr: str, show_output = True) -> str :
+         user, pw = state.user_pass(secret_path='reaper.secret')
+
+         response = requests.post(f'http://{local_addr}/login', headers={
+             'Accept': '*'
+         },data={
+             'username':user,
+             'password':pw})
+         if show_output:
+             log2(f'POST {remote_addr}/login')
+             log2(f' username={user}&password={pw}')
+
+         if int(response.status_code / 100) != 2:
+             if show_output:
+                 log2("login failed")
+             return None
+
+         return response.headers['Set-Cookie']
+
+     def reaper_spec(state: ReplState) -> dict[str, any]:
+         if not (pod := Reapers.pod_name(state)):
+             return {}
+
+         user, pw = state.user_pass(secret_path='reaper.secret')
+
+         return {
+             'pod': pod,
+             'exec': f'kubectl exec -it {pod} -n {state.namespace} -- bash',
+             'forward': f'kubectl port-forward pods/{pod} -n {state.namespace} {Reapers.local_port()}:{Reapers.target_port()}',
+             'web-uri': f'http://localhost:{Reapers.local_port()}/webui',
+             'username': user,
+             'password': pw
+         }
+
+     def cached_schedule_ids(state: ReplState) -> list[str]:
+         if state.sts in Reapers.schedules_ids_by_cluster:
+             return Reapers.schedules_ids_by_cluster[state.sts]
+
+         if pod := Reapers.pod_name(state):
+             wait_log('Inspecting Cassandra Reaper...')
+
+             schedules = Reapers.schedule_ids(state, show_output = False)
+             Reapers.schedules_ids_by_cluster[state.sts] = schedules
+
+             return schedules
+
+         return []
+
+     def svc_name():
+         return Config().get('reaper.service-name', 'reaper-service')
+
+     def local_port():
+         return Config().get('reaper.port-forward.local-port', 9001)
+
+     def target_port():
+         return 8080
+
+     def svc_or_pod(state: ReplState, is_service: bool):
+         if is_service:
+             return Reapers.svc_name()
+         else:
+             return Reapers.pod_name(state)
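The new utils_reaper.py gathers the Reaper plumbing that was previously split between reaper_session.py (removed in this release) and the individual commands: reaper(state) opens a port-forward to the Reaper pod or service, logs in once, and yields a ReaperService whose get/put/post calls reuse the cached session cookie. A minimal usage sketch following the same pattern as list_schedules above (the ReplState instance and a reachable cluster are assumed to already exist):

from adam.commands.reaper.utils_reaper import Reapers, reaper

def print_schedule_states(state):
    # Port-forwarding and the login POST happen inside the context manager.
    with reaper(state, show_out=False) as svc:
        response = svc.get('repair_schedule')   # GET http://<local_addr>/repair_schedule
        if response and int(response.status_code / 100) == 2:
            for schedule in response.json():
                print(schedule['id'], schedule['state'])

# The higher-level helper renders the same data as a table via log2():
# Reapers.show_schedules(state)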
adam/commands/repair/repair.py CHANGED
@@ -1,13 +1,12 @@
  import click

- from adam.commands.command import Command
+ from adam.commands.intermediate_command import IntermediateCommand
  from .repair_run import RepairRun
  from .repair_scan import RepairScan
  from .repair_stop import RepairStop
  from .repair_log import RepairLog
- from adam.repl_state import ReplState, RequiredState

- class Repair(Command):
+ class Repair(IntermediateCommand):
      COMMAND = 'repair'

      # the singleton pattern
@@ -16,29 +15,12 @@ class Repair(Command):

          return cls.instance

-     def __init__(self, successor: Command=None):
-         super().__init__(successor)
-
      def command(self):
          return Repair.COMMAND

-     def required(self):
-         return RequiredState.CLUSTER
-
-     def run(self, cmd: str, state: ReplState):
-         if not(args := self.args(cmd)):
-             return super().run(cmd, state)
-         if not self.validate_state(state):
-             return state
-
-         return super().intermediate_run(cmd, state, args, Repair.cmd_list())
-
-     def cmd_list():
+     def cmd_list(self):
          return [RepairRun(), RepairScan(), RepairStop(), RepairLog()]

-     def completion(self, state: ReplState):
-         return super().completion(state)
-
  class RepairCommandHelper(click.Command):
      def get_help(self, ctx: click.Context):
-         Command.intermediate_help(super().get_help(ctx), Repair.COMMAND, Repair.cmd_list(), show_cluster_help=True)
+         IntermediateCommand.intermediate_help(super().get_help(ctx), Repair.COMMAND, Repair().cmd_list(), show_cluster_help=True)
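Repair now derives from the new IntermediateCommand (adam/commands/intermediate_command.py, +49 -0) instead of carrying its own run()/required()/completion() boilerplate, so the class shrinks to command() and cmd_list(). The base class itself is not included in this excerpt; judging from the removed Repair.run() above, its dispatch presumably looks roughly like this sketch (the names and details below are assumptions, not the released code):

# Hypothetical sketch only; the real IntermediateCommand is not shown in this diff.
class IntermediateCommandSketch:
    def command(self) -> str:
        raise NotImplementedError

    def cmd_list(self) -> list:
        raise NotImplementedError

    def run(self, cmd: str, state):
        # Hand "repair run ...", "repair stop ...", etc. to the matching sub-command.
        for sub in self.cmd_list():
            if cmd.strip().startswith(sub.COMMAND):
                return sub.run(cmd, state)
        return state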
adam/commands/repair/repair_log.py CHANGED
@@ -24,20 +24,14 @@ class RepairLog(Command):
          if not(args := self.args(cmd)):
              return super().run(cmd, state)

-         state, args = self.apply_state(args, state)
-         if not self.validate_state(state):
-             return state
-
-         ns = state.namespace
-         Jobs.get_logs('cassrepair-'+state.sts, ns)
+         with self.validate(args, state) as (args, state):
+             ns = state.namespace
+             Jobs.get_logs('cassrepair-'+state.sts, ns)

-         return state
+             return state

      def completion(self, state: ReplState):
-         if state.sts:
-             return super().completion(state)
-
-         return {}
+         return super().completion(state)

      def help(self, _: ReplState):
          return f'{RepairLog.COMMAND}\t get repair job logs'
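This hunk shows the refactor that repeats through the rest of the release: the apply_state()/validate_state() pair is folded into a single `with self.validate(args, state) as (args, state):` block. Command.validate() lives in adam/commands/command.py (+152 -37), which is not part of this excerpt; a minimal sketch of a context manager with that shape, assuming it simply combines the two old calls (how the real one skips the body on invalid state is not visible here):

from contextlib import contextmanager

class CommandSketch:
    # Stand-ins for the real Command helpers, for illustration only.
    def apply_state(self, args, state):
        return state, args

    def validate_state(self, state):
        return state is not None

    @contextmanager
    def validate(self, args, state):
        # Fold the old apply_state()/validate_state() pair into one block.
        state, args = self.apply_state(args, state)
        if not self.validate_state(state):
            # Assumption: the real implementation short-circuits instead of yielding.
            raise ValueError('required REPL state is missing')
        yield args, state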
adam/commands/repair/repair_run.py CHANGED
@@ -1,12 +1,12 @@
  from adam.commands.command import Command
+ from adam.commands.reaper.utils_reaper import Reapers
  from adam.utils_k8s.jobs import Jobs
  from adam.utils_k8s.volumes import Volumes
  from adam.repl_state import ReplState, RequiredState
  from adam.config import Config
- from adam.commands.reaper.reaper_session import ReaperSession
  from adam.commands.reaper.reaper_runs_abort import ReaperRunsAbort
  from adam.commands.reaper.reaper_schedule_stop import ReaperScheduleStop
- from adam.utils import log2
+ from adam.utils import log2, log_exc

  class RepairRun(Command):
      COMMAND = 'repair run'
@@ -30,43 +30,36 @@ class RepairRun(Command):
          if not(args := self.args(cmd)):
              return super().run(cmd, state)

-         state, args = self.apply_state(args, state)
-         if not self.validate_state(state):
-             return state
-
-         replace = False
-         if len(args) == 1:
-             replace = args[0] == 'replace'
+         with self.validate(args, state) as (args, state):
+             replace = False
+             if len(args) == 1:
+                 replace = args[0] == 'replace'

-         log2("Stopping all reaper schedules...")
-         reaper = ReaperSession.create(state)
-         schedules = reaper.schedule_ids(state)
-         for schedule_id in schedules:
-             ReaperScheduleStop().run(f'reaper stop schedule {schedule_id}', state)
-         log2("Aborting all reaper runs...")
-         state = ReaperRunsAbort().run('reaper abort runs', state)
+             with log_exc():
+                 log2("Stopping all reaper schedules...")
+                 for schedule_id in Reapers.cached_schedule_ids(state):
+                     ReaperScheduleStop().run(f'reaper stop schedule {schedule_id}', state)
+                 log2("Aborting all reaper runs...")
+                 state = ReaperRunsAbort().run('reaper abort runs', state)

-         image = Config().get('repair.image', 'ci-registry.c3iot.io/cloudops/cassrepair:2.0.11')
-         secret = Config().get('repair.secret', 'ciregistryc3iotio')
-         log_path = Config().get('repair.log-path', '/home/cassrepair/logs/')
-         user, _ = state.user_pass()
-         ns = state.namespace
-         env = Config().get('repair.env', {})
-         env["cluster"] = ns
-         env_from = {"username": user, "password": user}
-         pvc_name ='cassrepair-log-' + state.sts
-         Volumes.create_pvc(pvc_name, 30, ns)
-         if replace:
-             Jobs.delete('cassrepair-'+state.sts, ns)
-         Jobs.create('cassrepair-'+state.sts, ns, image, secret, env, env_from, 'cassrepair', pvc_name, log_path)
+             image = Config().get('repair.image', 'ci-registry.c3iot.io/cloudops/cassrepair:2.0.11')
+             secret = Config().get('repair.secret', 'ciregistryc3iotio')
+             log_path = Config().get('repair.log-path', '/home/cassrepair/logs/')
+             user, _ = state.user_pass()
+             ns = state.namespace
+             env = Config().get('repair.env', {})
+             env["cluster"] = ns
+             env_from = {"username": user, "password": user}
+             pvc_name ='cassrepair-log-' + state.sts
+             Volumes.create_pvc(pvc_name, 30, ns)
+             if replace:
+                 Jobs.delete('cassrepair-'+state.sts, ns)
+             Jobs.create('cassrepair-'+state.sts, ns, image, secret, env, env_from, 'cassrepair', pvc_name, log_path)

-         return state
+             return state

      def completion(self, state: ReplState):
-         if state.sts:
-             return super().completion(state)
-
-         return {}
+         return super().completion(state)

      def help(self, _: ReplState):
          return f'{RepairRun.COMMAND} [replace]\t start a repair job, default not replacing'
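repair run now wraps the Reaper housekeeping in `with log_exc():`, so a failure while stopping schedules or aborting runs no longer prevents the repair job itself from being created. log_exc() is part of the reworked adam/utils.py (+393 -33) and is not shown here; a plausible sketch, assuming it logs the traceback and suppresses the exception:

from contextlib import contextmanager
import traceback

@contextmanager
def log_exc():
    # Assumption: the real helper routes this through log2() rather than plain print.
    try:
        yield
    except Exception:
        traceback.print_exc()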
adam/commands/repair/repair_scan.py CHANGED
@@ -28,47 +28,41 @@ class RepairScan(Command):
          if not(args := self.args(cmd)):
              return super().run(cmd, state)

-         state, args = self.apply_state(args, state)
-         if not self.validate_state(state):
-             return state
-
-         n = "7"
-         if len(args) == 1:
-             n = str(args[0])
-         image = Config().get('repair.image', 'ci-registry.c3iot.io/cloudops/cassrepair:2.0.11')
-         secret = Config().get('repair.secret', 'ciregistryc3iotio')
-         log_path = secret = Config().get('repair.log-path', '/home/cassrepair/logs/')
-         ns = state.namespace
-         pvc_name ='cassrepair-log-' + state.sts
-         pod_name = 'repair-scan'
-
-         try:
-             Pods.create(ns, pod_name, image, ["sh", "-c", "tail -f /dev/null"],
-                 secret=secret,
-                 env={},
-                 volume_name='cassrepair-log',
-                 pvc_name=pvc_name,
-                 mount_path='/home/cassrepair/logs/')
-         except Exception as e:
-             if e.status == 409:
-                 log2(f"Pod {pod_name} already exists")
-             else:
-                 log2("Exception when calling BatchV1Apii->create_namespaced_job: %s\n" % e)
-
-         Pods.wait_for_running(ns, pod_name, 'Waiting for the scanner pod to start up...')
+         with self.validate(args, state) as (args, state):
+             n = "7"
+             if len(args) == 1:
+                 n = str(args[0])
+             image = Config().get('repair.image', 'ci-registry.c3iot.io/cloudops/cassrepair:2.0.11')
+             secret = Config().get('repair.secret', 'ciregistryc3iotio')
+             log_path = secret = Config().get('repair.log-path', '/home/cassrepair/logs/')
+             ns = state.namespace
+             pvc_name ='cassrepair-log-' + state.sts
+             pod_name = 'repair-scan'
+
+             try:
+                 Pods.create(ns, pod_name, image, ["sh", "-c", "tail -f /dev/null"],
+                     secret=secret,
+                     env={},
+                     volume_name='cassrepair-log',
+                     pvc_name=pvc_name,
+                     mount_path='/home/cassrepair/logs/')
+             except Exception as e:
+                 if e.status == 409:
+                     log2(f"Pod {pod_name} already exists")
+                 else:
+                     log2("Exception when calling BatchV1Apii->create_namespaced_job: %s\n" % e)
+
+             Pods.wait_for_running(ns, pod_name, 'Waiting for the scanner pod to start up...')
+
+             try:
+                 Pods.exec(pod_name, pod_name, ns, f"find {log_path} -type f -mtime -{n} -print0 | xargs -0 grep failed")
+             finally:
+                 Pods.delete(pod_name, ns)

-         try:
-             Pods.exec(pod_name, pod_name, ns, f"find {log_path} -type f -mtime -{n} -print0 | xargs -0 grep failed")
-         finally:
-             Pods.delete(pod_name, ns)
-
-         return state
+             return state

      def completion(self, state: ReplState):
-         if state.sts:
-             return super().completion(state)
-
-         return {}
+         return super().completion(state)

      def help(self, _: ReplState):
          return f'{RepairScan.COMMAND} [n]\t scan last n days repair log, default 7 days'
adam/commands/repair/repair_stop.py CHANGED
@@ -25,20 +25,14 @@ class RepairStop(Command):
          if not(args := self.args(cmd)):
              return super().run(cmd, state)

-         state, args = self.apply_state(args, state)
-         if not self.validate_state(state):
-             return state
-
-         ns = state.namespace
-         Jobs.delete('cassrepair-'+state.sts, ns)
+         with self.validate(args, state) as (args, state):
+             ns = state.namespace
+             Jobs.delete('cassrepair-'+state.sts, ns)

-         return state
+             return state

      def completion(self, state: ReplState):
-         if state.sts:
-             return super().completion(state)
-
-         return {}
+         return super().completion(state)

      def help(self, _: ReplState):
          return f'{RepairStop.COMMAND}\t delete a repair job'
adam/commands/report.py CHANGED
@@ -29,35 +29,33 @@ class Report(Command):
          if not(args := self.args(cmd)):
              return super().run(cmd, state)

-         output: dict[str, any] = {}
-         state, args = self.apply_state(args, state)
-         if not self.validate_state(state):
-             return state
-
-         if state.in_repl:
-             args, show = Command.extract_options(args, ['-s', '--show'])
-
-             args, redirect = Command.extract_options(args, ['>'])
-             if not redirect or not args:
-                 log2('Please specify file name: e.g. report > /tmp/report.log')
-                 return 'no-report-destination'
-
-             results = run_checks(state.sts, state.namespace, state.pod, show_output=show)
-             output = CheckResult.report(results)
-             with open(args[0], "w") as json_file:
-                 json.dump(output, json_file, indent=2)
-             log2(f'Report stored in {args[0]}.')
-         else:
-             args, show = Command.extract_options(args, ['-s', '--show'])
-
-             results = run_checks(state.sts, state.namespace, state.pod, show_output=show)
-             output = CheckResult.report(results)
-             click.echo(json.dumps(output, indent=2))
-
-         return output
-
-     def completion(self, _: ReplState):
-         return {Report.COMMAND: {">": None}}
+         with self.validate(args, state) as (args, state):
+             output: dict[str, any] = {}
+
+             if state.in_repl:
+                 args, show = Command.extract_options(args, ['-s', '--show'])
+
+                 args, redirect = Command.extract_options(args, ['>'])
+                 if not redirect or not args:
+                     log2('Please specify file name: e.g. report > /tmp/report.log')
+                     return 'no-report-destination'
+
+                 results = run_checks(state.sts, state.namespace, state.pod, show_out=show)
+                 output = CheckResult.report(results)
+                 with open(args[0], "w") as json_file:
+                     json.dump(output, json_file, indent=2)
+                 log2(f'Report stored in {args[0]}.')
+             else:
+                 args, show = Command.extract_options(args, ['-s', '--show'])
+
+                 results = run_checks(state.sts, state.namespace, state.pod, show_out=show)
+                 output = CheckResult.report(results)
+                 click.echo(json.dumps(output, indent=2))
+
+             return output
+
+     def completion(self, state: ReplState):
+         return super().completion(state, {">": None})

      def help(self, _: ReplState):
          return f"{Report.COMMAND} > <file-name>\t generate report"
adam/commands/restart.py CHANGED
@@ -1,3 +1,4 @@
+ from adam.commands import extract_options
  from adam.commands.command import Command
  from adam.utils_k8s.pods import Pods
  from adam.utils_k8s.statefulsets import StatefulSets
@@ -26,34 +27,32 @@ class Restart(Command):
          if not(args := self.args(cmd)):
              return super().run(cmd, state)

-         state, args = self.apply_state(args, state)
-         if not self.validate_state(state):
-             return state
-
-         args, forced = Command.extract_options(args, '--force')
-         if not args:
-             if state.pod:
-                 log2(f'Restarting {state.pod}...')
-                 Pods.delete(state.pod, state.namespace)
-             else:
-                 if not forced:
-                     log2('Please add --force for restarting all nodes in a cluster.')
-                     return 'force-needed'
-
-                 log2(f'Restarting all pods from {state.sts}...')
-                 for pod_name in StatefulSets.pod_names(state.sts, state.namespace):
-                     Pods.delete(pod_name, state.namespace)
-         else:
-             for arg in args:
-                 Pods.delete(arg, state.namespace)
-
-         return state
+         with self.validate(args, state) as (args, state):
+             with extract_options(args, '--force') as (args, forced):
+                 if not args:
+                     if state.pod:
+                         log2(f'Restarting {state.pod}...')
+                         Pods.delete(state.pod, state.namespace)
+                     else:
+                         if not forced:
+                             log2('Please add --force for restarting all nodes in a cluster.')
+                             return 'force-needed'
+
+                         log2(f'Restarting all pods from {state.sts}...')
+                         for pod_name in StatefulSets.pod_names(state.sts, state.namespace):
+                             Pods.delete(pod_name, state.namespace)
+                 else:
+                     for arg in args:
+                         Pods.delete(arg, state.namespace)
+
+             return state

      def completion(self, state: ReplState):
-         if state.pod:
-             return {Restart.COMMAND: None}
-         elif state.sts:
-             return {Restart.COMMAND: {p: None for p in StatefulSets.pod_names(state.sts, state.namespace)}}
+         if super().completion(state):
+             if state.pod:
+                 return {Restart.COMMAND: None}
+             elif state.sts:
+                 return {Restart.COMMAND: {p: None for p in StatefulSets.pod_names(state.sts, state.namespace)}}

          return {}
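restart (and rollout below) now import extract_options from the new adam/commands/__init__.py (+22 -0) and use it as a context manager, `with extract_options(args, '--force') as (args, forced):`, in place of the classmethod-style Command.extract_options(). The package __init__ is not shown in this excerpt; a minimal sketch with the same shape, assuming it strips the flag from the argument list and yields the remainder together with a boolean:

from contextlib import contextmanager

@contextmanager
def extract_options(args, options):
    # Hypothetical sketch; the released helper may differ in detail.
    if isinstance(options, str):
        options = [options]
    remaining = [a for a in args if a not in options]
    yield remaining, len(remaining) != len(args)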
 
adam/commands/rollout.py CHANGED
@@ -2,6 +2,7 @@ import datetime
  from kubernetes import client
  from kubernetes.client.rest import ApiException

+ from adam.commands import extract_options
  from adam.commands.command import Command
  from adam.commands.watch import Watch
  from adam.utils_k8s.statefulsets import StatefulSets
@@ -31,32 +32,28 @@ class RollOut(Command):
          if not(args := self.args(cmd)):
              return super().run(cmd, state)

-         state, args = self.apply_state(args, state)
-         if not self.validate_state(state):
-             return state
+         with self.validate(args, state) as (args, state):
+             with extract_options(args, '--force') as (args, forced):
+                 restarted, rollingout = StatefulSets.restarted_at(state.sts, state.namespace)
+                 if rollingout and not forced:
+                     log2(f"* Cluster is being rolled out for {duration(restarted)}. Please wait until it's done or use --force.")

-         args, forced = Command.extract_options(args, '--force')
+                     return state

-         restarted, rollingout = StatefulSets.restarted_at(state.sts, state.namespace)
-         if rollingout and not forced:
-             log2(f"* Cluster is being rolled out for {duration(restarted)}. Please wait until it's done or use --force.")
+                 self.rolling_restart(state.sts, state.namespace)

-             return state
+                 auto_watch = False
+                 if (auto_watch_cmds := Config().get('watch.auto', 'rollout')):
+                     cmds = [c.strip(' ') for c in auto_watch_cmds.split(',')]
+                     if self.command() in cmds:
+                         auto_watch = True
+                         log2('Rolling out cluster with auto watch...')
+                         Watch().run('watch', state)

-         self.rolling_restart(state.sts, state.namespace)
+                 if not auto_watch:
+                     log2('Rolling out cluster...')

-         auto_watch = False
-         if (auto_watch_cmds := Config().get('watch.auto', 'rollout')):
-             cmds = [c.strip(' ') for c in auto_watch_cmds.split(',')]
-             if self.command() in cmds:
-                 auto_watch = True
-                 log2('Rolling out cluster with auto watch...')
-                 Watch().run('watch', state)
-
-         if not auto_watch:
-             log2('Rolling out cluster...')
-
-         return state
+             return state

      def rolling_restart(self, statefulset, namespace):
          # kubectl rollout restart statefulset <statefulset-name>
@@ -82,9 +79,7 @@ class RollOut(Command):
              log2("Exception when calling AppsV1Api->read_namespaced_statefulset_status: %s\n" % e)

      def completion(self, state: ReplState):
-         if state.pod:
-             return {}
-         elif state.sts:
+         if super().completion(state):
              return {RollOut.COMMAND: None}

          return {}