kaqing 1.98.15__py3-none-any.whl → 2.0.145__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kaqing might be problematic; see the advisory details below for more information.

Files changed (180) hide show
  1. adam/app_session.py +1 -1
  2. adam/apps.py +2 -2
  3. adam/batch.py +30 -31
  4. adam/checks/check_utils.py +4 -4
  5. adam/checks/compactionstats.py +1 -1
  6. adam/checks/cpu.py +2 -2
  7. adam/checks/disk.py +1 -1
  8. adam/checks/gossip.py +1 -1
  9. adam/checks/memory.py +3 -3
  10. adam/checks/status.py +1 -1
  11. adam/commands/alter_tables.py +81 -0
  12. adam/commands/app.py +3 -3
  13. adam/commands/app_ping.py +2 -2
  14. adam/commands/audit/audit.py +86 -0
  15. adam/commands/audit/audit_repair_tables.py +77 -0
  16. adam/commands/audit/audit_run.py +58 -0
  17. adam/commands/audit/show_last10.py +51 -0
  18. adam/commands/audit/show_slow10.py +50 -0
  19. adam/commands/audit/show_top10.py +48 -0
  20. adam/commands/audit/utils_show_top10.py +59 -0
  21. adam/commands/bash/bash.py +133 -0
  22. adam/commands/bash/bash_completer.py +93 -0
  23. adam/commands/cat.py +56 -0
  24. adam/commands/cd.py +12 -82
  25. adam/commands/check.py +6 -0
  26. adam/commands/cli_commands.py +3 -3
  27. adam/commands/code.py +60 -0
  28. adam/commands/command.py +48 -12
  29. adam/commands/commands_utils.py +4 -5
  30. adam/commands/cql/cql_completions.py +28 -0
  31. adam/commands/cql/cql_utils.py +209 -0
  32. adam/commands/{cqlsh.py → cql/cqlsh.py} +15 -10
  33. adam/commands/deploy/__init__.py +0 -0
  34. adam/commands/{frontend → deploy}/code_start.py +1 -1
  35. adam/commands/{frontend → deploy}/code_stop.py +1 -1
  36. adam/commands/{frontend → deploy}/code_utils.py +2 -2
  37. adam/commands/deploy/deploy.py +48 -0
  38. adam/commands/deploy/deploy_frontend.py +52 -0
  39. adam/commands/deploy/deploy_pg_agent.py +38 -0
  40. adam/commands/deploy/deploy_pod.py +110 -0
  41. adam/commands/deploy/deploy_utils.py +29 -0
  42. adam/commands/deploy/undeploy.py +48 -0
  43. adam/commands/deploy/undeploy_frontend.py +41 -0
  44. adam/commands/deploy/undeploy_pg_agent.py +42 -0
  45. adam/commands/deploy/undeploy_pod.py +51 -0
  46. adam/commands/devices/__init__.py +0 -0
  47. adam/commands/devices/device.py +27 -0
  48. adam/commands/devices/device_app.py +146 -0
  49. adam/commands/devices/device_auit_log.py +43 -0
  50. adam/commands/devices/device_cass.py +145 -0
  51. adam/commands/devices/device_export.py +86 -0
  52. adam/commands/devices/device_postgres.py +109 -0
  53. adam/commands/devices/devices.py +25 -0
  54. adam/commands/export/__init__.py +0 -0
  55. adam/commands/export/clean_up_export_session.py +53 -0
  56. adam/commands/{frontend/teardown_frontend.py → export/clean_up_export_sessions.py} +9 -11
  57. adam/commands/export/drop_export_database.py +58 -0
  58. adam/commands/export/drop_export_databases.py +46 -0
  59. adam/commands/export/export.py +83 -0
  60. adam/commands/export/export_databases.py +170 -0
  61. adam/commands/export/export_select.py +85 -0
  62. adam/commands/export/export_select_x.py +54 -0
  63. adam/commands/export/export_use.py +55 -0
  64. adam/commands/export/exporter.py +364 -0
  65. adam/commands/export/import_session.py +68 -0
  66. adam/commands/export/importer.py +67 -0
  67. adam/commands/export/importer_athena.py +80 -0
  68. adam/commands/export/importer_sqlite.py +47 -0
  69. adam/commands/export/show_column_counts.py +63 -0
  70. adam/commands/export/show_export_databases.py +39 -0
  71. adam/commands/export/show_export_session.py +51 -0
  72. adam/commands/export/show_export_sessions.py +47 -0
  73. adam/commands/export/utils_export.py +291 -0
  74. adam/commands/help.py +12 -7
  75. adam/commands/issues.py +6 -0
  76. adam/commands/kubectl.py +41 -0
  77. adam/commands/login.py +9 -5
  78. adam/commands/logs.py +2 -1
  79. adam/commands/ls.py +4 -107
  80. adam/commands/medusa/medusa.py +2 -26
  81. adam/commands/medusa/medusa_backup.py +2 -2
  82. adam/commands/medusa/medusa_restore.py +3 -4
  83. adam/commands/medusa/medusa_show_backupjobs.py +4 -3
  84. adam/commands/medusa/medusa_show_restorejobs.py +3 -3
  85. adam/commands/nodetool.py +9 -4
  86. adam/commands/param_set.py +1 -1
  87. adam/commands/postgres/postgres.py +42 -43
  88. adam/commands/postgres/postgres_context.py +248 -0
  89. adam/commands/postgres/postgres_preview.py +0 -1
  90. adam/commands/postgres/postgres_utils.py +31 -0
  91. adam/commands/postgres/psql_completions.py +10 -0
  92. adam/commands/preview_table.py +18 -40
  93. adam/commands/pwd.py +2 -28
  94. adam/commands/reaper/reaper.py +4 -24
  95. adam/commands/reaper/reaper_restart.py +1 -1
  96. adam/commands/reaper/reaper_session.py +2 -2
  97. adam/commands/repair/repair.py +3 -27
  98. adam/commands/repair/repair_log.py +1 -1
  99. adam/commands/repair/repair_run.py +2 -2
  100. adam/commands/repair/repair_scan.py +2 -7
  101. adam/commands/repair/repair_stop.py +1 -1
  102. adam/commands/report.py +6 -0
  103. adam/commands/restart.py +2 -2
  104. adam/commands/rollout.py +1 -1
  105. adam/commands/shell.py +33 -0
  106. adam/commands/show/show.py +11 -26
  107. adam/commands/show/show_app_actions.py +3 -0
  108. adam/commands/show/show_app_id.py +1 -1
  109. adam/commands/show/show_app_queues.py +3 -2
  110. adam/commands/show/show_cassandra_status.py +3 -3
  111. adam/commands/show/show_cassandra_version.py +3 -3
  112. adam/commands/show/show_commands.py +4 -1
  113. adam/commands/show/show_host.py +33 -0
  114. adam/commands/show/show_login.py +3 -0
  115. adam/commands/show/show_processes.py +1 -1
  116. adam/commands/show/show_repairs.py +2 -2
  117. adam/commands/show/show_storage.py +1 -1
  118. adam/commands/watch.py +1 -1
  119. adam/config.py +16 -3
  120. adam/embedded_params.py +1 -1
  121. adam/pod_exec_result.py +10 -2
  122. adam/repl.py +132 -117
  123. adam/repl_commands.py +62 -18
  124. adam/repl_state.py +276 -55
  125. adam/sql/__init__.py +0 -0
  126. adam/sql/sql_completer.py +120 -0
  127. adam/sql/sql_state_machine.py +617 -0
  128. adam/sql/term_completer.py +76 -0
  129. adam/sso/authenticator.py +1 -1
  130. adam/sso/authn_ad.py +36 -56
  131. adam/sso/authn_okta.py +6 -32
  132. adam/sso/cred_cache.py +1 -1
  133. adam/sso/idp.py +74 -9
  134. adam/sso/idp_login.py +2 -2
  135. adam/sso/idp_session.py +10 -7
  136. adam/utils.py +85 -4
  137. adam/utils_athena.py +145 -0
  138. adam/utils_audits.py +102 -0
  139. adam/utils_k8s/__init__.py +0 -0
  140. adam/utils_k8s/app_clusters.py +33 -0
  141. adam/utils_k8s/app_pods.py +31 -0
  142. adam/{k8s_utils → utils_k8s}/cassandra_clusters.py +6 -21
  143. adam/{k8s_utils → utils_k8s}/cassandra_nodes.py +12 -5
  144. adam/utils_k8s/config_maps.py +34 -0
  145. adam/utils_k8s/deployment.py +56 -0
  146. adam/{k8s_utils → utils_k8s}/jobs.py +1 -1
  147. adam/{k8s_utils → utils_k8s}/kube_context.py +1 -1
  148. adam/utils_k8s/pods.py +342 -0
  149. adam/{k8s_utils → utils_k8s}/secrets.py +4 -0
  150. adam/utils_k8s/service_accounts.py +169 -0
  151. adam/{k8s_utils → utils_k8s}/statefulsets.py +5 -4
  152. adam/{k8s_utils → utils_k8s}/volumes.py +9 -0
  153. adam/utils_net.py +24 -0
  154. adam/utils_repl/__init__.py +0 -0
  155. adam/utils_repl/automata_completer.py +48 -0
  156. adam/utils_repl/repl_completer.py +46 -0
  157. adam/utils_repl/state_machine.py +173 -0
  158. adam/utils_sqlite.py +101 -0
  159. adam/version.py +1 -1
  160. {kaqing-1.98.15.dist-info → kaqing-2.0.145.dist-info}/METADATA +1 -1
  161. kaqing-2.0.145.dist-info/RECORD +227 -0
  162. adam/commands/bash.py +0 -87
  163. adam/commands/cql_utils.py +0 -53
  164. adam/commands/devices.py +0 -89
  165. adam/commands/frontend/setup.py +0 -60
  166. adam/commands/frontend/setup_frontend.py +0 -58
  167. adam/commands/frontend/teardown.py +0 -61
  168. adam/commands/postgres/postgres_session.py +0 -225
  169. adam/commands/user_entry.py +0 -77
  170. adam/k8s_utils/pods.py +0 -211
  171. kaqing-1.98.15.dist-info/RECORD +0 -160
  172. /adam/commands/{frontend → audit}/__init__.py +0 -0
  173. /adam/{k8s_utils → commands/bash}/__init__.py +0 -0
  174. /adam/{medusa_show_restorejobs.py → commands/cql/__init__.py} +0 -0
  175. /adam/{k8s_utils → utils_k8s}/custom_resources.py +0 -0
  176. /adam/{k8s_utils → utils_k8s}/ingresses.py +0 -0
  177. /adam/{k8s_utils → utils_k8s}/services.py +0 -0
  178. {kaqing-1.98.15.dist-info → kaqing-2.0.145.dist-info}/WHEEL +0 -0
  179. {kaqing-1.98.15.dist-info → kaqing-2.0.145.dist-info}/entry_points.txt +0 -0
  180. {kaqing-1.98.15.dist-info → kaqing-2.0.145.dist-info}/top_level.txt +0 -0
adam/utils_k8s/pods.py ADDED
@@ -0,0 +1,342 @@
1
+ from collections.abc import Callable
2
+ from concurrent.futures import ThreadPoolExecutor, as_completed
3
+ from datetime import datetime
4
+ import sys
5
+ import time
6
+ from typing import TypeVar, cast
7
+ from kubernetes import client
8
+ from kubernetes.stream import stream
9
+ from kubernetes.stream.ws_client import ERROR_CHANNEL, WSClient
10
+
11
+ from adam.config import Config
12
+ from adam.utils_k8s.volumes import ConfigMapMount
13
+ from adam.pod_exec_result import PodExecResult
14
+ from adam.utils import elapsed_time, log2
15
+ from .kube_context import KubeContext
16
+
17
+ from websocket._core import WebSocket
18
+
19
+ T = TypeVar('T')
20
+ _TEST_POD_EXEC_OUTS: PodExecResult = None
21
+
22
# utility collection on pods; methods are all static
class Pods:
    # test hook: when True, force-close the underlying websocket after exec/read_file
    _TEST_POD_CLOSE_SOCKET: bool = False

    def set_test_pod_exec_outs(outs: PodExecResult):
        """Install a canned PodExecResult so exec() short-circuits in tests."""
        global _TEST_POD_EXEC_OUTS
        _TEST_POD_EXEC_OUTS = outs

        return _TEST_POD_EXEC_OUTS

    def delete(pod_name: str, namespace: str, grace_period_seconds: int = None):
        """Delete one pod; API errors are logged, not raised."""
        try:
            v1 = client.CoreV1Api()
            v1.delete_namespaced_pod(pod_name, namespace, grace_period_seconds=grace_period_seconds)
        except Exception as e:
            log2("Exception when calling CoreV1Api->delete_namespaced_pod: %s\n" % e)

    def delete_with_selector(namespace: str, label_selector: str, grace_period_seconds: int = None):
        """Delete every pod in the namespace matching label_selector."""
        v1 = client.CoreV1Api()

        ret = v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector)
        for i in ret.items:
            v1.delete_namespaced_pod(name=i.metadata.name, namespace=namespace, grace_period_seconds=grace_period_seconds)

    def on_pods(pods: list[str],
                namespace: str,
                body: Callable[[ThreadPoolExecutor, str, str, bool], T],
                post: Callable[[T], T] = None,
                action: str = 'action',
                max_workers=0,
                show_out=True,
                on_any = False,
                background = False) -> list[T]:
        """Run body() against the given pods.

        If the action is configured with worker threads (and on_any is False)
        the calls run in a ThreadPoolExecutor; otherwise they run serially and
        may stop early once a configured number of truthy results has been
        collected (exactly one when on_any is True).  post(), when given, maps
        each result.  Returns the list of (mapped) results.
        """
        show_out = KubeContext.show_out(show_out)

        if not max_workers:
            max_workers = Config().action_workers(action, 0)
        if not on_any and max_workers > 0:
            # if parallel, node sampling is suppressed
            if KubeContext.show_parallelism():
                log2(f'Executing on all nodes from statefulset in parallel...')
            start_time = time.time()
            try:
                with ThreadPoolExecutor(max_workers=max_workers) as executor:
                    # disable stdout from the pod_exec, then show the output in a for loop
                    futures = [body(executor, pod, namespace, show_out) for pod in pods]
                    if len(futures) == 0:
                        return cast(list[T], [])

                    rs = [future.result() for future in as_completed(futures)]
                    if post:
                        rs = [post(r, show_out=show_out) for r in rs]

                    return rs
            finally:
                if KubeContext.show_parallelism():
                    log2(f"Parallel {action} elapsed time: {elapsed_time(start_time)} with {max_workers} workers")
        else:
            results: list[T] = []

            samples = 1 if on_any else Config().action_node_samples(action, sys.maxsize)
            l = min(len(pods), samples)
            adj = 'all'
            if l < len(pods):
                adj = f'{l} sample'
            if show_out:
                log2(f'Executing on {adj} nodes from statefulset...')
            for pod_name in pods:
                try:
                    # disable stdout from the pod_exec, then show the output in a for loop
                    result = body(None, pod_name, namespace, False)
                    if post:
                        result = post(result, show_out=show_out)
                    results.append(result)
                    if result:
                        # a truthy result counts as one sample; stop once enough are collected
                        l -= 1
                        if not l:
                            break
                except Exception as e:
                    log2(e)

            return results

    def exec(pod_name: str, container: str, namespace: str, command: str,
             show_out = True, throw_err = False, shell = '/bin/sh',
             background = False,
             log_file = None,
             interaction: Callable[[any, list[str]], any] = None,
             env_prefix: str = None):
        """Run a shell command inside a pod's container over the exec websocket.

        Background commands (background=True or a trailing ' &') are optionally
        wrapped in nohup with output redirected to log_file.  Returns a
        PodExecResult carrying the captured stdout/stderr, the equivalent
        kubectl command line, and the server's error-channel payload.
        """
        if _TEST_POD_EXEC_OUTS:
            # test short-circuit installed via set_test_pod_exec_outs()
            return _TEST_POD_EXEC_OUTS

        show_out = KubeContext.show_out(show_out)

        api = client.CoreV1Api()

        tty = True
        exec_command = [shell, '-c', command]
        if env_prefix:
            exec_command = [shell, '-c', f'{env_prefix} {command}']

        if background or command.endswith(' &'):
            # should be false for starting a background process
            tty = False

            if Config().get('repl.background-process.auto-nohup', True):
                command = command.strip(' &')
                cmd_name = ''
                if command.startswith('nodetool '):
                    cmd_name = f".{'_'.join(command.split(' ')[5:])}"

                if not log_file:
                    # fix: log_prefix is defined on Pods; a bare log_prefix() raised NameError here
                    log_file = f'{Pods.log_prefix()}-{datetime.now().strftime("%d%H%M%S")}{cmd_name}.log'
                command = f"nohup {command} > {log_file} 2>&1 &"
                if env_prefix:
                    command = f'{env_prefix} {command}'
                exec_command = [shell, '-c', command]

        k_command = f'kubectl exec {pod_name} -c {container} -n {namespace} -- {shell} -c "{command}"'
        if show_out:
            print(k_command)

        resp: WSClient = stream(
            api.connect_get_namespaced_pod_exec,
            pod_name,
            namespace,
            command=exec_command,
            container=container,
            stderr=True,
            stdin=True,
            stdout=True,
            tty=tty,
            _preload_content=False,
        )

        s: WebSocket = resp.sock
        stdout = []
        stderr = []
        error_output = None
        try:
            while resp.is_open():
                resp.update(timeout=1)
                if resp.peek_stdout():
                    frag = resp.read_stdout()
                    stdout.append(frag)
                    if show_out: print(frag, end="")

                    if interaction:
                        interaction(resp, stdout)
                if resp.peek_stderr():
                    frag = resp.read_stderr()
                    stderr.append(frag)
                    if show_out: print(frag, end="")

            try:
                # get the exit code from server
                error_output = resp.read_channel(ERROR_CHANNEL)
            except Exception:
                pass
        except Exception as e:
            if throw_err:
                raise e
            else:
                log2(e)
        finally:
            resp.close()
            if s and s.sock and Pods._TEST_POD_CLOSE_SOCKET:
                try:
                    s.sock.close()
                except:
                    pass

        return PodExecResult("".join(stdout), "".join(stderr), k_command, error_output, pod=pod_name, log_file=log_file)

    def read_file(pod_name: str, container: str, namespace: str, file_path: str):
        """Yield chunks of a file from a pod's container, streamed via `cat`."""
        v1 = client.CoreV1Api()

        resp = stream(
            v1.connect_get_namespaced_pod_exec,
            name=pod_name,
            namespace=namespace,
            container=container,
            command=["cat", file_path],
            stderr=True, stdin=False,
            stdout=True, tty=False,
            _preload_content=False,  # Important for streaming
        )

        s: WebSocket = resp.sock
        try:
            while resp.is_open():
                resp.update(timeout=1)
                if resp.peek_stdout():
                    yield resp.read_stdout()

            try:
                # drain the error channel (exit status); the value is unused here
                resp.read_channel(ERROR_CHANNEL)
            except Exception:
                pass
        except Exception as e:
            raise e
        finally:
            resp.close()
            if s and s.sock and Pods._TEST_POD_CLOSE_SOCKET:
                try:
                    s.sock.close()
                except:
                    pass

    def get_container(namespace: str, pod_name: str, container_name: str):
        """Return the named container spec from the pod, or None if absent."""
        pod = Pods.get(namespace, pod_name)
        if not pod:
            return None

        for container in pod.spec.containers:
            if container_name == container.name:
                return container

        return None

    def get(namespace: str, pod_name: str):
        """Read a pod by name."""
        v1 = client.CoreV1Api()
        return v1.read_namespaced_pod(name=pod_name, namespace=namespace)

    def get_with_selector(namespace: str, label_selector: str):
        """Return the first pod matching the selector, or None if none match."""
        v1 = client.CoreV1Api()

        ret = v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector)
        for i in ret.items:
            return v1.read_namespaced_pod(name=i.metadata.name, namespace=namespace)

    def create_pod_spec(name: str, image: str, image_pull_secret: str,
                        envs: list, container_security_context: client.V1SecurityContext,
                        volume_name: str, pvc_name: str, mount_path: str,
                        command: list[str]=None, sa_name: str = None, config_map_mount: ConfigMapMount = None,
                        restart_policy="Never"):
        """Build a V1PodSpec with optional PVC and ConfigMap volume mounts.

        When no service account is given, the pod runs with a fixed non-root
        security context (uid/gid/fsGroup 1001).
        """
        volume_mounts = []
        if volume_name and pvc_name and mount_path:
            volume_mounts=[client.V1VolumeMount(mount_path=mount_path, name=volume_name)]

        if config_map_mount:
            volume_mounts.append(client.V1VolumeMount(mount_path=config_map_mount.mount_path, sub_path=config_map_mount.sub_path, name=config_map_mount.name()))

        container = client.V1Container(name=name, image=image, env=envs, security_context=container_security_context, command=command,
                                       volume_mounts=volume_mounts)

        volumes = []
        if volume_name and pvc_name and mount_path:
            volumes=[client.V1Volume(name=volume_name, persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name))]

        security_context = None
        if not sa_name:
            security_context=client.V1PodSecurityContext(run_as_user=1001, run_as_group=1001, fs_group=1001)

        if config_map_mount:
            volumes.append(client.V1Volume(name=config_map_mount.name(), config_map=client.V1ConfigMapVolumeSource(name=config_map_mount.config_map_name)))

        return client.V1PodSpec(
            restart_policy=restart_policy,
            containers=[container],
            image_pull_secrets=[client.V1LocalObjectReference(name=image_pull_secret)],
            security_context=security_context,
            service_account_name=sa_name,
            volumes=volumes
        )

    def create(namespace: str, pod_name: str, image: str,
               command: list[str] = None,
               secret: str = None,
               env: dict[str, any] = {},
               container_security_context: client.V1SecurityContext = None,
               labels: dict[str, str] = {},
               volume_name: str = None,
               pvc_name: str = None,
               mount_path: str = None,
               sa_name: str = None,
               config_map_mount: ConfigMapMount = None):
        """Create a pod from the given image/command/env; returns the created V1Pod.

        NOTE: the dict defaults are only iterated, never mutated, so the shared
        mutable default is safe here.
        """
        v1 = client.CoreV1Api()
        envs = []
        for k, v in env.items():
            envs.append(client.V1EnvVar(name=str(k), value=str(v)))
        pod = Pods.create_pod_spec(pod_name, image, secret, envs, container_security_context, volume_name, pvc_name, mount_path, command=command,
                                   sa_name=sa_name, config_map_mount=config_map_mount)
        return v1.create_namespaced_pod(
            namespace=namespace,
            body=client.V1Pod(spec=pod, metadata=client.V1ObjectMeta(
                name=pod_name,
                labels=labels
            ))
        )

    def wait_for_running(namespace: str, pod_name: str, msg: str = None, label_selector: str = None):
        """Poll once a second (max ~300 attempts) until the pod is Running.

        Shows an animated wait message; logs ' OK' on success or ' Timed Out'
        when the retry budget is exhausted.

        Fixes over the previous version: the retry cap now applies on both the
        selector and the name lookup path (the old conditional-expression
        precedence only capped the selector path and dereferenced False at
        timeout), and msg/max_len are bound before the loop so an
        already-running pod no longer raises NameError.
        """
        if not msg:
            msg = f'Waiting for the {pod_name} pod to start up.'
        max_len = len(msg) + 3

        cnt = 2
        while cnt < 302:
            pod = Pods.get_with_selector(namespace, label_selector) if label_selector else Pods.get(namespace, pod_name)
            if pod and pod.status.phase == 'Running':
                break

            # animate zero to two trailing dots while waiting
            mod = cnt % 3
            if mod == 0:
                padded = f'\r{msg}'.ljust(max_len)
            elif mod == 1:
                padded = f'\r{msg}.'.ljust(max_len)
            else:
                padded = f'\r{msg}..'.ljust(max_len)
            log2(padded, nl=False)
            cnt += 1
            time.sleep(1)

        log2(f'\r{msg}..'.ljust(max_len), nl=False)
        if cnt < 302:
            log2(' OK')
        else:
            log2(' Timed Out')

    def completed(namespace: str, pod_name: str):
        """True once the pod has finished, successfully or not."""
        return Pods.get(namespace, pod_name).status.phase in ['Succeeded', 'Failed']

    def log_prefix():
        """Path prefix for background-process log files."""
        return Config().get('log-prefix', '/tmp/qing')
@@ -1,4 +1,5 @@
1
1
  import base64
2
+ import functools
2
3
  import re
3
4
  from typing import cast
4
5
  from kubernetes import client
@@ -9,7 +10,10 @@ from adam.utils import log2
9
10
 
10
11
  # utility collection on secrets; methods are all static
11
12
  class Secrets:
13
+ @functools.lru_cache()
12
14
  def list_secrets(namespace: str = None, name_pattern: str = None):
15
+ Config().wait_log('Inspecting Cassandra Instances...')
16
+
13
17
  secrets_names = []
14
18
 
15
19
  v1 = client.CoreV1Api()
@@ -0,0 +1,169 @@
1
+ from kubernetes import client, config
2
+
3
+ from adam.config import Config
4
+
5
# utility collection on service accounts; methods are all static
class ServiceAccounts:
    def delete(namespace: str, label_selector: str):
        """Tear down a service account plus all of its role / cluster-role bindings."""
        ServiceAccounts.delete_cluster_role_bindings(label_selector)
        ServiceAccounts.delete_role_bindings(namespace, label_selector)
        ServiceAccounts.delete_service_account(namespace, label_selector)

    def replicate(to_sa: str, namespace: str, from_sa: str, labels: dict[str, str] = {}, add_cluster_roles: list[str] = []):
        """Create `to_sa` mirroring every binding held by `from_sa`, plus any
        extra cluster roles requested via add_cluster_roles."""
        ServiceAccounts.create_service_account(to_sa, namespace, labels=labels)

        for binding in ServiceAccounts.get_role_bindings(from_sa, namespace):
            bound_role = binding.role_ref.name
            ServiceAccounts.create_role_binding(f'{to_sa}-{bound_role}', namespace, to_sa, bound_role, labels=labels)

        for binding in ServiceAccounts.get_cluster_role_bindings(from_sa):
            bound_role = binding.role_ref.name
            ServiceAccounts.create_cluster_role_binding(f'{to_sa}-{bound_role}', namespace, to_sa, bound_role, labels=labels)

        for extra_role in add_cluster_roles:
            ServiceAccounts.create_cluster_role_binding(f'{to_sa}-{extra_role}', namespace, to_sa, extra_role, labels=labels)

    def create_service_account(name: str, namespace: str, labels: dict[str, str] = {}):
        """Create a namespaced service account (loads kube config first)."""
        config.load_kube_config()

        core = client.CoreV1Api()

        account = client.V1ServiceAccount(
            metadata=client.V1ObjectMeta(
                name=name,
                labels=labels)
        )
        created = core.create_namespaced_service_account(
            namespace=namespace,
            body=account
        )
        Config().debug(f"Service Account '{created.metadata.name}' created in namespace '{namespace}'.")

    def delete_service_account(namespace: str, label_selector: str) -> list:
        """Delete every service account matching the selector; returns the deleted ones."""
        deleted = []

        core = client.CoreV1Api()
        matches = core.list_namespaced_service_account(namespace=namespace, label_selector=label_selector).items
        for account in matches:
            Config().debug(f'delete {account.metadata.name}')
            core.delete_namespaced_service_account(name=account.metadata.name, namespace=namespace)
            deleted.append(account)

        return deleted

    def create_role_binding(name: str, namespace: str, sa_name: str, role_name: str, labels: dict[str, str] = {}):
        """Bind a namespaced Role to the given service account."""
        rbac = client.RbacAuthorizationV1Api()

        binding = client.V1RoleBinding(
            api_version="rbac.authorization.k8s.io/v1",
            kind="RoleBinding",
            metadata=client.V1ObjectMeta(
                name=name,
                namespace=namespace,
                labels=labels
            ),
            role_ref=client.V1RoleRef(
                api_group="rbac.authorization.k8s.io",
                kind="Role",
                name=role_name
            ),
            subjects=[
                client.RbacV1Subject(
                    kind="ServiceAccount",
                    name=sa_name,       # name of the service account
                    namespace=namespace  # namespace of the service account
                )
            ]
        )

        rbac.create_namespaced_role_binding(namespace=namespace, body=binding)

    def get_role_bindings(service_account_name: str, namespace: str) -> list:
        """Role bindings in the namespace whose subjects include the service account."""
        matches = []

        rbac = client.RbacAuthorizationV1Api()
        for binding in rbac.list_namespaced_role_binding(namespace=namespace).items:
            for subject in (binding.subjects or []):
                if subject.kind == "ServiceAccount" and subject.name == service_account_name:
                    matches.append(binding)

        return matches

    def delete_role_bindings(namespace: str, label_selector: str) -> list:
        """Delete every namespaced role binding matching the selector; returns them."""
        deleted = []

        rbac = client.RbacAuthorizationV1Api()
        for binding in rbac.list_namespaced_role_binding(namespace=namespace, label_selector=label_selector).items:
            Config().debug(f'delete {binding.metadata.name}')
            rbac.delete_namespaced_role_binding(name=binding.metadata.name, namespace=namespace)
            deleted.append(binding)

        return deleted

    def create_cluster_role_binding(name: str, namespace: str, sa_name: str, role_name: str, labels: dict[str, str] = {}):
        """Bind a ClusterRole to the given service account; API failures are printed."""
        rbac = client.RbacAuthorizationV1Api()

        binding = client.V1ClusterRoleBinding(
            api_version="rbac.authorization.k8s.io/v1",
            metadata=client.V1ObjectMeta(
                name=name,
                namespace=namespace,
                labels=labels
            ),
            role_ref=client.V1RoleRef(
                api_group="rbac.authorization.k8s.io",
                kind="ClusterRole",
                name=role_name
            ),
            subjects=[
                client.RbacV1Subject(
                    kind="ServiceAccount",
                    name=sa_name,
                    namespace=namespace
                )
            ]
        )

        try:
            rbac.create_cluster_role_binding(body=binding)
        except client.ApiException as e:
            print(f"Error creating ClusterRoleBinding: {e}")

    def get_cluster_role_bindings(service_account_name: str) -> list:
        """Cluster role bindings whose subjects include the service account."""
        matches = []

        rbac = client.RbacAuthorizationV1Api()
        for binding in rbac.list_cluster_role_binding().items:
            for subject in (binding.subjects or []):
                if subject.kind == "ServiceAccount" and subject.name == service_account_name:
                    matches.append(binding)

        return matches

    def delete_cluster_role_bindings(label_selector: str) -> list:
        """Delete every cluster role binding matching the selector; returns them."""
        deleted = []

        rbac = client.RbacAuthorizationV1Api()
        for binding in rbac.list_cluster_role_binding(label_selector=label_selector).items:
            Config().debug(f'delete {binding.metadata.name}')
            rbac.delete_cluster_role_binding(binding.metadata.name)
            deleted.append(binding)

        return deleted
@@ -24,11 +24,12 @@ class StatefulSets:
24
24
 
25
25
  return statefulsets.items
26
26
 
27
+ @functools.lru_cache()
27
28
  def list_sts_name_and_ns():
28
29
  return [(statefulset.metadata.name, statefulset.metadata.namespace) for statefulset in StatefulSets.list_sts()]
29
30
 
30
- def list_sts_names(show_namespace = True):
31
- if show_namespace:
31
+ def list_sts_names():
32
+ if not KubeContext.in_cluster_namespace():
32
33
  return [f"{sts}@{ns}" for sts, ns in StatefulSets.list_sts_name_and_ns()]
33
34
  else:
34
35
  return [f"{sts}" for sts, _ in StatefulSets.list_sts_name_and_ns()]
@@ -61,10 +62,10 @@ class StatefulSets:
61
62
  namespace: str,
62
63
  body: Callable[[ThreadPoolExecutor, str, str, bool], T],
63
64
  post: Callable[[T], T] = None,
64
- action: str = 'action', max_workers=0, show_out=True) -> list[T]:
65
+ action: str = 'action', max_workers=0, show_out=True, on_any = False, background = False) -> list[T]:
65
66
  pods = StatefulSets.pod_names(statefulset, namespace)
66
67
 
67
- return Pods.on_pods(pods, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out)
68
+ return Pods.on_pods(pods, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out, on_any=on_any, background=background)
68
69
 
69
70
  @functools.lru_cache()
70
71
  def pod_names(ss: str, ns: str):
@@ -1,6 +1,15 @@
1
1
  from kubernetes import client
2
2
  from adam.utils import log2
3
3
 
4
class ConfigMapMount:
    """Describes how a single ConfigMap entry is mounted into a container."""

    def __init__(self, config_map_name: str, sub_path: str, mount_path: str):
        # name of the source ConfigMap object
        self.config_map_name = config_map_name
        # key (file) within the ConfigMap to mount
        self.sub_path = sub_path
        # target path inside the container
        self.mount_path = mount_path

    def name(self) -> str:
        """Volume name derived from the ConfigMap name."""
        return f"{self.config_map_name}-volume"
12
+
4
13
  # utility collection on volumes; methods are all static
5
14
  class Volumes:
6
15
  def create_pvc(name: str, storage: int, namespace: str):
adam/utils_net.py ADDED
@@ -0,0 +1,24 @@
1
+ import socket
2
+
3
# Lazily resolved identity of this machine, cached after the first lookup.
MY_HOST = None


def get_my_host():
    """Return this machine's address, caching the result in MY_HOST.

    Resolution order: the Docker host alias ('host.docker.internal'), then the
    local hostname, then the literal 'NA' as a last resort.
    """
    global MY_HOST

    if not MY_HOST:
        MY_HOST = (get_ip_from_hostname('host.docker.internal')
                   or socket.gethostname()
                   or 'NA')

    return MY_HOST


def get_ip_from_hostname(hostname):
    """Resolve *hostname* to an IPv4 address string, or None when resolution fails."""
    try:
        return socket.gethostbyname(hostname)
    except socket.gaierror:
        return None
File without changes
@@ -0,0 +1,48 @@
1
+ from typing import Generic, Iterable, TypeVar
2
+ from prompt_toolkit.completion import CompleteEvent, Completer, Completion, WordCompleter
3
+ from prompt_toolkit.document import Document
4
+
5
+ from adam.utils_repl.state_machine import StateMachine, State
6
+
7
+ __all__ = [
8
+ "AutomataCompleter",
9
+ ]
10
+
11
+ T = TypeVar('T')
12
+
13
class AutomataCompleter(Completer, Generic[T]):
    """Completer that walks a StateMachine over the tokens typed so far and
    offers the suggestions registered for the state it lands in."""

    def __init__(self,
                 state_machine: StateMachine,
                 first_term: str = '',
                 debug = False):
        """first_term, when set, is an implicit leading token (e.g. the command
        name) prepended to the user's input before traversal."""
        super().__init__()
        self.machine = state_machine
        self.first_term = first_term
        self.debug = debug

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        """Traverse the machine with the current tokens and yield completions
        for the reached state, if any are registered."""
        text = document.text_before_cursor.lstrip()
        if self.first_term:
            text = f'{self.first_term} {text}'

        # fix: drop the dead `state = ''` pre-assignment that was immediately shadowed
        state: State = self.machine.traverse_tokens(self.tokens(text), State(''))
        if self.debug:
            print('\n =>', state.state if isinstance(state, State) else '')

        if state.state in self.machine.suggestions:
            if completer := self.suggestions_completer(state, self.machine.suggestions[state.state].strip(' ')):
                for c in completer.get_completions(document, complete_event):
                    yield c

    def tokens(self, text: str) -> list[T]:
        """Tokenization hook; subclasses may override."""
        return text.split(' ')

    def suggestions_completer(self, _: State, suggestions: str) -> Completer:
        """Build a completer from a comma-separated suggestion string.

        Returns None when there is nothing to suggest.  (Return annotation
        fixed: this returns a Completer, not list[str].)
        """
        if not suggestions:
            return None

        return WordCompleter(suggestions.split(','))
@@ -0,0 +1,46 @@
1
+ import re
2
+ from typing import Iterable, TypeVar
3
+ from prompt_toolkit.completion import CompleteEvent, Completion, NestedCompleter, WordCompleter
4
+ from prompt_toolkit.document import Document
5
+
6
+ __all__ = [
7
+ "ReplCompleter",
8
+ ]
9
+
10
+ T = TypeVar('T')
11
+
12
class ReplCompleter(NestedCompleter):
    """NestedCompleter variant whose first-word matching also treats '.', '@'
    and '&' as word characters."""

    def get_completions(
        self, document: Document, complete_event: CompleteEvent
    ) -> Iterable[Completion]:
        # Work on the text up to the cursor, remembering how much leading
        # whitespace was stripped so cursor math stays correct.
        text = document.text_before_cursor.lstrip()
        stripped_len = len(document.text_before_cursor) - len(text)

        if " " in text:
            # A first term exists: hand the remainder to its sub-completer.
            head = text.split()[0]
            sub = self.options.get(head)

            if sub is not None:
                tail = text[len(head):].lstrip()
                shift = len(text) - len(tail) + stripped_len

                sub_document = Document(
                    tail,
                    cursor_position=document.cursor_position - shift,
                )

                yield from sub.get_completions(sub_document, complete_event)
        else:
            # No space yet: behave like WordCompleter over the top-level options,
            # with a pattern that allows dot/@/& in the middle of a word.
            word_completer = WordCompleter(
                list(self.options.keys()),
                ignore_case=self.ignore_case,
                pattern=re.compile(r"([a-zA-Z0-9_\.\@\&]+|[^a-zA-Z0-9_\.\@\&\s]+)"),
            )
            yield from word_completer.get_completions(document, complete_event)