kaqing 1.77.0__py3-none-any.whl → 2.0.171__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (308)
  1. adam/__init__.py +1 -0
  2. adam/app_session.py +182 -0
  3. {walker → adam}/apps.py +8 -24
  4. {walker → adam}/batch.py +54 -97
  5. {walker → adam}/checks/check.py +3 -3
  6. {walker → adam}/checks/check_result.py +1 -1
  7. adam/checks/check_utils.py +65 -0
  8. {walker → adam}/checks/compactionstats.py +6 -6
  9. {walker → adam}/checks/cpu.py +14 -8
  10. adam/checks/cpu_metrics.py +52 -0
  11. {walker → adam}/checks/disk.py +6 -6
  12. {walker → adam}/checks/gossip.py +5 -5
  13. {walker → adam}/checks/memory.py +7 -7
  14. {walker → adam}/checks/status.py +5 -5
  15. {walker → adam}/cli.py +3 -3
  16. {walker → adam}/columns/column.py +1 -1
  17. adam/columns/columns.py +45 -0
  18. {walker → adam}/columns/compactions.py +5 -5
  19. {walker → adam}/columns/cpu.py +6 -4
  20. adam/columns/cpu_metrics.py +22 -0
  21. {walker → adam}/columns/dir_data.py +3 -3
  22. {walker → adam}/columns/dir_snapshots.py +3 -3
  23. {walker → adam}/columns/gossip.py +5 -5
  24. {walker → adam}/columns/host_id.py +3 -3
  25. {walker → adam}/columns/memory.py +3 -3
  26. {walker → adam}/columns/node_address.py +3 -3
  27. {walker → adam}/columns/node_load.py +3 -3
  28. {walker → adam}/columns/node_owns.py +3 -3
  29. {walker → adam}/columns/node_status.py +3 -3
  30. {walker → adam}/columns/node_tokens.py +3 -3
  31. {walker → adam}/columns/node_utils.py +2 -2
  32. {walker → adam}/columns/pod_name.py +2 -2
  33. {walker → adam}/columns/volume_cassandra.py +4 -4
  34. {walker → adam}/columns/volume_root.py +3 -3
  35. adam/commands/__init__.py +15 -0
  36. adam/commands/alter_tables.py +81 -0
  37. adam/commands/app_cmd.py +38 -0
  38. {walker → adam}/commands/app_ping.py +10 -16
  39. adam/commands/audit/audit.py +84 -0
  40. adam/commands/audit/audit_repair_tables.py +74 -0
  41. adam/commands/audit/audit_run.py +50 -0
  42. adam/commands/audit/show_last10.py +48 -0
  43. adam/commands/audit/show_slow10.py +47 -0
  44. adam/commands/audit/show_top10.py +45 -0
  45. adam/commands/audit/utils_show_top10.py +59 -0
  46. adam/commands/bash/__init__.py +5 -0
  47. adam/commands/bash/bash.py +36 -0
  48. adam/commands/bash/bash_completer.py +93 -0
  49. adam/commands/bash/utils_bash.py +16 -0
  50. adam/commands/cat.py +50 -0
  51. adam/commands/cd.py +43 -0
  52. adam/commands/check.py +73 -0
  53. {walker → adam}/commands/cli_commands.py +7 -8
  54. adam/commands/code.py +57 -0
  55. adam/commands/command.py +190 -0
  56. {walker → adam}/commands/command_helpers.py +1 -1
  57. {walker → adam}/commands/commands_utils.py +15 -25
  58. adam/commands/cp.py +89 -0
  59. adam/commands/cql/cql_completions.py +33 -0
  60. {walker/commands → adam/commands/cql}/cqlsh.py +20 -35
  61. adam/commands/cql/utils_cql.py +343 -0
  62. {walker/commands/frontend → adam/commands/deploy}/code_start.py +11 -14
  63. adam/commands/deploy/code_stop.py +40 -0
  64. {walker/commands/frontend → adam/commands/deploy}/code_utils.py +7 -9
  65. adam/commands/deploy/deploy.py +25 -0
  66. adam/commands/deploy/deploy_frontend.py +49 -0
  67. adam/commands/deploy/deploy_pg_agent.py +35 -0
  68. adam/commands/deploy/deploy_pod.py +108 -0
  69. adam/commands/deploy/deploy_utils.py +29 -0
  70. adam/commands/deploy/undeploy.py +25 -0
  71. adam/commands/deploy/undeploy_frontend.py +38 -0
  72. adam/commands/deploy/undeploy_pg_agent.py +39 -0
  73. adam/commands/deploy/undeploy_pod.py +48 -0
  74. adam/commands/devices/device.py +118 -0
  75. adam/commands/devices/device_app.py +173 -0
  76. adam/commands/devices/device_auit_log.py +49 -0
  77. adam/commands/devices/device_cass.py +185 -0
  78. adam/commands/devices/device_export.py +86 -0
  79. adam/commands/devices/device_postgres.py +144 -0
  80. adam/commands/devices/devices.py +25 -0
  81. {walker → adam}/commands/exit.py +3 -6
  82. adam/commands/export/clean_up_all_export_sessions.py +37 -0
  83. adam/commands/export/clean_up_export_sessions.py +51 -0
  84. adam/commands/export/drop_export_database.py +55 -0
  85. adam/commands/export/drop_export_databases.py +43 -0
  86. adam/commands/export/export.py +53 -0
  87. adam/commands/export/export_databases.py +170 -0
  88. adam/commands/export/export_handlers.py +71 -0
  89. adam/commands/export/export_select.py +81 -0
  90. adam/commands/export/export_select_x.py +54 -0
  91. adam/commands/export/export_use.py +52 -0
  92. adam/commands/export/exporter.py +352 -0
  93. adam/commands/export/import_session.py +40 -0
  94. adam/commands/export/importer.py +67 -0
  95. adam/commands/export/importer_athena.py +80 -0
  96. adam/commands/export/importer_sqlite.py +47 -0
  97. adam/commands/export/show_column_counts.py +54 -0
  98. adam/commands/export/show_export_databases.py +36 -0
  99. adam/commands/export/show_export_session.py +48 -0
  100. adam/commands/export/show_export_sessions.py +44 -0
  101. adam/commands/export/utils_export.py +314 -0
  102. {walker → adam}/commands/help.py +17 -12
  103. adam/commands/intermediate_command.py +49 -0
  104. adam/commands/issues.py +43 -0
  105. adam/commands/kubectl.py +38 -0
  106. adam/commands/login.py +70 -0
  107. {walker → adam}/commands/logs.py +8 -10
  108. adam/commands/ls.py +41 -0
  109. adam/commands/medusa/medusa.py +27 -0
  110. adam/commands/medusa/medusa_backup.py +57 -0
  111. adam/commands/medusa/medusa_restore.py +83 -0
  112. adam/commands/medusa/medusa_show_backupjobs.py +51 -0
  113. adam/commands/medusa/medusa_show_restorejobs.py +47 -0
  114. {walker → adam}/commands/nodetool.py +17 -21
  115. {walker → adam}/commands/param_get.py +15 -16
  116. adam/commands/param_set.py +43 -0
  117. adam/commands/postgres/postgres.py +104 -0
  118. adam/commands/postgres/postgres_context.py +274 -0
  119. {walker → adam}/commands/postgres/postgres_ls.py +7 -11
  120. {walker → adam}/commands/postgres/postgres_preview.py +8 -13
  121. adam/commands/postgres/psql_completions.py +10 -0
  122. adam/commands/postgres/utils_postgres.py +66 -0
  123. adam/commands/preview_table.py +37 -0
  124. adam/commands/pwd.py +47 -0
  125. adam/commands/reaper/reaper.py +35 -0
  126. adam/commands/reaper/reaper_forward.py +93 -0
  127. adam/commands/reaper/reaper_forward_session.py +6 -0
  128. {walker → adam}/commands/reaper/reaper_forward_stop.py +13 -19
  129. {walker → adam}/commands/reaper/reaper_restart.py +10 -17
  130. adam/commands/reaper/reaper_run_abort.py +46 -0
  131. adam/commands/reaper/reaper_runs.py +82 -0
  132. adam/commands/reaper/reaper_runs_abort.py +63 -0
  133. adam/commands/reaper/reaper_schedule_activate.py +45 -0
  134. adam/commands/reaper/reaper_schedule_start.py +45 -0
  135. adam/commands/reaper/reaper_schedule_stop.py +45 -0
  136. {walker → adam}/commands/reaper/reaper_schedules.py +6 -16
  137. {walker → adam}/commands/reaper/reaper_status.py +11 -19
  138. adam/commands/reaper/utils_reaper.py +196 -0
  139. adam/commands/repair/repair.py +26 -0
  140. {walker → adam}/commands/repair/repair_log.py +7 -10
  141. adam/commands/repair/repair_run.py +70 -0
  142. adam/commands/repair/repair_scan.py +71 -0
  143. {walker → adam}/commands/repair/repair_stop.py +8 -11
  144. adam/commands/report.py +61 -0
  145. adam/commands/restart.py +60 -0
  146. {walker → adam}/commands/rollout.py +25 -30
  147. adam/commands/shell.py +34 -0
  148. adam/commands/show/show.py +39 -0
  149. walker/commands/show/show_version.py → adam/commands/show/show_adam.py +14 -10
  150. adam/commands/show/show_app_actions.py +57 -0
  151. {walker → adam}/commands/show/show_app_id.py +12 -15
  152. {walker → adam}/commands/show/show_app_queues.py +9 -12
  153. adam/commands/show/show_cassandra_repairs.py +38 -0
  154. adam/commands/show/show_cassandra_status.py +124 -0
  155. {walker → adam}/commands/show/show_cassandra_version.py +6 -16
  156. adam/commands/show/show_commands.py +59 -0
  157. walker/commands/show/show_storage.py → adam/commands/show/show_host.py +11 -13
  158. adam/commands/show/show_login.py +62 -0
  159. {walker → adam}/commands/show/show_params.py +4 -4
  160. adam/commands/show/show_processes.py +51 -0
  161. adam/commands/show/show_storage.py +42 -0
  162. adam/commands/watch.py +82 -0
  163. {walker → adam}/config.py +10 -22
  164. {walker → adam}/embedded_apps.py +1 -1
  165. adam/embedded_params.py +2 -0
  166. adam/log.py +47 -0
  167. {walker → adam}/pod_exec_result.py +10 -2
  168. adam/repl.py +182 -0
  169. adam/repl_commands.py +124 -0
  170. adam/repl_state.py +458 -0
  171. adam/sql/__init__.py +0 -0
  172. adam/sql/sql_completer.py +120 -0
  173. adam/sql/sql_state_machine.py +618 -0
  174. adam/sql/term_completer.py +76 -0
  175. adam/sso/__init__.py +0 -0
  176. {walker → adam}/sso/authenticator.py +5 -1
  177. adam/sso/authn_ad.py +170 -0
  178. {walker → adam}/sso/authn_okta.py +39 -22
  179. adam/sso/cred_cache.py +60 -0
  180. adam/sso/id_token.py +23 -0
  181. adam/sso/idp.py +143 -0
  182. adam/sso/idp_login.py +50 -0
  183. adam/sso/idp_session.py +55 -0
  184. adam/sso/sso_config.py +63 -0
  185. adam/utils.py +679 -0
  186. adam/utils_app.py +98 -0
  187. adam/utils_athena.py +145 -0
  188. adam/utils_audits.py +106 -0
  189. adam/utils_issues.py +32 -0
  190. adam/utils_k8s/__init__.py +0 -0
  191. adam/utils_k8s/app_clusters.py +28 -0
  192. adam/utils_k8s/app_pods.py +33 -0
  193. adam/utils_k8s/cassandra_clusters.py +36 -0
  194. adam/utils_k8s/cassandra_nodes.py +33 -0
  195. adam/utils_k8s/config_maps.py +34 -0
  196. {walker/k8s_utils → adam/utils_k8s}/custom_resources.py +7 -2
  197. adam/utils_k8s/deployment.py +56 -0
  198. {walker/k8s_utils → adam/utils_k8s}/ingresses.py +3 -4
  199. {walker/k8s_utils → adam/utils_k8s}/jobs.py +3 -3
  200. adam/utils_k8s/k8s.py +87 -0
  201. {walker/k8s_utils → adam/utils_k8s}/kube_context.py +4 -4
  202. adam/utils_k8s/pods.py +290 -0
  203. {walker/k8s_utils → adam/utils_k8s}/secrets.py +8 -4
  204. adam/utils_k8s/service_accounts.py +170 -0
  205. {walker/k8s_utils → adam/utils_k8s}/services.py +3 -4
  206. {walker/k8s_utils → adam/utils_k8s}/statefulsets.py +6 -16
  207. {walker/k8s_utils → adam/utils_k8s}/volumes.py +10 -1
  208. adam/utils_net.py +24 -0
  209. adam/utils_repl/__init__.py +0 -0
  210. adam/utils_repl/automata_completer.py +48 -0
  211. adam/utils_repl/repl_completer.py +46 -0
  212. adam/utils_repl/state_machine.py +173 -0
  213. adam/utils_sqlite.py +109 -0
  214. adam/version.py +5 -0
  215. {kaqing-1.77.0.dist-info → kaqing-2.0.171.dist-info}/METADATA +1 -1
  216. kaqing-2.0.171.dist-info/RECORD +236 -0
  217. kaqing-2.0.171.dist-info/entry_points.txt +3 -0
  218. kaqing-2.0.171.dist-info/top_level.txt +1 -0
  219. kaqing-1.77.0.dist-info/RECORD +0 -159
  220. kaqing-1.77.0.dist-info/entry_points.txt +0 -3
  221. kaqing-1.77.0.dist-info/top_level.txt +0 -1
  222. walker/__init__.py +0 -3
  223. walker/app_session.py +0 -168
  224. walker/checks/check_utils.py +0 -97
  225. walker/columns/columns.py +0 -43
  226. walker/commands/add_user.py +0 -68
  227. walker/commands/app.py +0 -67
  228. walker/commands/bash.py +0 -87
  229. walker/commands/cd.py +0 -115
  230. walker/commands/check.py +0 -68
  231. walker/commands/command.py +0 -104
  232. walker/commands/cp.py +0 -95
  233. walker/commands/cql_utils.py +0 -53
  234. walker/commands/devices.py +0 -89
  235. walker/commands/frontend/code_stop.py +0 -57
  236. walker/commands/frontend/setup.py +0 -60
  237. walker/commands/frontend/setup_frontend.py +0 -58
  238. walker/commands/frontend/teardown.py +0 -61
  239. walker/commands/frontend/teardown_frontend.py +0 -42
  240. walker/commands/issues.py +0 -69
  241. walker/commands/login.py +0 -72
  242. walker/commands/ls.py +0 -145
  243. walker/commands/medusa/medusa.py +0 -69
  244. walker/commands/medusa/medusa_backup.py +0 -61
  245. walker/commands/medusa/medusa_restore.py +0 -86
  246. walker/commands/medusa/medusa_show_backupjobs.py +0 -52
  247. walker/commands/medusa/medusa_show_restorejobs.py +0 -52
  248. walker/commands/param_set.py +0 -44
  249. walker/commands/postgres/postgres.py +0 -113
  250. walker/commands/postgres/postgres_session.py +0 -225
  251. walker/commands/preview_table.py +0 -98
  252. walker/commands/processes.py +0 -53
  253. walker/commands/pwd.py +0 -64
  254. walker/commands/reaper/reaper.py +0 -78
  255. walker/commands/reaper/reaper_forward.py +0 -100
  256. walker/commands/reaper/reaper_run_abort.py +0 -65
  257. walker/commands/reaper/reaper_runs.py +0 -97
  258. walker/commands/reaper/reaper_runs_abort.py +0 -83
  259. walker/commands/reaper/reaper_schedule_activate.py +0 -64
  260. walker/commands/reaper/reaper_schedule_start.py +0 -64
  261. walker/commands/reaper/reaper_schedule_stop.py +0 -64
  262. walker/commands/reaper/reaper_session.py +0 -159
  263. walker/commands/repair/repair.py +0 -68
  264. walker/commands/repair/repair_run.py +0 -72
  265. walker/commands/repair/repair_scan.py +0 -79
  266. walker/commands/report.py +0 -57
  267. walker/commands/restart.py +0 -61
  268. walker/commands/show/show.py +0 -72
  269. walker/commands/show/show_app_actions.py +0 -53
  270. walker/commands/show/show_cassandra_status.py +0 -35
  271. walker/commands/show/show_commands.py +0 -58
  272. walker/commands/show/show_processes.py +0 -35
  273. walker/commands/show/show_repairs.py +0 -47
  274. walker/commands/status.py +0 -128
  275. walker/commands/storage.py +0 -52
  276. walker/commands/user_entry.py +0 -69
  277. walker/commands/watch.py +0 -85
  278. walker/embedded_params.py +0 -2
  279. walker/k8s_utils/cassandra_clusters.py +0 -48
  280. walker/k8s_utils/cassandra_nodes.py +0 -26
  281. walker/k8s_utils/pods.py +0 -211
  282. walker/repl.py +0 -165
  283. walker/repl_commands.py +0 -58
  284. walker/repl_state.py +0 -211
  285. walker/sso/authn_ad.py +0 -94
  286. walker/sso/idp.py +0 -150
  287. walker/sso/idp_login.py +0 -29
  288. walker/sso/sso_config.py +0 -45
  289. walker/utils.py +0 -194
  290. walker/version.py +0 -5
  291. {walker → adam}/checks/__init__.py +0 -0
  292. {walker → adam}/checks/check_context.py +0 -0
  293. {walker → adam}/checks/issue.py +0 -0
  294. {walker → adam}/cli_group.py +0 -0
  295. {walker → adam}/columns/__init__.py +0 -0
  296. {walker/commands → adam/commands/audit}/__init__.py +0 -0
  297. {walker/commands/frontend → adam/commands/cql}/__init__.py +0 -0
  298. {walker/commands/medusa → adam/commands/deploy}/__init__.py +0 -0
  299. {walker/commands/postgres → adam/commands/devices}/__init__.py +0 -0
  300. {walker/commands/reaper → adam/commands/export}/__init__.py +0 -0
  301. {walker/commands/repair → adam/commands/medusa}/__init__.py +0 -0
  302. {walker → adam}/commands/nodetool_commands.py +0 -0
  303. {walker/commands/show → adam/commands/postgres}/__init__.py +0 -0
  304. {walker/k8s_utils → adam/commands/reaper}/__init__.py +0 -0
  305. {walker/sso → adam/commands/repair}/__init__.py +0 -0
  306. /walker/medusa_show_restorejobs.py → /adam/commands/show/__init__.py +0 -0
  307. {walker → adam}/repl_session.py +0 -0
  308. {kaqing-1.77.0.dist-info → kaqing-2.0.171.dist-info}/WHEEL +0 -0
{walker/k8s_utils → adam/utils_k8s}/jobs.py RENAMED
@@ -1,7 +1,7 @@
  from kubernetes import client
  from time import sleep
  from .pods import Pods
- from walker.utils import log2
+ from adam.utils import log2
 
  # utility collection on jobs; methods are all static
  class Jobs:
@@ -12,7 +12,7 @@ class Jobs:
              envs.append(client.V1EnvVar(name=k.upper(), value=str(v)))
          for k, v in env_from.items():
              envs.append(client.V1EnvVar(name=k.upper(), value_from=client.V1EnvVarSource(secret_key_ref=client.V1SecretKeySelector(key=k, name=v))))
-         template = Pods.create_pod_spec(job_name, image, image_pull_secret, envs, volume_name, pvc_name, mount_path, command)
+         template = Pods.create_pod_spec(job_name, image, image_pull_secret, envs, None, volume_name, pvc_name, mount_path, command)
          spec = client.V1JobSpec(template=client.V1PodTemplateSpec(spec=template), backoff_limit=1, ttl_seconds_after_finished=300)
          job = client.V1Job(
              api_version="batch/v1",
@@ -44,7 +44,7 @@ class Jobs:
          except Exception as e:
              log2("Exception when calling BatchV1Apii->delete_namespaced_job: %s\n" % e)
          return
-
+
      def get_logs(job_name: str, namespace: str):
          v1 = client.CoreV1Api()
          try:
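
The extra None threaded into the Jobs call above lines up with the new container_security_context parameter that Pods.create_pod_spec accepts in 2.0.x (see adam/utils_k8s/pods.py below). A minimal sketch of the new call shape, with hypothetical argument values:

    # a minimal sketch, assuming the 2.0.x Pods.create_pod_spec signature shown later in this diff
    template = Pods.create_pod_spec(
        'backup-job',               # name (hypothetical)
        'cassandra-medusa:latest',  # image (hypothetical)
        'regcred',                  # image_pull_secret (hypothetical)
        envs,
        None,                       # container_security_context: new in 2.0.x; Jobs passes no override
        volume_name, pvc_name, mount_path,
        command)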
adam/utils_k8s/k8s.py ADDED
@@ -0,0 +1,87 @@
+ from collections.abc import Callable
+ import re
+ import portforward
+
+ from adam.commands.command import InvalidState
+ from adam.repl_state import ReplState
+ from adam.utils import log2
+ from adam.utils_k8s.kube_context import KubeContext
+
+ class PortForwardHandler:
+     connections: dict[str, int] = {}
+
+     def __init__(self, state: ReplState, local_port: int, svc_or_pod: Callable[[bool],str], target_port: int):
+         self.state = state
+         self.local_port = local_port
+         self.svc_or_pod = svc_or_pod
+         self.target_port = target_port
+         self.forward_connection = None
+         self.pod = None
+
+     def __enter__(self) -> tuple[str, str]:
+         state = self.state
+
+         if not self.svc_or_pod:
+             log2('No service or pod found.')
+
+             raise InvalidState(state)
+
+         if KubeContext.in_cluster():
+             svc_name = self.svc_or_pod(True)
+             if not svc_name:
+                 log2('No service found.')
+
+                 raise InvalidState(state)
+
+             # cs-a526330d23-cs-a526330d23-default-sts-0 ->
+             # curl http://cs-a526330d23-cs-a526330d23-reaper-service.stgawsscpsr.svc.cluster.local:8080
+             groups = re.match(r'^(.*?-.*?-.*?-.*?-).*', state.sts)
+             if groups:
+                 svc = f'{groups[1]}{svc_name}.{state.namespace}.svc.cluster.local:{self.target_port}'
+                 return (svc, svc)
+             else:
+                 raise InvalidState(state)
+         else:
+             pod = self.svc_or_pod(False)
+             if not pod:
+                 log2('No pod found.')
+
+                 raise InvalidState(state)
+
+             self.pod = pod
+             self.forward_connection = portforward.forward(state.namespace, pod, self.local_port, self.target_port)
+             if self.inc_connection_cnt() == 1:
+                 self.forward_connection.__enter__()
+
+             return (f'localhost:{self.local_port}', f'{pod}:{self.target_port}')
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         if self.forward_connection:
+             if not self.dec_connection_cnt():
+                 return self.forward_connection.__exit__(exc_type, exc_val, exc_tb)
+
+         return False
+
+     def inc_connection_cnt(self):
+         id = self.connection_id(self.pod)
+         if id not in PortForwardHandler.connections:
+             PortForwardHandler.connections[id] = 1
+         else:
+             PortForwardHandler.connections[id] += 1
+
+         return PortForwardHandler.connections[id]
+
+     def dec_connection_cnt(self):
+         id = self.connection_id(self.pod)
+         if id not in PortForwardHandler.connections:
+             PortForwardHandler.connections[id] = 0
+         elif PortForwardHandler.connections[id] > 0:
+             PortForwardHandler.connections[id] -= 1
+
+         return PortForwardHandler.connections[id]
+
+     def connection_id(self, pod: str):
+         return f'{self.local_port}:{pod}:{self.target_port}'
+
+ def port_forwarding(state: ReplState, local_port: int, svc_or_pod: Callable[[bool],str], target_port: int):
+     return PortForwardHandler(state, local_port, svc_or_pod, target_port)
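
PortForwardHandler reference-counts forwards keyed by local port, pod, and target port, so nested commands that need the same tunnel reuse a single portforward session; when running in-cluster it skips forwarding entirely and returns the service DNS name. A minimal usage sketch (the service and pod names here are hypothetical, not taken from this diff):

    # a minimal sketch, assuming a ReplState with namespace and sts already selected
    from adam.utils_k8s.k8s import port_forwarding

    def reaper_base_url(state):
        # svc_or_pod callback: True -> service name (in-cluster), False -> pod name
        pick = lambda in_cluster: 'reaper-service' if in_cluster else 'cs-xxxx-reaper-pod'  # hypothetical
        with port_forwarding(state, 9001, pick, 8080) as (local, remote):
            # outside the cluster this yields ('localhost:9001', '<pod>:8080');
            # in-cluster both values are the service DNS name and no tunnel is opened
            url = f'http://{local}'
            # ... issue HTTP calls against url while the tunnel is held open ...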
{walker/k8s_utils → adam/utils_k8s}/kube_context.py RENAMED
@@ -2,8 +2,8 @@ import os
  import re
  from kubernetes import config as kconfig
 
- from walker.config import Config
- from walker.utils import lines_to_tabular, log2
+ from adam.config import Config
+ from adam.utils import idp_token_from_env, lines_to_tabular, log2
 
  class KubeContext:
      _in_cluster = False
@@ -44,7 +44,7 @@ class KubeContext:
          except kconfig.ConfigException:
              pass
 
-         if msg and not is_user_entry:
+         if msg and not is_user_entry and not idp_token_from_env():
              log2(msg)
          if not loaded:
              exit(1)
@@ -105,4 +105,4 @@ class KubeContext:
          return s or Config().is_debug()
 
      def show_parallelism():
-         return Config().get('debug.show-parallelism', False)
+         return Config().get('debugs.show-parallelism', False)
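
Two behavioral notes fall out of this hunk: the missing-kubeconfig warning is now also suppressed when an IdP token is supplied via the environment (idp_token_from_env lives in adam/utils.py, which is not shown here), and the parallelism flag moved to a 'debugs' section. A small sketch of the key change, assuming Config().get takes a dotted key and a default as used throughout this diff:

    # a minimal sketch: 2.0.x reads the flag from 'debugs.show-parallelism';
    # a value still stored under the 1.77.0 key 'debug.show-parallelism' falls back to False here.
    from adam.config import Config

    show_parallelism = Config().get('debugs.show-parallelism', False)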
adam/utils_k8s/pods.py ADDED
@@ -0,0 +1,290 @@
+ from collections.abc import Callable
+ from datetime import datetime
+ import sys
+ import time
+ from typing import TypeVar
+ from kubernetes import client
+ from kubernetes.stream import stream
+ from kubernetes.stream.ws_client import ERROR_CHANNEL, WSClient
+
+ from adam.config import Config
+ from adam.utils_k8s.volumes import ConfigMapMount
+ from adam.pod_exec_result import PodExecResult
+ from adam.utils import ParallelMapHandler, log2
+ from .kube_context import KubeContext
+
+ from websocket._core import WebSocket
+
+ T = TypeVar('T')
+ _TEST_POD_EXEC_OUTS: PodExecResult = None
+
+ # utility collection on pods; methods are all static
+ class Pods:
+     _TEST_POD_CLOSE_SOCKET: bool = False
+
+     def set_test_pod_exec_outs(outs: PodExecResult):
+         global _TEST_POD_EXEC_OUTS
+         _TEST_POD_EXEC_OUTS = outs
+
+         return _TEST_POD_EXEC_OUTS
+
+     def delete(pod_name: str, namespace: str, grace_period_seconds: int = None):
+         try:
+             v1 = client.CoreV1Api()
+             v1.delete_namespaced_pod(pod_name, namespace, grace_period_seconds=grace_period_seconds)
+         except Exception as e:
+             log2("Exception when calling CoreV1Api->delete_namespaced_pod: %s\n" % e)
+
+     def delete_with_selector(namespace: str, label_selector: str, grace_period_seconds: int = None):
+         v1 = client.CoreV1Api()
+
+         ret = v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector)
+         for i in ret.items:
+             v1.delete_namespaced_pod(name=i.metadata.name, namespace=namespace, grace_period_seconds=grace_period_seconds)
+
+     def parallelize(collection: list, max_workers: int = 0, samples = sys.maxsize, msg: str = None, action: str = 'action'):
+         if not max_workers:
+             max_workers = Config().action_workers(action, 0)
+         if samples == sys.maxsize:
+             samples = Config().action_node_samples(action, sys.maxsize)
+
+         return ParallelMapHandler(collection, max_workers, samples = samples, msg = msg)
+
+     def exec(pod_name: str, container: str, namespace: str, command: str,
+              show_out = True, throw_err = False, shell = '/bin/sh',
+              background = False,
+              log_file = None,
+              interaction: Callable[[any, list[str]], any] = None,
+              env_prefix: str = None):
+         if _TEST_POD_EXEC_OUTS:
+             return _TEST_POD_EXEC_OUTS
+
+         show_out = KubeContext.show_out(show_out)
+
+         api = client.CoreV1Api()
+
+         tty = True
+         exec_command = [shell, '-c', command]
+         if env_prefix:
+             exec_command = [shell, '-c', f'{env_prefix} {command}']
+
+         if background or command.endswith(' &'):
+             # should be false for starting a background process
+             tty = False
+
+             if Config().get('repl.background-process.auto-nohup', True):
+                 command = command.strip(' &')
+                 cmd_name = ''
+                 if command.startswith('nodetool '):
+                     cmd_name = f".{'_'.join(command.split(' ')[5:])}"
+
+                 if not log_file:
+                     log_file = f'{log_prefix()}-{datetime.now().strftime("%d%H%M%S")}{cmd_name}.log'
+                 command = f"nohup {command} > {log_file} 2>&1 &"
+                 if env_prefix:
+                     command = f'{env_prefix} {command}'
+                 exec_command = [shell, '-c', command]
+
+         k_command = f'kubectl exec {pod_name} -c {container} -n {namespace} -- {shell} -c "{command}"'
+         if Config().is_debug():
+             log2(k_command)
+
+         resp: WSClient = stream(
+             api.connect_get_namespaced_pod_exec,
+             pod_name,
+             namespace,
+             command=exec_command,
+             container=container,
+             stderr=True,
+             stdin=True,
+             stdout=True,
+             tty=tty,
+             _preload_content=False,
+         )
+
+         s: WebSocket = resp.sock
+         stdout = []
+         stderr = []
+         error_output = None
+         try:
+             while resp.is_open():
+                 resp.update(timeout=1)
+                 if resp.peek_stdout():
+                     frag = resp.read_stdout()
+                     stdout.append(frag)
+                     if show_out: print(frag, end="")
+
+                     if interaction:
+                         interaction(resp, stdout)
+                 if resp.peek_stderr():
+                     frag = resp.read_stderr()
+                     stderr.append(frag)
+                     if show_out: print(frag, end="")
+
+             try:
+                 # get the exit code from server
+                 error_output = resp.read_channel(ERROR_CHANNEL)
+             except Exception as e:
+                 pass
+         except Exception as e:
+             if throw_err:
+                 raise e
+             else:
+                 log2(e)
+         finally:
+             resp.close()
+             if s and s.sock and Pods._TEST_POD_CLOSE_SOCKET:
+                 try:
+                     s.sock.close()
+                 except:
+                     pass
+
+         return PodExecResult("".join(stdout), "".join(stderr), k_command, error_output, pod=pod_name, log_file=log_file)
+
+     def read_file(pod_name: str, container: str, namespace: str, file_path: str):
+         v1 = client.CoreV1Api()
+
+         resp = stream(
+             v1.connect_get_namespaced_pod_exec,
+             name=pod_name,
+             namespace=namespace,
+             container=container,
+             command=["cat", file_path],
+             stderr=True, stdin=False,
+             stdout=True, tty=False,
+             _preload_content=False,  # Important for streaming
+         )
+
+         s: WebSocket = resp.sock
+         try:
+             while resp.is_open():
+                 resp.update(timeout=1)
+                 if resp.peek_stdout():
+                     yield resp.read_stdout()
+
+             try:
+                 # get the exit code from server
+                 error_output = resp.read_channel(ERROR_CHANNEL)
+             except Exception as e:
+                 pass
+         except Exception as e:
+             raise e
+         finally:
+             resp.close()
+             if s and s.sock and Pods._TEST_POD_CLOSE_SOCKET:
+                 try:
+                     s.sock.close()
+                 except:
+                     pass
+     def get_container(namespace: str, pod_name: str, container_name: str):
+         pod = Pods.get(namespace, pod_name)
+         if not pod:
+             return None
+
+         for container in pod.spec.containers:
+             if container_name == container.name:
+                 return container
+
+         return None
+
+     def get(namespace: str, pod_name: str):
+         v1 = client.CoreV1Api()
+         return v1.read_namespaced_pod(name=pod_name, namespace=namespace)
+
+     def get_with_selector(namespace: str, label_selector: str):
+         v1 = client.CoreV1Api()
+
+         ret = v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector)
+         for i in ret.items:
+             return v1.read_namespaced_pod(name=i.metadata.name, namespace=namespace)
+
+     def create_pod_spec(name: str, image: str, image_pull_secret: str,
+                         envs: list, container_security_context: client.V1SecurityContext,
+                         volume_name: str, pvc_name:str, mount_path:str,
+                         command: list[str]=None, sa_name : str = None, config_map_mount: ConfigMapMount = None,
+                         restart_policy="Never"):
+         volume_mounts = []
+         if volume_name and pvc_name and mount_path:
+             volume_mounts=[client.V1VolumeMount(mount_path=mount_path, name=volume_name)]
+
+         if config_map_mount:
+             volume_mounts.append(client.V1VolumeMount(mount_path=config_map_mount.mount_path, sub_path=config_map_mount.sub_path, name=config_map_mount.name()))
+
+         container = client.V1Container(name=name, image=image, env=envs, security_context=container_security_context, command=command,
+                                        volume_mounts=volume_mounts)
+
+         volumes = []
+         if volume_name and pvc_name and mount_path:
+             volumes=[client.V1Volume(name=volume_name, persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name))]
+
+         security_context = None
+         if not sa_name:
+             security_context=client.V1PodSecurityContext(run_as_user=1001, run_as_group=1001, fs_group=1001)
+
+         if config_map_mount:
+             volumes.append(client.V1Volume(name=config_map_mount.name(), config_map=client.V1ConfigMapVolumeSource(name=config_map_mount.config_map_name)))
+
+         return client.V1PodSpec(
+             restart_policy=restart_policy,
+             containers=[container],
+             image_pull_secrets=[client.V1LocalObjectReference(name=image_pull_secret)],
+             security_context=security_context,
+             service_account_name=sa_name,
+             volumes=volumes
+         )
+
+     def create(namespace: str, pod_name: str, image: str,
+                command: list[str] = None,
+                secret: str = None,
+                env: dict[str, any] = {},
+                container_security_context: client.V1SecurityContext = None,
+                labels: dict[str, str] = {},
+                volume_name: str = None,
+                pvc_name: str = None,
+                mount_path: str = None,
+                sa_name: str = None,
+                config_map_mount: ConfigMapMount = None):
+         v1 = client.CoreV1Api()
+         envs = []
+         for k, v in env.items():
+             envs.append(client.V1EnvVar(name=str(k), value=str(v)))
+         pod = Pods.create_pod_spec(pod_name, image, secret, envs, container_security_context, volume_name, pvc_name, mount_path, command=command,
+                                    sa_name=sa_name, config_map_mount=config_map_mount)
+         return v1.create_namespaced_pod(
+             namespace=namespace,
+             body=client.V1Pod(spec=pod, metadata=client.V1ObjectMeta(
+                 name=pod_name,
+                 labels=labels
+             ))
+         )
+
+     def wait_for_running(namespace: str, pod_name: str, msg: str = None, label_selector: str = None):
+         cnt = 2
+         while (cnt < 302 and Pods.get_with_selector(namespace, label_selector) if label_selector else Pods.get(namespace, pod_name)).status.phase != 'Running':
+             if not msg:
+                 msg = f'Waiting for the {pod_name} pod to start up.'
+
+             max_len = len(msg) + 3
+             mod = cnt % 3
+             padded = ''
+             if mod == 0:
+                 padded = f'\r{msg}'.ljust(max_len)
+             elif mod == 1:
+                 padded = f'\r{msg}.'.ljust(max_len)
+             else:
+                 padded = f'\r{msg}..'.ljust(max_len)
+             log2(padded, nl=False)
+             cnt += 1
+             time.sleep(1)
+
+         log2(f'\r{msg}..'.ljust(max_len), nl=False)
+         if cnt < 302:
+             log2(' OK')
+         else:
+             log2(' Timed Out')
+
+     def completed(namespace: str, pod_name: str):
+         return Pods.get(namespace, pod_name).status.phase in ['Succeeded', 'Failed']
+
+ def log_prefix():
+     return Config().get('log-prefix', '/tmp/qing')
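
Pods.exec mirrors kubectl exec over the Kubernetes websocket API: it streams stdout/stderr as the command runs, reads the error channel for the exit status, and for background commands wraps the command in nohup with a generated log file under the configured log-prefix. A minimal usage sketch (pod, container, and namespace names are hypothetical; a loaded cluster context is assumed):

    # a minimal sketch, assuming the Pods class above and a reachable cluster
    from adam.utils_k8s.pods import Pods

    # foreground: returns a PodExecResult capturing stdout, stderr and the equivalent kubectl command
    result = Pods.exec('cs-xxxx-default-sts-0', 'cassandra', 'my-namespace',
                       'nodetool compactionstats', show_out=False)

    # background: auto-nohup'd and redirected to a log file derived from the 'log-prefix' config value
    Pods.exec('cs-xxxx-default-sts-0', 'cassandra', 'my-namespace',
              'nodetool repair', background=True)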
{walker/k8s_utils → adam/utils_k8s}/secrets.py RENAMED
@@ -1,15 +1,19 @@
  import base64
+ import functools
  import re
  from typing import cast
  from kubernetes import client
  from kubernetes.client import V1Secret
 
- from walker.config import Config
- from walker.utils import log2
+ from adam.config import Config
+ from adam.utils import log2, wait_log
 
  # utility collection on secrets; methods are all static
  class Secrets:
+     @functools.lru_cache()
      def list_secrets(namespace: str = None, name_pattern: str = None):
+         wait_log('Inspecting Cassandra Instances...')
+
          secrets_names = []
 
          v1 = client.CoreV1Api()
@@ -35,14 +39,14 @@ class Secrets:
 
          return secrets_names
 
-     def get_user_pass(ss_name: str, namespace: str, secret_path: str = 'cql.secret'):
+     def get_user_pass(sts_or_pod_name: str, namespace: str, secret_path: str = 'cql.secret'):
          # cs-d0767a536f-cs-d0767a536f-default-sts ->
          # cs-d0767a536f-superuser
          # cs-d0767a536f-reaper-ui
          user = 'superuser'
          if secret_path == 'reaper.secret':
              user = 'reaper-ui'
-         groups = re.match(Config().get(f'{secret_path}.cluster-regex', r'(.*?-.*?)-.*'), ss_name)
+         groups = re.match(Config().get(f'{secret_path}.cluster-regex', r'(.*?-.*?)-.*'), sts_or_pod_name)
          secret_name = Config().get(f'{secret_path}.name', '{cluster}-' + user).replace('{cluster}', groups[1], 1)
 
          secret = Secrets.get_data(namespace, secret_name)
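
The functools.lru_cache added to list_secrets memoizes results per (namespace, name_pattern) argument combination for the life of the process, so repeated prompts only hit the Kubernetes API once. A small standard-library sketch of the behavior being relied on:

    # a minimal sketch of the memoization pattern applied to Secrets.list_secrets
    import functools

    calls = 0

    @functools.lru_cache()
    def list_names(namespace=None, pattern=None):
        global calls
        calls += 1                      # stands in for the CoreV1Api list call
        return [f'{namespace}-superuser']

    list_names('ns1')
    list_names('ns1')
    assert calls == 1                   # second identical call is served from the cache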
adam/utils_k8s/service_accounts.py ADDED
@@ -0,0 +1,170 @@
+ from kubernetes import client, config
+
+ from adam.config import Config
+ from adam.utils import debug
+
+ # utility collection on service accounts; methods are all static
+ class ServiceAccounts:
+     def delete(namespace: str, label_selector: str):
+         ServiceAccounts.delete_cluster_role_bindings(label_selector)
+         ServiceAccounts.delete_role_bindings(namespace, label_selector)
+         ServiceAccounts.delete_service_account(namespace, label_selector)
+
+     def replicate(to_sa: str, namespace: str, from_sa: str, labels: dict[str, str] = {}, add_cluster_roles: list[str] = []):
+         ServiceAccounts.create_service_account(to_sa, namespace, labels=labels)
+         for b in ServiceAccounts.get_role_bindings(from_sa, namespace):
+             n = f'{to_sa}-{b.role_ref.name}'
+             ServiceAccounts.create_role_binding(n, namespace, to_sa, b.role_ref.name, labels=labels)
+
+         for b in ServiceAccounts.get_cluster_role_bindings(from_sa):
+             n = f'{to_sa}-{b.role_ref.name}'
+             ServiceAccounts.create_cluster_role_binding(n, namespace, to_sa, b.role_ref.name, labels=labels)
+
+         for cr in add_cluster_roles:
+             n = f'{to_sa}-{cr}'
+             ServiceAccounts.create_cluster_role_binding(n, namespace, to_sa, cr, labels=labels)
+
+     def create_service_account(name: str, namespace: str, labels: dict[str, str] = {}):
+         config.load_kube_config()
+
+         v1 = client.CoreV1Api()
+
+         service_account = client.V1ServiceAccount(
+             metadata=client.V1ObjectMeta(
+                 name=name,
+                 labels=labels)
+         )
+         api_response = v1.create_namespaced_service_account(
+             namespace=namespace,
+             body=service_account
+         )
+         debug(f"Service Account '{api_response.metadata.name}' created in namespace '{namespace}'.")
+
+     def delete_service_account(namespace: str, label_selector: str) -> list:
+         refs = []
+
+         v1 = client.CoreV1Api()
+         sas = v1.list_namespaced_service_account(namespace=namespace, label_selector=label_selector).items
+         for sa in sas:
+             debug(f'delete {sa.metadata.name}')
+             v1.delete_namespaced_service_account(name=sa.metadata.name, namespace=namespace)
+             refs.append(sa)
+
+         return refs
+
+     def create_role_binding(name: str, namespace: str, sa_name: str, role_name: str, labels: dict[str, str] = {}):
+         api = client.RbacAuthorizationV1Api()
+
+         metadata = client.V1ObjectMeta(
+             name=name,
+             namespace=namespace,
+             labels=labels
+         )
+         role_ref = client.V1RoleRef(
+             api_group="rbac.authorization.k8s.io",
+             kind="Role",
+             name=role_name
+         )
+
+         subjects = [
+             client.RbacV1Subject(
+                 kind="ServiceAccount",
+                 name=sa_name,       # Name of the service account
+                 namespace=namespace # Namespace of the service account
+             )
+         ]
+
+         role_binding = client.V1RoleBinding(
+             api_version="rbac.authorization.k8s.io/v1",
+             kind="RoleBinding",
+             metadata=metadata,
+             role_ref=role_ref,
+             subjects=subjects
+         )
+
+         api.create_namespaced_role_binding(namespace=namespace, body=role_binding)
+
+     def get_role_bindings(service_account_name: str, namespace: str) -> list:
+         refs = []
+
+         rbac_api = client.RbacAuthorizationV1Api()
+         role_bindings = rbac_api.list_namespaced_role_binding(namespace=namespace)
+         for binding in role_bindings.items:
+             if binding.subjects:
+                 for subject in binding.subjects:
+                     if subject.kind == "ServiceAccount" and subject.name == service_account_name:
+                         refs.append(binding)
+
+         return refs
+
+     def delete_role_bindings(namespace: str, label_selector: str) -> list:
+         refs = []
+
+         v1_rbac = client.RbacAuthorizationV1Api()
+         cluster_role_bindings = v1_rbac.list_namespaced_role_binding(namespace=namespace, label_selector=label_selector).items
+         for binding in cluster_role_bindings:
+             debug(f'delete {binding.metadata.name}')
+             v1_rbac.delete_namespaced_role_binding(name=binding.metadata.name, namespace=namespace)
+             refs.append(binding)
+
+         return refs
+
+     def create_cluster_role_binding(name: str, namespace: str, sa_name: str, role_name: str, labels: dict[str, str] = {}):
+         api = client.RbacAuthorizationV1Api()
+
+         metadata = client.V1ObjectMeta(
+             name=name,
+             namespace=namespace,
+             labels=labels
+         )
+         role_ref = client.V1RoleRef(
+             api_group="rbac.authorization.k8s.io",
+             kind="ClusterRole",
+             name=role_name
+         )
+
+         subjects = [
+             client.RbacV1Subject(
+                 kind="ServiceAccount",
+                 name=sa_name,
+                 namespace=namespace
+             )
+         ]
+
+         role_binding = client.V1ClusterRoleBinding(
+             api_version="rbac.authorization.k8s.io/v1",
+             metadata=metadata,
+             role_ref=role_ref,
+             subjects=subjects
+         )
+
+         try:
+             api.create_cluster_role_binding(body=role_binding)
+         except client.ApiException as e:
+             print(f"Error creating ClusterRoleBinding: {e}")
+
+     def get_cluster_role_bindings(service_account_name: str) -> list:
+         refs = []
+
+         v1_rbac = client.RbacAuthorizationV1Api()
+         cluster_role_bindings = v1_rbac.list_cluster_role_binding().items
+         for binding in cluster_role_bindings:
+             if binding.subjects:
+                 for subject in binding.subjects:
+                     if subject.kind == "ServiceAccount" and subject.name == service_account_name:
+                         refs.append(binding)
+
+         return refs
+
+
+     def delete_cluster_role_bindings(label_selector: str) -> list:
+         refs = []
+
+         v1_rbac = client.RbacAuthorizationV1Api()
+         cluster_role_bindings = v1_rbac.list_cluster_role_binding(label_selector=label_selector).items
+         for binding in cluster_role_bindings:
+             debug(f'delete {binding.metadata.name}')
+             v1_rbac.delete_cluster_role_binding(binding.metadata.name)
+             refs.append(binding)
+
+         return refs
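
ServiceAccounts.replicate clones an existing service account together with whatever Role and ClusterRole bindings point at it, then optionally binds extra cluster roles; everything is created with the supplied labels so it can later be removed with a single label selector. A minimal usage sketch (the account, namespace, and label names are hypothetical):

    # a minimal sketch, assuming an existing 'cass-operator' service account in the namespace
    from adam.utils_k8s.service_accounts import ServiceAccounts

    labels = {'app': 'adam-deploy'}
    ServiceAccounts.replicate('adam-agent', 'my-namespace', 'cass-operator',
                              labels=labels, add_cluster_roles=['view'])

    # later: remove the account and every binding created above via the label selector
    ServiceAccounts.delete('my-namespace', 'app=adam-deploy')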
{walker/k8s_utils → adam/utils_k8s}/services.py RENAMED
@@ -1,8 +1,8 @@
  from typing import List
  from kubernetes import client
 
- from walker.config import Config
- from walker.utils import log2
+ from adam.config import Config
+ from adam.utils import debug, log2
 
  from .kube_context import KubeContext
 
@@ -71,8 +71,7 @@ class Services:
                  namespace=namespace,
                  body=delete_options
              )
-             if Config().is_debug():
-                 log2(f"200 Service '{name}' in namespace '{namespace}' deleted successfully.")
+             debug(f"200 Service '{name}' in namespace '{namespace}' deleted successfully.")
          except client.ApiException as e:
              log2(f"Error deleting Service '{name}': {e}")
          except Exception as e: