kaqing 1.98.15-py3-none-any.whl → 2.0.145-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kaqing might be problematic.
Files changed (180)
  1. adam/app_session.py +1 -1
  2. adam/apps.py +2 -2
  3. adam/batch.py +30 -31
  4. adam/checks/check_utils.py +4 -4
  5. adam/checks/compactionstats.py +1 -1
  6. adam/checks/cpu.py +2 -2
  7. adam/checks/disk.py +1 -1
  8. adam/checks/gossip.py +1 -1
  9. adam/checks/memory.py +3 -3
  10. adam/checks/status.py +1 -1
  11. adam/commands/alter_tables.py +81 -0
  12. adam/commands/app.py +3 -3
  13. adam/commands/app_ping.py +2 -2
  14. adam/commands/audit/audit.py +86 -0
  15. adam/commands/audit/audit_repair_tables.py +77 -0
  16. adam/commands/audit/audit_run.py +58 -0
  17. adam/commands/audit/show_last10.py +51 -0
  18. adam/commands/audit/show_slow10.py +50 -0
  19. adam/commands/audit/show_top10.py +48 -0
  20. adam/commands/audit/utils_show_top10.py +59 -0
  21. adam/commands/bash/bash.py +133 -0
  22. adam/commands/bash/bash_completer.py +93 -0
  23. adam/commands/cat.py +56 -0
  24. adam/commands/cd.py +12 -82
  25. adam/commands/check.py +6 -0
  26. adam/commands/cli_commands.py +3 -3
  27. adam/commands/code.py +60 -0
  28. adam/commands/command.py +48 -12
  29. adam/commands/commands_utils.py +4 -5
  30. adam/commands/cql/cql_completions.py +28 -0
  31. adam/commands/cql/cql_utils.py +209 -0
  32. adam/commands/{cqlsh.py → cql/cqlsh.py} +15 -10
  33. adam/commands/deploy/__init__.py +0 -0
  34. adam/commands/{frontend → deploy}/code_start.py +1 -1
  35. adam/commands/{frontend → deploy}/code_stop.py +1 -1
  36. adam/commands/{frontend → deploy}/code_utils.py +2 -2
  37. adam/commands/deploy/deploy.py +48 -0
  38. adam/commands/deploy/deploy_frontend.py +52 -0
  39. adam/commands/deploy/deploy_pg_agent.py +38 -0
  40. adam/commands/deploy/deploy_pod.py +110 -0
  41. adam/commands/deploy/deploy_utils.py +29 -0
  42. adam/commands/deploy/undeploy.py +48 -0
  43. adam/commands/deploy/undeploy_frontend.py +41 -0
  44. adam/commands/deploy/undeploy_pg_agent.py +42 -0
  45. adam/commands/deploy/undeploy_pod.py +51 -0
  46. adam/commands/devices/__init__.py +0 -0
  47. adam/commands/devices/device.py +27 -0
  48. adam/commands/devices/device_app.py +146 -0
  49. adam/commands/devices/device_auit_log.py +43 -0
  50. adam/commands/devices/device_cass.py +145 -0
  51. adam/commands/devices/device_export.py +86 -0
  52. adam/commands/devices/device_postgres.py +109 -0
  53. adam/commands/devices/devices.py +25 -0
  54. adam/commands/export/__init__.py +0 -0
  55. adam/commands/export/clean_up_export_session.py +53 -0
  56. adam/commands/{frontend/teardown_frontend.py → export/clean_up_export_sessions.py} +9 -11
  57. adam/commands/export/drop_export_database.py +58 -0
  58. adam/commands/export/drop_export_databases.py +46 -0
  59. adam/commands/export/export.py +83 -0
  60. adam/commands/export/export_databases.py +170 -0
  61. adam/commands/export/export_select.py +85 -0
  62. adam/commands/export/export_select_x.py +54 -0
  63. adam/commands/export/export_use.py +55 -0
  64. adam/commands/export/exporter.py +364 -0
  65. adam/commands/export/import_session.py +68 -0
  66. adam/commands/export/importer.py +67 -0
  67. adam/commands/export/importer_athena.py +80 -0
  68. adam/commands/export/importer_sqlite.py +47 -0
  69. adam/commands/export/show_column_counts.py +63 -0
  70. adam/commands/export/show_export_databases.py +39 -0
  71. adam/commands/export/show_export_session.py +51 -0
  72. adam/commands/export/show_export_sessions.py +47 -0
  73. adam/commands/export/utils_export.py +291 -0
  74. adam/commands/help.py +12 -7
  75. adam/commands/issues.py +6 -0
  76. adam/commands/kubectl.py +41 -0
  77. adam/commands/login.py +9 -5
  78. adam/commands/logs.py +2 -1
  79. adam/commands/ls.py +4 -107
  80. adam/commands/medusa/medusa.py +2 -26
  81. adam/commands/medusa/medusa_backup.py +2 -2
  82. adam/commands/medusa/medusa_restore.py +3 -4
  83. adam/commands/medusa/medusa_show_backupjobs.py +4 -3
  84. adam/commands/medusa/medusa_show_restorejobs.py +3 -3
  85. adam/commands/nodetool.py +9 -4
  86. adam/commands/param_set.py +1 -1
  87. adam/commands/postgres/postgres.py +42 -43
  88. adam/commands/postgres/postgres_context.py +248 -0
  89. adam/commands/postgres/postgres_preview.py +0 -1
  90. adam/commands/postgres/postgres_utils.py +31 -0
  91. adam/commands/postgres/psql_completions.py +10 -0
  92. adam/commands/preview_table.py +18 -40
  93. adam/commands/pwd.py +2 -28
  94. adam/commands/reaper/reaper.py +4 -24
  95. adam/commands/reaper/reaper_restart.py +1 -1
  96. adam/commands/reaper/reaper_session.py +2 -2
  97. adam/commands/repair/repair.py +3 -27
  98. adam/commands/repair/repair_log.py +1 -1
  99. adam/commands/repair/repair_run.py +2 -2
  100. adam/commands/repair/repair_scan.py +2 -7
  101. adam/commands/repair/repair_stop.py +1 -1
  102. adam/commands/report.py +6 -0
  103. adam/commands/restart.py +2 -2
  104. adam/commands/rollout.py +1 -1
  105. adam/commands/shell.py +33 -0
  106. adam/commands/show/show.py +11 -26
  107. adam/commands/show/show_app_actions.py +3 -0
  108. adam/commands/show/show_app_id.py +1 -1
  109. adam/commands/show/show_app_queues.py +3 -2
  110. adam/commands/show/show_cassandra_status.py +3 -3
  111. adam/commands/show/show_cassandra_version.py +3 -3
  112. adam/commands/show/show_commands.py +4 -1
  113. adam/commands/show/show_host.py +33 -0
  114. adam/commands/show/show_login.py +3 -0
  115. adam/commands/show/show_processes.py +1 -1
  116. adam/commands/show/show_repairs.py +2 -2
  117. adam/commands/show/show_storage.py +1 -1
  118. adam/commands/watch.py +1 -1
  119. adam/config.py +16 -3
  120. adam/embedded_params.py +1 -1
  121. adam/pod_exec_result.py +10 -2
  122. adam/repl.py +132 -117
  123. adam/repl_commands.py +62 -18
  124. adam/repl_state.py +276 -55
  125. adam/sql/__init__.py +0 -0
  126. adam/sql/sql_completer.py +120 -0
  127. adam/sql/sql_state_machine.py +617 -0
  128. adam/sql/term_completer.py +76 -0
  129. adam/sso/authenticator.py +1 -1
  130. adam/sso/authn_ad.py +36 -56
  131. adam/sso/authn_okta.py +6 -32
  132. adam/sso/cred_cache.py +1 -1
  133. adam/sso/idp.py +74 -9
  134. adam/sso/idp_login.py +2 -2
  135. adam/sso/idp_session.py +10 -7
  136. adam/utils.py +85 -4
  137. adam/utils_athena.py +145 -0
  138. adam/utils_audits.py +102 -0
  139. adam/utils_k8s/__init__.py +0 -0
  140. adam/utils_k8s/app_clusters.py +33 -0
  141. adam/utils_k8s/app_pods.py +31 -0
  142. adam/{k8s_utils → utils_k8s}/cassandra_clusters.py +6 -21
  143. adam/{k8s_utils → utils_k8s}/cassandra_nodes.py +12 -5
  144. adam/utils_k8s/config_maps.py +34 -0
  145. adam/utils_k8s/deployment.py +56 -0
  146. adam/{k8s_utils → utils_k8s}/jobs.py +1 -1
  147. adam/{k8s_utils → utils_k8s}/kube_context.py +1 -1
  148. adam/utils_k8s/pods.py +342 -0
  149. adam/{k8s_utils → utils_k8s}/secrets.py +4 -0
  150. adam/utils_k8s/service_accounts.py +169 -0
  151. adam/{k8s_utils → utils_k8s}/statefulsets.py +5 -4
  152. adam/{k8s_utils → utils_k8s}/volumes.py +9 -0
  153. adam/utils_net.py +24 -0
  154. adam/utils_repl/__init__.py +0 -0
  155. adam/utils_repl/automata_completer.py +48 -0
  156. adam/utils_repl/repl_completer.py +46 -0
  157. adam/utils_repl/state_machine.py +173 -0
  158. adam/utils_sqlite.py +101 -0
  159. adam/version.py +1 -1
  160. {kaqing-1.98.15.dist-info → kaqing-2.0.145.dist-info}/METADATA +1 -1
  161. kaqing-2.0.145.dist-info/RECORD +227 -0
  162. adam/commands/bash.py +0 -87
  163. adam/commands/cql_utils.py +0 -53
  164. adam/commands/devices.py +0 -89
  165. adam/commands/frontend/setup.py +0 -60
  166. adam/commands/frontend/setup_frontend.py +0 -58
  167. adam/commands/frontend/teardown.py +0 -61
  168. adam/commands/postgres/postgres_session.py +0 -225
  169. adam/commands/user_entry.py +0 -77
  170. adam/k8s_utils/pods.py +0 -211
  171. kaqing-1.98.15.dist-info/RECORD +0 -160
  172. /adam/commands/{frontend → audit}/__init__.py +0 -0
  173. /adam/{k8s_utils → commands/bash}/__init__.py +0 -0
  174. /adam/{medusa_show_restorejobs.py → commands/cql/__init__.py} +0 -0
  175. /adam/{k8s_utils → utils_k8s}/custom_resources.py +0 -0
  176. /adam/{k8s_utils → utils_k8s}/ingresses.py +0 -0
  177. /adam/{k8s_utils → utils_k8s}/services.py +0 -0
  178. {kaqing-1.98.15.dist-info → kaqing-2.0.145.dist-info}/WHEEL +0 -0
  179. {kaqing-1.98.15.dist-info → kaqing-2.0.145.dist-info}/entry_points.txt +0 -0
  180. {kaqing-1.98.15.dist-info → kaqing-2.0.145.dist-info}/top_level.txt +0 -0
adam/utils_athena.py ADDED
@@ -0,0 +1,145 @@
+ import functools
+ import time
+ import boto3
+ import botocore
+
+ from adam.config import Config
+ from adam.utils import lines_to_tabular, log, log2
+
+ # no state utility class
+ class Athena:
+     @functools.lru_cache()
+     def database_names(like: str = None):
+         # this function is called only from export currently
+         Config().wait_log(f'Inspecting export database schema...')
+
+         query = f"SELECT schema_name FROM information_schema.schemata WHERE schema_name <> 'information_schema'"
+         if like:
+             query = f"{query} AND schema_name like '{like}'"
+
+         try:
+             state, reason, rs = Athena.query(query)
+             if rs:
+                 names = []
+                 for row in rs[1:]:
+                     row_data = [col.get('VarCharValue') if col else '' for col in row['Data']]
+                     names.append(row_data[0])
+
+                 return names
+         except:
+             pass
+
+         return []
+
+     def clear_cache(cache: str = None):
+         if not cache or cache == 'databases':
+             Athena.database_names.cache_clear()
+         if not cache or cache == 'tables':
+             Athena.table_names.cache_clear()
+         if not cache or cache == 'columns':
+             Athena.column_names.cache_clear()
+
+     @functools.lru_cache()
+     def table_names(database: str = 'audit', function: str = 'audit'):
+         table_names = []
+         try:
+             region_name = Config().get(f'{function}.athena.region', 'us-west-2')
+             database_name = Config().get(f'{function}.athena.database', database)
+             catalog_name = Config().get(f'{function}.athena.catalog', 'AwsDataCatalog')
+
+             athena_client = boto3.client('athena', region_name=region_name)
+             paginator = athena_client.get_paginator('list_table_metadata')
+
+             for page in paginator.paginate(CatalogName=catalog_name, DatabaseName=database_name):
+                 for table_metadata in page.get('TableMetadataList', []):
+                     table_names.append(table_metadata['Name'])
+         except botocore.exceptions.NoCredentialsError as e:
+             # aws credentials not found
+             if function == 'audit':
+                 log2(f'Please configure AWS credentials to Audit Log Database.')
+         except:
+             pass
+
+         return table_names
+
+     @functools.lru_cache()
+     def column_names(tables: list[str] = [], database: str = None, function: str = 'audit', partition_cols_only = False):
+         try:
+             if not database:
+                 database = Config().get(f'{function}.athena.database', 'audit')
+
+             if not tables:
+                 tables = Config().get(f'{function}.athena.tables', 'audit').split(',')
+
+             table_names = "'" + "','".join([table.strip() for table in tables]) + "'"
+
+             query = f"select column_name from information_schema.columns where table_name in ({table_names}) and table_schema = '{database}'"
+             if partition_cols_only:
+                 query = f"{query} and extra_info = 'partition key'"
+
+             _, _, rs = Athena.query(query)
+             if rs:
+                 return [row['Data'][0].get('VarCharValue') for row in rs[1:]]
+         except:
+             # aws credentials not found
+             pass
+
+         return []
+
+     def run_query(sql: str, database: str = None):
+         state, reason, rs = Athena.query(sql, database)
+
+         if state == 'SUCCEEDED':
+             if rs:
+                 column_info = rs[0]['Data']
+                 columns = [col.get('VarCharValue') for col in column_info]
+                 lines = []
+                 for row in rs[1:]:
+                     row_data = [col.get('VarCharValue') if col else '' for col in row['Data']]
+                     lines.append('\t'.join(row_data))
+
+                 log(lines_to_tabular(lines, header='\t'.join(columns), separator='\t'))
+
+                 return len(lines)
+         else:
+             log2(f"Query failed or was cancelled. State: {state}")
+             log2(f"Reason: {reason}")
+
+         return 0
+
+     def query(sql: str, database: str = None, function: str = 'audit') -> tuple[str, str, list]:
+         region_name = Config().get(f'{function}.athena.region', 'us-west-2')
+         athena_client = boto3.client('athena', region_name=region_name)
+
+         if not database:
+             database = Config().get(f'{function}.athena.database', 'audit')
+
+         s3_output_location = Config().get(f'{function}.athena.output', f's3://s3.ops--{function}/ddl/results')
+
+         response = athena_client.start_query_execution(
+             QueryString=sql,
+             QueryExecutionContext={
+                 'Database': database
+             },
+             ResultConfiguration={
+                 'OutputLocation': s3_output_location
+             }
+         )
+
+         query_execution_id = response['QueryExecutionId']
+
+         while True:
+             query_status = athena_client.get_query_execution(QueryExecutionId=query_execution_id)
+             state = query_status['QueryExecution']['Status']['State']
+             if state in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
+                 break
+             time.sleep(1)
+
+         if state == 'SUCCEEDED':
+             results_response = athena_client.get_query_results(QueryExecutionId=query_execution_id)
+             if results_response['ResultSet']['Rows']:
+                 return (state, None, results_response['ResultSet']['Rows'])
+
+             return (state, None, [])
+         else:
+             return (state, query_status['QueryExecution']['Status'].get('StateChangeReason'), [])
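
The Athena helper above is a thin, stateless wrapper over boto3: start_query_execution, poll get_query_execution until the query reaches a terminal state, then read rows back with get_query_results. A minimal usage sketch, assuming AWS credentials and the audit.athena.* config defaults shown above (the database name and LIKE pattern are illustrative):

    from adam.utils_athena import Athena

    # list export databases; results are memoized by functools.lru_cache
    names = Athena.database_names(like='export%')

    # run an ad-hoc query and print it as a tab-separated table
    Athena.run_query('select 1', database='audit')

    # drop the cached database names after creating/dropping schemas
    Athena.clear_cache('databases')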
adam/utils_audits.py ADDED
@@ -0,0 +1,102 @@
+ from datetime import datetime
+ import getpass
+ import time
+ import requests
+
+ from adam.config import Config
+ from adam.utils import log2
+ from adam.utils_net import get_my_host
+
+ class AuditMeta:
+     def __init__(self, partitions_last_checked: float, cluster_last_checked: float):
+         self.partitions_last_checked = partitions_last_checked
+         self.cluster_last_checked = cluster_last_checked
+
+ # no state utility class
+ class Audits:
+     PARTITIONS_ADDED = 'partitions-added'
+     ADD_CLUSTERS = 'add-clusters'
+
+     def log(cmd: str, cluster = 'NA', drive: str = 'NA', duration: float = 0.0, audit_extra = None):
+         payload = {
+             'cluster': cluster if cluster else 'NA',
+             'ts': time.time(),
+             'host': get_my_host(),
+             'user': getpass.getuser(),
+             'line': cmd.replace('"', '""').replace('\n', ' '),
+             'drive': drive,
+             'duration': duration,
+             'audit_extra': audit_extra if audit_extra else '',
+         }
+         audit_endpoint = Config().get("audit.endpoint", "https://4psvtaxlcb.execute-api.us-west-2.amazonaws.com/prod/")
+         try:
+             response = requests.post(audit_endpoint, json=payload, timeout=Config().get("audit.timeout", 10))
+             if response.status_code in [200, 201]:
+                 Config().debug(response.text)
+             else:
+                 log2(f"Error: {response.status_code} {response.text}")
+         except requests.exceptions.Timeout as e:
+             log2(f"Timeout occurred: {e}")
+
+     def get_meta() -> AuditMeta:
+         checked_in = 0.0
+         cluster_last_checked = 0.0
+
+         state, _, rs = Audits.audit_query(f'select partitions_last_checked, clusters_last_checked from meta')
+         if state == 'SUCCEEDED':
+             if len(rs) > 1:
+                 try:
+                     row = rs[1]['Data']
+                     checked_in = float(row[0]['VarCharValue'])
+                     cluster_last_checked = float(row[1]['VarCharValue'])
+                 except:
+                     pass
+
+         return AuditMeta(checked_in, cluster_last_checked)
+
+     def put_meta(action: str, meta: AuditMeta, clusters: list[str] = None):
+         payload = {
+             'action': action,
+             'partitions-last-checked': meta.partitions_last_checked,
+             'clusters-last-checked': meta.cluster_last_checked
+         }
+         if clusters:
+             payload['clusters'] = clusters
+
+         audit_endpoint = Config().get("audit.endpoint", "https://4psvtaxlcb.execute-api.us-west-2.amazonaws.com/prod/")
+         try:
+             response = requests.post(audit_endpoint, json=payload, timeout=Config().get("audit.timeout", 10))
+             if response.status_code in [200, 201]:
+                 Config().debug(response.text)
+             else:
+                 log2(f"Error: {response.status_code} {response.text}")
+         except requests.exceptions.Timeout as e:
+             log2(f"Timeout occurred: {e}")
+
+     def find_new_clusters(cluster_last_checked: float) -> list[str]:
+         dt_object = datetime.fromtimestamp(cluster_last_checked)
+
+         # select distinct c2.name from cluster as c1 right outer join
+         # (select distinct c as name from audit where y = '1969' and m = '12' and d >= '31' or y = '1969' and m > '12' or y > '1969') as c2
+         # on c1.name = c2.name where c1.name is null
+         query = '\n '.join([
+             'select distinct c2.name from cluster as c1 right outer join',
+             f'(select distinct c as name from audit where {Audits.date_from(dt_object)}) as c2',
+             'on c1.name = c2.name where c1.name is null'])
+         log2(query)
+         state, _, rs = Audits.audit_query(query)
+         if state == 'SUCCEEDED':
+             if len(rs) > 1:
+                 try:
+                     return [r['Data'][0]['VarCharValue'] for r in rs[1:]]
+                 except:
+                     pass
+
+         return []
+
+     def date_from(dt_object: datetime):
+         y = dt_object.strftime("%Y")
+         m = dt_object.strftime("%m")
+         d = dt_object.strftime("%d")
+
+         return f"y = '{y}' and m = '{m}' and d >= '{d}' or y = '{y}' and m > '{m}' or y > '{y}'"
adam/utils_k8s/__init__.py ADDED
File without changes
adam/utils_k8s/app_clusters.py ADDED
@@ -0,0 +1,33 @@
+ from concurrent.futures import ThreadPoolExecutor
+ import sys
+ from typing import TypeVar
+
+ from adam.utils_k8s.app_pods import AppPods
+ from adam.pod_exec_result import PodExecResult
+ from adam.utils import log2
+ from adam.utils_k8s.pods import Pods
+ from .kube_context import KubeContext
+
+ T = TypeVar('T')
+
+ # utility collection on app clusters; methods are all static
+ class AppClusters:
+     def exec(pods: list[str], namespace: str, command: str, action: str = 'action',
+              max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', background = False) -> list[PodExecResult]:
+         def body(executor: ThreadPoolExecutor, pod: str, namespace: str, show_out: bool):
+             if executor:
+                 return executor.submit(AppPods.exec, pod, namespace, command, False, False, shell, background)
+
+             return AppPods.exec(pod, namespace, command, show_out=show_out, background=background)
+
+         def post(result, show_out: bool):
+             if KubeContext.show_out(show_out):
+                 print(result.command)
+                 if result.stdout:
+                     print(result.stdout)
+                 if result.stderr:
+                     log2(result.stderr, file=sys.stderr)
+
+             return result
+
+         return Pods.on_pods(pods, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out, on_any=on_any, background=background)
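
AppClusters.exec fans one shell command out over a list of app pods through Pods.on_pods, using a ThreadPoolExecutor when max_workers is set. Illustrative call, with pod names and namespace as placeholders:

    from adam.utils_k8s.app_clusters import AppClusters

    results = AppClusters.exec(['app-pod-0', 'app-pod-1'], 'my-namespace', 'hostname',
                               action='hostname', max_workers=2)
    for r in results:
        print(r.stdout)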
adam/utils_k8s/app_pods.py ADDED
@@ -0,0 +1,31 @@
+ from typing import List
+ from kubernetes import client
+
+ from adam.config import Config
+ from adam.utils_k8s.pods import Pods
+ from adam.pod_exec_result import PodExecResult
+ from adam.repl_session import ReplSession
+
+ # utility collection on app pods; methods are all static
+ class AppPods:
+     def pod_names(namespace: str, env: str, app: str):
+         return [pod.metadata.name for pod in AppPods.app_pods(namespace, env, app)]
+
+     def app_pods(namespace: str, env: str, app: str) -> List[client.V1Pod]:
+         v1 = client.CoreV1Api()
+
+         env_key = Config().get('app.env', 'c3__env-0')
+         app_key = Config().get('app.app', 'c3__app-0')
+         label_selector = f'applicationGroup=c3,{env_key}=0{env}0,{app_key}=0{app}0'
+
+         return v1.list_namespaced_pod(namespace, label_selector=label_selector).items
+
+     def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh', background = False) -> PodExecResult:
+         container = Config().get('app.container-name', 'c3-server')
+         r = Pods.exec(pod_name, container, namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, background = background)
+
+         if r and Config().get('repl.history.push-cat-remote-log-file', True):
+             if r.log_file and ReplSession().prompt_session:
+                 ReplSession().prompt_session.history.append_string(f'@{r.pod} cat {r.log_file}')
+
+         return r
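
AppPods locates application pods by label selector (applicationGroup=c3 plus the env/app labels taken from config) and execs into the configured c3-server container. A short sketch, assuming a kube client is already configured; namespace, env and app values are placeholders:

    from adam.utils_k8s.app_pods import AppPods

    pods = AppPods.pod_names('my-namespace', 'dev', 'myapp')
    if pods:
        result = AppPods.exec(pods[0], 'my-namespace', 'ls /tmp')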
adam/{k8s_utils → utils_k8s}/cassandra_clusters.py RENAMED
@@ -1,25 +1,24 @@
- from collections.abc import Callable
  from concurrent.futures import ThreadPoolExecutor
  import sys
  from typing import TypeVar

- from adam.k8s_utils.cassandra_nodes import CassandraNodes
+ from adam.utils_k8s.cassandra_nodes import CassandraNodes
  from adam.pod_exec_result import PodExecResult
  from adam.utils import log2
  from .statefulsets import StatefulSets
- from .pods import Pods
  from .kube_context import KubeContext

  T = TypeVar('T')

  # utility collection on cassandra clusters; methods are all static
  class CassandraClusters:
-     def exec(statefulset: str, namespace: str, command: str, action: str = 'action', max_workers=0, show_out=True) -> list[PodExecResult]:
+     def exec(statefulset: str, namespace: str, command: str, action: str = 'action',
+              max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', background = False, log_file = None) -> list[PodExecResult]:
          def body(executor: ThreadPoolExecutor, pod: str, namespace: str, show_out: bool):
              if executor:
-                 return executor.submit(CassandraNodes.exec, pod, namespace, command, False, False,)
+                 return executor.submit(CassandraNodes.exec, pod, namespace, command, False, False, shell, background, log_file)

-             return CassandraNodes.exec(pod, namespace, command, show_out=show_out)
+             return CassandraNodes.exec(pod, namespace, command, show_out=show_out, background=background, log_file=log_file)

          def post(result, show_out: bool):
              if KubeContext.show_out(show_out):
@@ -31,18 +30,4 @@ class CassandraClusters:

              return result

-         return StatefulSets.on_cluster(statefulset, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out)
-
-     def on_cluster(statefulset: str,
-                    namespace: str,
-                    body: Callable[[ThreadPoolExecutor, str, str, bool], T],
-                    post: Callable[[T], T] = None,
-                    action: str = 'action', max_workers=0, show_out=True) -> list[T]:
-         pods = StatefulSets.pod_names(statefulset, namespace)
-
-         return Pods.on_pods(pods, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out)
-
-     def pod_names_by_host_id(ss: str, ns: str):
-         pods = StatefulSets.pods(ss, ns)
-
-         return {CassandraNodes.get_host_id(pod.metadata.name, ns): pod.metadata.name for pod in pods}
+         return StatefulSets.on_cluster(statefulset, namespace, body, post=post, action=action, max_workers=max_workers, show_out=show_out, on_any=on_any, background=background)
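
CassandraClusters.exec keeps the same fan-out shape but now delegates pod iteration to StatefulSets.on_cluster and threads the new on_any/shell/background/log_file options through to CassandraNodes.exec. Illustrative call, with statefulset and namespace as placeholders:

    from adam.utils_k8s.cassandra_clusters import CassandraClusters

    results = CassandraClusters.exec('cassandra-dc1', 'my-namespace', 'nodetool status',
                                     action='status', max_workers=3)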
adam/{k8s_utils → utils_k8s}/cassandra_nodes.py RENAMED
@@ -1,18 +1,25 @@
  from adam.config import Config
- from adam.k8s_utils.pods import Pods
- from adam.k8s_utils.secrets import Secrets
+ from adam.utils_k8s.pods import Pods
+ from adam.utils_k8s.secrets import Secrets
  from adam.pod_exec_result import PodExecResult
+ from adam.repl_session import ReplSession

  # utility collection on cassandra nodes; methods are all static
  class CassandraNodes:
-     def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False) -> PodExecResult:
-         return Pods.exec(pod_name, "cassandra", namespace, command, show_out, throw_err)
+     def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh', background = False, log_file = None) -> PodExecResult:
+         r = Pods.exec(pod_name, "cassandra", namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, background = background, log_file=log_file)
+
+         if r and Config().get('repl.history.push-cat-remote-log-file', True):
+             if r.log_file and ReplSession().prompt_session:
+                 ReplSession().prompt_session.history.append_string(f'@{r.pod} cat {r.log_file}')
+
+         return r

      def get_host_id(pod_name: str, ns: str):
          try:
              user, pw = Secrets.get_user_pass(pod_name, ns)
              command = f'echo "SELECT host_id FROM system.local; exit" | cqlsh --no-color -u {user} -p {pw}'
-             result: PodExecResult = CassandraNodes.exec(pod_name, ns, command, show_out=Config().get('debug.trace', False))
+             result: PodExecResult = CassandraNodes.exec(pod_name, ns, command, show_out=Config().is_debug())
              next = False
              for line in result.stdout.splitlines():
                  if next:
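
With the widened signature, a background CassandraNodes.exec that captures output to a remote log file also pushes an `@<pod> cat <log file>` entry into the REPL prompt history (guarded by the repl.history.push-cat-remote-log-file setting). A hedged sketch, with pod, namespace and log path as placeholders:

    from adam.utils_k8s.cassandra_nodes import CassandraNodes

    r = CassandraNodes.exec('cassandra-0', 'my-namespace', 'nodetool repair',
                            background=True, log_file='/tmp/repair.log')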
adam/utils_k8s/config_maps.py ADDED
@@ -0,0 +1,34 @@
+ from kubernetes import client
+
+ # utility collection on config maps; methods are all static
+ class ConfigMaps:
+     def create(name: str, namespace: str, data: dict[str, str], labels: dict[str, str] = {}):
+         v1 = client.CoreV1Api()
+
+         metadata = client.V1ObjectMeta(
+             name=name,
+             namespace=namespace,
+             labels=labels
+         )
+
+         configmap = client.V1ConfigMap(
+             api_version="v1",
+             kind="ConfigMap",
+             metadata=metadata,
+             data=data
+         )
+
+         try:
+             api_response = v1.create_namespaced_config_map(body=configmap, namespace=namespace)
+             # print(f"ConfigMap '{name}' created successfully in namespace '{namespace}'.")
+             # print(api_response)
+         except client.ApiException as e:
+             # print(f"Error creating ConfigMap: {e}")
+             raise e
+
+     def delete_with_selector(namespace: str, label_selector: str):
+         v1 = client.CoreV1Api()
+
+         ret = v1.list_namespaced_config_map(namespace=namespace, label_selector=label_selector)
+         for i in ret.items:
+             v1.delete_namespaced_config_map(name=i.metadata.name, namespace=namespace)
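
ConfigMaps.create builds a V1ConfigMap from a plain dict and creates it via CoreV1Api; delete_with_selector removes every ConfigMap matching a label selector. Illustrative usage, assuming an already-loaded kubeconfig; names and labels are placeholders:

    from kubernetes import config
    from adam.utils_k8s.config_maps import ConfigMaps

    config.load_kube_config()
    ConfigMaps.create('kaqing-demo', 'my-namespace', {'key': 'value'}, labels={'app': 'kaqing'})
    ConfigMaps.delete_with_selector('my-namespace', 'app=kaqing')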
adam/utils_k8s/deployment.py ADDED
@@ -0,0 +1,56 @@
+ from kubernetes import client
+
+ from adam.utils_k8s.pods import Pods
+ from adam.utils_k8s.volumes import ConfigMapMount
+
+ # utility collection on deployments; methods are all static
+ class Deployments:
+     def delete_with_selector(namespace: str, label_selector: str, grace_period_seconds: int = None):
+         v1 = client.AppsV1Api()
+
+         ret = v1.list_namespaced_deployment(namespace=namespace, label_selector=label_selector)
+         for i in ret.items:
+             v1.delete_namespaced_deployment(name=i.metadata.name, namespace=namespace, grace_period_seconds=grace_period_seconds)
+
+     def create_deployment_spec(name: str, image: str, image_pull_secret: str,
+                                envs: list, container_security_context: client.V1SecurityContext,
+                                volume_name: str, pvc_name:str, mount_path:str,
+                                command: list[str]=None, sa_name=None, labels: dict[str, str] = {},
+                                config_map_mount: ConfigMapMount = None):
+         return client.V1DeploymentSpec(
+             replicas=1,
+             selector=client.V1LabelSelector(match_labels=labels),
+             template=client.V1PodTemplateSpec(
+                 metadata=client.V1ObjectMeta(labels=labels),
+                 spec=Pods.create_pod_spec(name, image, image_pull_secret, envs, container_security_context,
+                                           volume_name, pvc_name, mount_path, command=command, sa_name=sa_name,
+                                           restart_policy="Always", config_map_mount=config_map_mount),
+             ),
+         )
+
+     def create(namespace: str, deployment_name: str, image: str,
+                command: list[str] = None,
+                secret: str = None,
+                env: dict[str, any] = {},
+                container_security_context: client.V1SecurityContext = None,
+                labels: dict[str, str] = {},
+                volume_name: str = None,
+                pvc_name: str = None,
+                mount_path: str = None,
+                sa_name=None,
+                config_map_mount: ConfigMapMount = None):
+         v1 = client.AppsV1Api()
+         envs = []
+         for k, v in env.items():
+             envs.append(client.V1EnvVar(name=str(k), value=str(v)))
+         deployment = Deployments.create_deployment_spec(deployment_name, image, secret, envs,
+                                                         container_security_context, volume_name, pvc_name,
+                                                         mount_path, command=command, sa_name=sa_name, labels=labels,
+                                                         config_map_mount=config_map_mount)
+         return v1.create_namespaced_deployment(
+             namespace=namespace,
+             body=client.V1Deployment(spec=deployment, metadata=client.V1ObjectMeta(
+                 name=deployment_name,
+                 labels=labels
+             ))
+         )
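
Deployments.create wraps AppsV1Api.create_namespaced_deployment around a single-replica spec produced by Pods.create_pod_spec. A minimal sketch, with image, labels and namespace as placeholders (volume and service-account arguments left at their defaults):

    from adam.utils_k8s.deployment import Deployments

    Deployments.create('my-namespace', 'kaqing-agent', 'registry.example.com/agent:latest',
                       env={'LOG_LEVEL': 'info'}, labels={'app': 'kaqing-agent'})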
adam/{k8s_utils → utils_k8s}/jobs.py RENAMED
@@ -12,7 +12,7 @@ class Jobs:
              envs.append(client.V1EnvVar(name=k.upper(), value=str(v)))
          for k, v in env_from.items():
              envs.append(client.V1EnvVar(name=k.upper(), value_from=client.V1EnvVarSource(secret_key_ref=client.V1SecretKeySelector(key=k, name=v))))
-         template = Pods.create_pod_spec(job_name, image, image_pull_secret, envs, volume_name, pvc_name, mount_path, command)
+         template = Pods.create_pod_spec(job_name, image, image_pull_secret, envs, None, volume_name, pvc_name, mount_path, command)
          spec = client.V1JobSpec(template=client.V1PodTemplateSpec(spec=template), backoff_limit=1, ttl_seconds_after_finished=300)
          job = client.V1Job(
              api_version="batch/v1",
adam/{k8s_utils → utils_k8s}/kube_context.py RENAMED
@@ -105,4 +105,4 @@ class KubeContext:
          return s or Config().is_debug()

      def show_parallelism():
-         return Config().get('debug.show-parallelism', False)
+         return Config().get('debugs.show-parallelism', False)