kaqing-2.0.184-py3-none-any.whl → kaqing-2.0.214-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- adam/app_session.py +1 -1
- adam/batch.py +15 -15
- adam/commands/app/app.py +2 -2
- adam/commands/app/show_app_actions.py +1 -1
- adam/commands/{show → app}/show_login.py +1 -1
- adam/commands/app/utils_app.py +9 -1
- adam/commands/audit/audit.py +6 -20
- adam/commands/audit/audit_repair_tables.py +1 -1
- adam/commands/audit/audit_run.py +1 -1
- adam/commands/audit/completions_l.py +15 -0
- adam/commands/audit/show_last10.py +0 -1
- adam/commands/bash/bash.py +1 -1
- adam/commands/bash/utils_bash.py +1 -1
- adam/commands/cassandra/download_cassandra_log.py +45 -0
- adam/commands/cassandra/restart_cluster.py +47 -0
- adam/commands/cassandra/restart_node.py +51 -0
- adam/commands/cassandra/restart_nodes.py +47 -0
- adam/commands/{rollout.py → cassandra/rollout.py} +1 -1
- adam/commands/{show → cassandra}/show_cassandra_repairs.py +5 -3
- adam/commands/{show → cassandra}/show_cassandra_status.py +22 -15
- adam/commands/cassandra/show_processes.py +50 -0
- adam/commands/{show → cassandra}/show_storage.py +10 -8
- adam/commands/cli/__init__.py +0 -0
- adam/commands/{cli_commands.py → cli/cli_commands.py} +6 -1
- adam/commands/{clipboard_copy.py → cli/clipboard_copy.py} +2 -2
- adam/commands/{show/show_commands.py → cli/show_cli_commands.py} +2 -2
- adam/commands/command.py +22 -9
- adam/commands/commands_utils.py +14 -6
- adam/commands/config/__init__.py +0 -0
- adam/commands/{show → config}/show_params.py +1 -1
- adam/commands/{alter_tables.py → cql/alter_tables.py} +1 -1
- adam/commands/cql/completions_c.py +29 -0
- adam/commands/cql/cqlsh.py +2 -6
- adam/commands/cql/utils_cql.py +26 -17
- adam/commands/debug/__init__.py +0 -0
- adam/commands/debug/debug.py +22 -0
- adam/commands/debug/debug_completes.py +35 -0
- adam/commands/debug/debug_timings.py +35 -0
- adam/commands/debug/show_offloaded_completes.py +45 -0
- adam/commands/devices/device.py +30 -4
- adam/commands/devices/device_app.py +1 -1
- adam/commands/devices/device_export.py +5 -2
- adam/commands/devices/device_postgres.py +13 -3
- adam/commands/devices/devices.py +1 -1
- adam/commands/diag/__init__.py +0 -0
- adam/commands/{check.py → diag/check.py} +1 -1
- adam/commands/diag/generate_report.py +52 -0
- adam/commands/export/completions_x.py +11 -0
- adam/commands/export/download_export_session.py +2 -1
- adam/commands/export/export.py +0 -16
- adam/commands/export/export_databases.py +16 -10
- adam/commands/export/export_select.py +8 -33
- adam/commands/export/export_sessions.py +12 -11
- adam/commands/export/export_use.py +3 -3
- adam/commands/export/export_x_select.py +48 -0
- adam/commands/export/exporter.py +140 -53
- adam/commands/export/import_files.py +2 -2
- adam/commands/export/import_session.py +0 -4
- adam/commands/export/importer.py +11 -11
- adam/commands/export/importer_athena.py +15 -35
- adam/commands/export/importer_sqlite.py +19 -8
- adam/commands/export/show_column_counts.py +10 -10
- adam/commands/export/show_export_databases.py +2 -1
- adam/commands/export/show_export_session.py +1 -1
- adam/commands/export/show_export_sessions.py +1 -1
- adam/commands/export/utils_export.py +38 -15
- adam/commands/fs/__init__.py +0 -0
- adam/commands/{cat.py → fs/cat.py} +2 -2
- adam/commands/fs/cat_local.py +42 -0
- adam/commands/{cd.py → fs/cd.py} +2 -2
- adam/commands/{download_file.py → fs/download_file.py} +5 -5
- adam/commands/{find_files.py → fs/find_files.py} +4 -4
- adam/commands/{find_processes.py → fs/find_processes.py} +3 -3
- adam/commands/{head.py → fs/head.py} +2 -2
- adam/commands/{ls.py → fs/ls.py} +2 -2
- adam/commands/fs/ls_local.py +40 -0
- adam/commands/fs/rm.py +18 -0
- adam/commands/fs/rm_downloads.py +39 -0
- adam/commands/fs/rm_logs.py +38 -0
- adam/commands/{show → fs}/show_adam.py +1 -1
- adam/commands/intermediate_command.py +3 -0
- adam/commands/medusa/medusa_restore.py +2 -16
- adam/commands/medusa/utils_medusa.py +15 -0
- adam/commands/nodetool/__init__.py +0 -0
- adam/commands/{nodetool.py → nodetool/nodetool.py} +3 -8
- adam/commands/postgres/completions_p.py +22 -0
- adam/commands/postgres/postgres.py +7 -14
- adam/commands/postgres/postgres_databases.py +3 -3
- adam/commands/postgres/postgres_ls.py +1 -1
- adam/commands/postgres/utils_postgres.py +12 -2
- adam/commands/preview_table.py +1 -1
- adam/commands/reaper/reaper_schedule_activate.py +6 -2
- adam/commands/reaper/reaper_schedule_start.py +1 -2
- adam/commands/reaper/reaper_schedule_stop.py +1 -2
- adam/commands/reaper/utils_reaper.py +10 -1
- adam/commands/repair/repair_scan.py +0 -2
- adam/commands/repair/repair_stop.py +0 -1
- adam/commands/{show/show.py → show.py} +12 -11
- adam/config.py +4 -5
- adam/embedded_params.py +1 -1
- adam/repl.py +22 -9
- adam/repl_commands.py +50 -42
- adam/repl_session.py +9 -1
- adam/repl_state.py +16 -1
- adam/sql/async_executor.py +62 -0
- adam/sql/lark_completer.py +286 -0
- adam/sql/lark_parser.py +604 -0
- adam/sql/qingl.lark +1076 -0
- adam/sso/cred_cache.py +2 -5
- adam/utils.py +216 -79
- adam/utils_k8s/app_clusters.py +11 -4
- adam/utils_k8s/app_pods.py +10 -5
- adam/utils_k8s/cassandra_clusters.py +8 -4
- adam/utils_k8s/cassandra_nodes.py +14 -5
- adam/utils_k8s/k8s.py +9 -0
- adam/utils_k8s/kube_context.py +1 -4
- adam/{pod_exec_result.py → utils_k8s/pod_exec_result.py} +8 -2
- adam/utils_k8s/pods.py +83 -24
- adam/utils_k8s/statefulsets.py +5 -2
- adam/utils_local.py +78 -2
- adam/utils_repl/appendable_completer.py +6 -0
- adam/utils_repl/repl_completer.py +51 -4
- adam/utils_sqlite.py +3 -8
- adam/version.py +1 -1
- {kaqing-2.0.184.dist-info → kaqing-2.0.214.dist-info}/METADATA +1 -1
- kaqing-2.0.214.dist-info/RECORD +272 -0
- kaqing-2.0.214.dist-info/top_level.txt +2 -0
- teddy/__init__.py +0 -0
- teddy/lark_parser.py +436 -0
- teddy/lark_parser2.py +618 -0
- adam/commands/cql/cql_completions.py +0 -32
- adam/commands/export/export_select_x.py +0 -54
- adam/commands/logs.py +0 -37
- adam/commands/postgres/psql_completions.py +0 -11
- adam/commands/report.py +0 -61
- adam/commands/restart.py +0 -60
- adam/commands/show/show_processes.py +0 -49
- kaqing-2.0.184.dist-info/RECORD +0 -244
- kaqing-2.0.184.dist-info/top_level.txt +0 -1
- /adam/commands/{login.py → app/login.py} +0 -0
- /adam/commands/{show → cassandra}/__init__.py +0 -0
- /adam/commands/{show → cassandra}/show_cassandra_version.py +0 -0
- /adam/commands/{watch.py → cassandra/watch.py} +0 -0
- /adam/commands/{param_get.py → config/param_get.py} +0 -0
- /adam/commands/{param_set.py → config/param_set.py} +0 -0
- /adam/commands/{issues.py → diag/issues.py} +0 -0
- /adam/commands/{pwd.py → fs/pwd.py} +0 -0
- /adam/commands/{shell.py → fs/shell.py} +0 -0
- /adam/commands/{show → fs}/show_host.py +0 -0
- /adam/commands/{nodetool_commands.py → nodetool/nodetool_commands.py} +0 -0
- {kaqing-2.0.184.dist-info → kaqing-2.0.214.dist-info}/WHEEL +0 -0
- {kaqing-2.0.184.dist-info → kaqing-2.0.214.dist-info}/entry_points.txt +0 -0
adam/sso/cred_cache.py
CHANGED

@@ -2,8 +2,7 @@ import os
 from pathlib import Path
 from dotenv import load_dotenv
 
-from adam.
-from adam.utils import debug, log_exc
+from adam.utils import creating_dir, debug, log_exc
 from adam.utils_k8s.kube_context import KubeContext
 
 class CredCache:
@@ -15,7 +14,7 @@ class CredCache:
 
     def __init__(self):
         if not hasattr(self, 'env_f'):
-            self.dir = f'{Path.home()}/.kaqing'
+            self.dir = creating_dir(f'{Path.home()}/.kaqing')
             self.env_f = f'{self.dir}/.credentials'
             # immutable - cannot reload with different file content
             load_dotenv(dotenv_path=self.env_f)
@@ -44,8 +43,6 @@ class CredCache:
             updated.append(f'IDP_PASSWORD={password}')
 
         if updated:
-            if not os.path.exists(self.env_f):
-                os.makedirs(self.dir, exist_ok=True)
             with open(self.env_f, 'w') as file:
                 file.write('\n'.join(updated))
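Both removals above swap an inline `os.makedirs` guard for the new `creating_dir` helper that this release adds at the end of adam/utils.py (see below). A minimal standalone sketch of that pattern, using the names from the diff:

import os

_dirs_created = set()  # directories this process has already ensured

def creating_dir(path: str) -> str:
    # memoized mkdir: touch the filesystem only the first time a path is seen
    if path not in _dirs_created:
        _dirs_created.add(path)
        os.makedirs(path, exist_ok=True)
    return path

# as in CredCache.__init__: the assignment now also guarantees the directory exists
cache_dir = creating_dir(os.path.expanduser('~/.kaqing'))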
adam/utils.py
CHANGED

@@ -1,3 +1,4 @@
+from abc import ABC
 from concurrent.futures import Future, ThreadPoolExecutor
 from contextlib import redirect_stdout
 import copy
@@ -12,13 +13,14 @@ import random
 import string
 import threading
 import traceback
-from typing import Callable, Iterator, TypeVar, Union
+from typing import Callable, Iterator, TextIO, TypeVar, Union
 from dateutil import parser
 import subprocess
 import sys
 import time
 import click
 import yaml
+from prompt_toolkit.completion import Completer
 
 from . import __version__
 
@@ -26,10 +28,28 @@ T = TypeVar('T')
 
 log_state = threading.local()
 
-class
-is_debug
-
-
+class ConfigReadable:
+    def is_debug() -> bool:
+        pass
+
+    def get(self, key: str, default: T) -> T:
+        pass
+
+class ConfigHolder:
+    # the singleton pattern
+    def __new__(cls, *args, **kwargs):
+        if not hasattr(cls, 'instance'): cls.instance = super(ConfigHolder, cls).__new__(cls)
+
+        return cls.instance
+
+    def __init__(self):
+        if not hasattr(self, 'config'):
+            # set by Config
+            self.config: 'ConfigReadable' = None
+            # only for testing
+            self.is_display_help = True
+            # set by ReplSession
+            self.append_command_history = lambda entry: None
 
 NO_SORT = 0
 SORT = 1
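This hunk replaces a small config class with two pieces: `ConfigReadable`, the read interface, and `ConfigHolder`, a process-wide singleton that `Config` and `ReplSession` populate at startup (presumably to avoid an import cycle into `utils`). A condensed sketch of how the `__new__`-based singleton in this hunk behaves:

class ConfigHolder:
    # __new__ always hands back the same instance (the singleton pattern)
    def __new__(cls, *args, **kwargs):
        if not hasattr(cls, 'instance'):
            cls.instance = super().__new__(cls)
        return cls.instance

    def __init__(self):
        # guard so repeated construction does not reset registered state
        if not hasattr(self, 'config'):
            self.config = None                                 # set by Config
            self.is_display_help = True                        # only for testing
            self.append_command_history = lambda entry: None   # set by ReplSession

a, b = ConfigHolder(), ConfigHolder()
assert a is b  # every caller sees the same holder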
@@ -109,14 +129,24 @@ def log(s = None):
 
     return True
 
-def log2(s = None, nl = True):
+def log2(s = None, nl = True, file: str = None):
     if not loggable():
         return False
 
     if s:
-
+        if file:
+            with open(file, 'at') as f:
+                f.write(s)
+                if nl:
+                    f.write('\n')
+        else:
+            click.echo(s, err=True, nl=nl)
     else:
-
+        if file:
+            with open(file, 'at') as f:
+                f.write('\n')
+        else:
+            print(file=sys.stderr)
 
     return True
 
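With the new `file` parameter, `log2` appends to a log file instead of echoing to stderr; `Ing` below threads its `job_log` path through this. A short usage sketch, assuming the reconstructed signature above (the path is a placeholder):

log2('repair started')                                       # stderr, as before
log2('repair started', file='/tmp/qing-db/q/logs/job1.log')  # appended to a job log
log2('restarting...', nl=False)                              # no trailing newline, for the 'msg... OK' progress style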
@@ -160,7 +190,11 @@ def deep_merge_dicts(dict1, dict2):
             merged_dict[key] = deep_merge_dicts(merged_dict[key], value)
         elif key not in merged_dict or value:
             # Otherwise, overwrite or add the value from dict2
-            merged_dict[key]
+            if key in merged_dict and isinstance(merged_dict[key], Completer):
+                pass
+                # print('SEAN completer found, ignoring', key, value)
+            else:
+                merged_dict[key] = value
     return merged_dict
 
 def deep_sort_dict(d):
@@ -198,7 +232,7 @@ def get_deep_keys(d, current_path=""):
     return keys
 
 def display_help(replace_arg = False):
-    if not
+    if not ConfigHolder().is_display_help:
         return
 
     args = copy.copy(sys.argv)
@@ -250,28 +284,10 @@ def json_to_csv(json_data: list[dict[any, any]], delimiter: str = ','):
     else:
         return None
 
-def log_to_file(config: dict[any, any]):
-    with log_exc():
-        base = f"/kaqing/logs"
-        os.makedirs(base, exist_ok=True)
-
-        now = datetime.now()
-        timestamp_str = now.strftime("%Y%m%d-%H%M%S")
-        filename = f"{base}/login.{timestamp_str}.txt"
-        with open(filename, 'w') as f:
-            if isinstance(config, dict):
-                try:
-                    json.dump(config, f, indent=4)
-                except:
-                    f.write(config)
-            else:
-                f.write(config)
-
 def copy_config_file(rel_path: str, module: str, suffix: str = '.yaml', show_out = True):
-    dir = f'{Path.home()}/.kaqing'
+    dir = creating_dir(f'{Path.home()}/.kaqing')
     path = f'{dir}/{rel_path}'
     if not os.path.exists(path):
-        os.makedirs(dir, exist_ok=True)
         module = importlib.import_module(module)
         with open(path, 'w') as f:
             yaml.dump(module.config(), f, default_flow_style=False)
@@ -287,11 +303,15 @@ def is_lambda(func):
     return callable(func) and hasattr(func, '__name__') and func.__name__ == '<lambda>'
 
 def debug(s = None):
-    if
+    if ConfigHolder().config.is_debug():
         log2(f'DEBUG {s}')
 
+def debug_complete(s = None):
+    CommandLog.log(f'DEBUG {s}', config=ConfigHolder().config.get('debugs.complete', 'off'))
+
 def debug_trace():
-    if
+    if ConfigHolder().config.is_debug():
+    # if LogConfig.is_debug():
         log2(traceback.format_exc())
 
 def in_docker() -> bool:
@@ -309,34 +329,42 @@ def in_docker() -> bool:
     return False
 
 class Ing:
-    def __init__(self, msg: str, suppress_log=False):
+    def __init__(self, msg: str, suppress_log=False, job_log: str = None, condition = True):
         self.msg = msg
         self.suppress_log = suppress_log
+        self.job_log = job_log
+        self.condition = condition
 
     def __enter__(self):
+        if not self.condition:
+            return None
+
         if not hasattr(log_state, 'ing_cnt'):
             log_state.ing_cnt = 0
 
         try:
             if not log_state.ing_cnt:
-                if not self.suppress_log and not
-                    log2(f'{self.msg}...', nl=False)
+                if not self.suppress_log and not ConfigHolder().config.is_debug():
+                    log2(f'{self.msg}...', nl=False, file=self.job_log)
 
             return None
         finally:
             log_state.ing_cnt += 1
 
     def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.condition:
+            return False
+
         log_state.ing_cnt -= 1
         if not log_state.ing_cnt:
-            if not self.suppress_log and not
-                log2(' OK')
+            if not self.suppress_log and not ConfigHolder().config.is_debug():
+                log2(' OK', file=self.job_log)
 
         return False
 
-def ing(msg: str, body: Callable[[], None]=None, suppress_log=False):
+def ing(msg: str, body: Callable[[], None]=None, suppress_log=False, job_log: str = None, condition = True):
     if not body:
-        return Ing(msg, suppress_log=suppress_log)
+        return Ing(msg, suppress_log=suppress_log, job_log=job_log, condition=condition)
 
     r = None
 
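`Ing` and its `ing` factory gain `job_log` (forwarded to `log2(file=...)`) and `condition`, so a progress message can be redirected to a job log or suppressed without changing the call shape; when `condition` is false, `__enter__` and `__exit__` become no-ops. A usage sketch, assuming the signatures above (`quiet`, `job_log_path`, and `restart_all_nodes` are placeholders):

# prints 'Restarting nodes...' then ' OK', or appends both to job_log_path if given
with ing('Restarting nodes', condition=not quiet, job_log=job_log_path):
    restart_all_nodes()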
@@ -350,7 +378,7 @@ def ing(msg: str, body: Callable[[], None]=None, suppress_log=False):
     return r
 
 def loggable():
-    return
+    return ConfigHolder().config and ConfigHolder().config.is_debug() or not hasattr(log_state, 'ing_cnt') or not log_state.ing_cnt
 
 class TimingNode:
     def __init__(self, depth: int, s0: time.time = time.time(), line: str = None):
@@ -378,7 +406,7 @@ class LogTiming:
         self.s0 = s0
 
     def __enter__(self):
-        if
+        if (config := ConfigHolder().config.get('debugs.timings', 'off')) not in ['on', 'file']:
             return
 
         if not hasattr(log_state, 'timings'):
@@ -390,7 +418,7 @@ class LogTiming:
         self.s0 = time.time()
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        if
+        if (config := ConfigHolder().config.get('debugs.timings', 'off')) not in ['on', 'file']:
             return False
 
         child = log_state.timings
@@ -401,7 +429,9 @@ class LogTiming:
         log_state.timings = self.me
 
         if not self.me.depth:
-
+            # log timings finally
+            CommandLog.log(self.me.tree(), config)
+
             log_state.timings = TimingNode(0)
 
         return False
@@ -410,7 +440,7 @@ def log_timing(msg: str, body: Callable[[], None]=None, s0: time.time = None):
     if not s0 and not body:
         return LogTiming(msg, s0=s0)
 
-    if not
+    if not ConfigHolder().config.get('debugs.timings', False):
         if body:
             return body()
 
@@ -430,7 +460,9 @@ def log_timing(msg: str, body: Callable[[], None]=None, s0: time.time = None):
 
 def timing_log_line(depth: int, msg: str, s0: time.time):
     elapsed = time.time() - s0
-
+    offloaded = '-' if threading.current_thread().name.startswith('offload') or threading.current_thread().name.startswith('async') else '+'
+    prefix = f'[{offloaded} timings] '
+
     if depth:
         if elapsed > 0.01:
             prefix = (' ' * (depth-1)) + '* '
@@ -526,7 +558,7 @@ class LogTrace:
         elif self.err_msg is not False and self.err_msg:
             log2(self.err_msg)
 
-        if self.err_msg is not False and
+        if self.err_msg is not False and ConfigHolder().config.is_debug():
             traceback.print_exception(exc_type, exc_val, exc_tb, file=sys.stderr)
 
         # swallow exception
@@ -567,37 +599,44 @@ class ParallelService:
         else:
             return iterator
 
+thread_pools: dict[str, ThreadPoolExecutor] = {}
+thread_pool_lock = threading.Lock()
+
 class ParallelMapHandler:
-    def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True):
+    def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True, name = None):
         self.collection = collection
         self.workers = workers
         self.executor = None
         self.samples = samples
         self.msg = msg
         if msg and msg.startswith('d`'):
-            if
+            if ConfigHolder().config.is_debug():
                 self.msg = msg.replace('d`', '', 1)
             else:
                 self.msg = None
         self.collect = collect
+        self.name = name
 
         self.begin = []
         self.end = []
         self.start_time = None
 
     def __enter__(self):
+        self.start_time = None
+
         self.calc_msgs()
 
         if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
             self.start_time = time.time()
 
-            self.executor =
+            self.executor = self.pool()
+            # self.executor = ThreadPoolExecutor(max_workers=self.workers)
             self.executor.__enter__()
 
         return ParallelService(self)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        if self.executor:
+        if not self.name and self.executor:
             self.executor.__exit__(exc_type, exc_val, exc_tb)
 
         if self.end:
@@ -605,6 +644,15 @@ class ParallelMapHandler:
 
         return False
 
+    def pool(self, thread_name_prefix: str = None):
+        if not self.name:
+            return ThreadPoolExecutor(max_workers=self.workers)
+
+        if self.name not in thread_pools:
+            thread_pools[self.name] = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix=thread_name_prefix)
+
+        return thread_pools[self.name]
+
     def size(self):
         if not self.collection:
             return 0
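The new module-level `thread_pools` registry and `pool()` method let a named handler reuse one long-lived `ThreadPoolExecutor` across invocations; note that `__exit__` above deliberately skips shutdown when `self.name` is set, so named pools survive the with-block. A sketch of the lookup logic in isolation (the free-function form is an illustration, not the package's API):

from concurrent.futures import ThreadPoolExecutor

thread_pools: dict[str, ThreadPoolExecutor] = {}

def pool(name: str, workers: int, thread_name_prefix: str = None) -> ThreadPoolExecutor:
    if not name:
        # anonymous callers get a throwaway pool; the caller shuts it down
        return ThreadPoolExecutor(max_workers=workers)
    # named callers share one cached pool that outlives the with-block
    if name not in thread_pools:
        thread_pools[name] = ThreadPoolExecutor(max_workers=workers, thread_name_prefix=thread_name_prefix)
    return thread_pools[name]

One visible trade-off: cached executors are never shut down, which is presumably acceptable in a long-running REPL process. The `thread_pool_lock` declared above is not used in this hunk, so concurrent first lookups of the same name could still race.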
@@ -615,25 +663,28 @@ class ParallelMapHandler:
         if not self.msg:
             return
 
+        self.begin = []
+        self.end = []
         size = self.size()
         offloaded = False
         serially = False
         sampling = False
         if size == 0:
             offloaded = True
-
+            msg = self.msg.replace('{size}', '1')
         elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
-
+            msg = self.msg.replace('{size}', f'{size}')
         elif self.samples < sys.maxsize:
             sampling = True
+            samples = self.samples
             if self.samples > size:
-
-
+                samples = size
+            msg = self.msg.replace('{size}', f'{samples}/{size} sample')
         else:
             serially = True
-
+            msg = self.msg.replace('{size}', f'{size}')
 
-        for token in
+        for token in msg.split(' '):
             if '|' in token:
                 self.begin.append(token.split('|')[0])
         if not sampling and not serially and not offloaded:
@@ -650,8 +701,19 @@
         else:
             log2(f'{" ".join(self.begin)} with {self.workers} workers...')
 
-
-
+# parallelizers: dict[str, ParallelMapHandler] = {}
+# parallelizer_lock = threading.Lock()
+
+def parallelize(collection: list, workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True, name = None):
+    return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect, name = name)
+    # if not name:
+    #     return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect)
+
+    # with parallelizer_lock:
+    #     if name not in parallelizers:
+    #         parallelizers[name] = ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect, name = name)
+
+    #     return parallelizers[name]
 
 class OffloadService:
     def __init__(self, handler: 'OffloadHandler'):
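Putting the pieces together, callers use `parallelize` as a context manager and map work across a collection; the message mini-format is handled by `calc_msgs` above ('d`' marks the message debug-only, 'Running|Ran' carries the begin and end verbs, and '{size}' is replaced by the effective item count). A usage sketch (`check_pod` and the pod names are placeholders):

pods = ['cassandra-dc1-sts-0', 'cassandra-dc1-sts-1', 'cassandra-dc1-sts-2']

with parallelize(pods, workers=8, msg='d`Running|Ran health check on {size} pods', name='health') as svc:
    results = svc.map(lambda pod: check_pod(pod))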
@@ -670,22 +732,24 @@ class OffloadService:
         return future
 
 class OffloadHandler(ParallelMapHandler):
-    def __init__(self, max_workers: int, msg: str = None):
-        super().__init__(None, max_workers, msg=msg, collect=False )
+    def __init__(self, max_workers: int, msg: str = None, name: str = None):
+        super().__init__(None, max_workers, msg=msg, collect=False, name=f'offload-{name}')
 
     def __enter__(self):
+        self.start_time = None
         self.calc_msgs()
 
-        if self.workers > 1
+        if self.workers > 1:
             self.start_time = time.time()
 
-            self.executor =
+            self.executor = self.pool(thread_name_prefix='offload')
+            # self.executor = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix='offload')
             self.executor.__enter__()
 
         return OffloadService(self)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        if self.executor:
+        if not self.name and self.executor:
             self.executor.__exit__(exc_type, exc_val, exc_tb)
 
         if self.end:
@@ -693,38 +757,33 @@ class OffloadHandler(ParallelMapHandler):
 
         return False
 
-    def size(self):
-        if not self.collection:
-            return 0
-
-        return len(self.collection)
-
     def calc_msgs(self):
         if not self.msg:
             return
 
+        self.begin = []
+        self.end = []
         size = self.size()
-        # return
 
         offloaded = False
         serially = False
         sampling = False
         if size == 0:
             offloaded = True
-
+            msg = self.msg.replace('{size}', '1')
         elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
-
+            msg = self.msg.replace('{size}', f'{size}')
         elif self.samples < sys.maxsize:
             sampling = True
-
-
-
+            samples = self.samples
+            if samples > size:
+                samples = size
+            msg = self.msg.replace('{size}', f'{samples}/{size} sample')
         else:
             serially = True
-
-            # return
+            msg = self.msg.replace('{size}', f'{size}')
 
-        for token in
+        for token in msg.split(' '):
             if '|' in token:
                 self.begin.append(token.split('|')[0])
         if not sampling and not serially and not offloaded:
@@ -741,5 +800,83 @@ class OffloadHandler(ParallelMapHandler):
         else:
             log2(f'{" ".join(self.begin)} with {self.workers} workers...')
 
-def offload(max_workers: int = 3, msg: str = None):
-    return OffloadHandler(max_workers, msg = msg)
+def offload(max_workers: int = 3, msg: str = None, name: str = None):
+    return OffloadHandler(max_workers, msg = msg, name = name)
+
+def kaqing_log_file_name(suffix = 'log'):
+    return f"{log_dir()}/{datetime.now().strftime('%d%H%M%S')}.{suffix}"
+
+def log_dir():
+    return creating_dir(ConfigHolder().config.get('log-dir', '/tmp/qing-db/q/logs'))
+
+class LogFileHandler:
+    def __init__(self, suffix = 'log', condition=True):
+        self.suffix = suffix
+        self.condition = condition
+
+    def __enter__(self):
+        self.f = None
+        if self.condition:
+            self.f = open(kaqing_log_file_name(suffix=self.suffix), 'w')
+            self.f.__enter__()
+
+        return self.f
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.f:
+            self.f.__exit__(exc_type, exc_val, exc_tb)
+
+            if ConfigHolder().append_command_history:
+                ConfigHolder().append_command_history(f':cat {self.f.name}')
+
+        return False
+
+def kaqing_log_file(suffix = 'log', condition=True):
+    return LogFileHandler(suffix = suffix, condition=condition)
+
+class CommandLog:
+    log_file = None
+
+    def log(line: str, config: str = 'off'):
+        if config == 'file':
+            if not CommandLog.log_file:
+                try:
+                    CommandLog.log_file = open(kaqing_log_file_name(suffix='cmd.log'), 'w')
+                except:
+                    pass
+
+            try:
+                CommandLog.log_file.write(line + '\n')
+            except:
+                pass
+        elif config == 'on':
+            log2(line)
+
+    def close_log_file():
+        if CommandLog.log_file:
+            try:
+                CommandLog.log_file.close()
+            except:
+                pass
+
+            if ConfigHolder().append_command_history:
+                ConfigHolder().append_command_history(f':cat {CommandLog.log_file.name}')
+
+            CommandLog.log_file = None
+
+class ExecResult(ABC):
+    def exit_code(self) -> int:
+        pass
+
+    def cat_log_file_cmd(self) -> str:
+        pass
+
+_dirs_created = set()
+
+def creating_dir(dir):
+    if dir not in _dirs_created:
+        _dirs_created.add(dir)
+        if not os.path.exists(dir):
+            os.makedirs(dir, exist_ok=True)
+
+    return dir
adam/utils_k8s/app_clusters.py
CHANGED

@@ -2,7 +2,7 @@ import sys
 from typing import TypeVar
 
 from adam.utils_k8s.app_pods import AppPods
-from adam.pod_exec_result import PodExecResult
+from adam.utils_k8s.pod_exec_result import PodExecResult
 from adam.utils import log, log2
 from adam.utils_k8s.pods import Pods
 from .kube_context import KubeContext
@@ -11,8 +11,15 @@ T = TypeVar('T')
 
 # utility collection on app clusters; methods are all static
 class AppClusters:
-    def exec(pods: list[str],
-
+    def exec(pods: list[str],
+             namespace: str,
+             command: str,
+             action: str = 'action',
+             max_workers=0,
+             show_out=True,
+             on_any = False,
+             shell = '/bin/sh',
+             backgrounded = False) -> list[PodExecResult]:
         samples = 1 if on_any else sys.maxsize
         msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
         with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
@@ -23,6 +30,6 @@ class AppClusters:
                 if result.stdout:
                     log(result.stdout)
                 if result.stderr:
-                    log2(result.stderr
+                    log2(result.stderr)
 
         return results
adam/utils_k8s/app_pods.py
CHANGED

@@ -4,7 +4,7 @@ from kubernetes import client
 
 from adam.config import Config
 from adam.utils_k8s.pods import Pods
-from adam.pod_exec_result import PodExecResult
+from adam.utils_k8s.pod_exec_result import PodExecResult
 from adam.repl_session import ReplSession
 
 # utility collection on app pods; methods are all static
@@ -25,12 +25,17 @@ class AppPods:
 
         return v1.list_namespaced_pod(namespace, label_selector=label_selector).items
 
-    def exec(pod_name: str,
+    def exec(pod_name: str,
+             namespace: str,
+             command: str,
+             show_out = True,
+             throw_err = False,
+             shell = '/bin/sh',
+             backgrounded = False) -> PodExecResult:
         container = Config().get('app.container-name', 'c3-server')
         r = Pods.exec(pod_name, container, namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, backgrounded = backgrounded)
 
-        if r and
-
-            ReplSession().prompt_session.history.append_string(f'@{r.pod} cat {r.log_file}')
+        if r and r.log_file:
+            ReplSession().append_history(f':cat {r.log_file}')
 
         return r
adam/utils_k8s/cassandra_clusters.py
CHANGED

@@ -3,7 +3,7 @@ from typing import TypeVar
 
 from adam.config import Config
 from adam.utils_k8s.cassandra_nodes import CassandraNodes
-from adam.pod_exec_result import PodExecResult
+from adam.utils_k8s.pod_exec_result import PodExecResult
 from adam.utils import log, log2
 from adam.utils_k8s.pods import Pods
 from adam.utils_k8s.statefulsets import StatefulSets
@@ -21,20 +21,24 @@ class CassandraClusters:
              on_any = False,
              shell = '/bin/sh',
              backgrounded = False,
-             log_file = None
+             log_file = None,
+             history=True) -> list[PodExecResult]:
 
         pods = StatefulSets.pod_names(sts, namespace)
         samples = 1 if on_any else sys.maxsize
+        if (backgrounded or command.endswith(' &')) and not log_file:
+            log_file = Pods.log_file(command)
+
         msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
         with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
-            results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, backgrounded, log_file))
+            results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, backgrounded, log_file, history))
             for result in results:
                 if show_out and not Config().is_debug():
                     log(result.command)
                 if result.stdout:
                     log(result.stdout)
                 if result.stderr:
-                    log2(result.stderr
+                    log2(result.stderr)
 
         return results
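The cassandra_clusters change closes a gap for backgrounded commands: if the command is backgrounded (the flag, or a trailing ' &') and no log file was supplied, one is derived via `Pods.log_file(command)` so output survives the exec, and the new `history` flag is threaded through to `CassandraNodes.exec`. Combined with the app_pods change above, the REPL history then receives a ':cat <log_file>' entry for replaying the captured output. A flow sketch, where the statefulset and namespace names are placeholders:

results = CassandraClusters.exec('cassandra-dc1-sts', 'prod-ns', 'nodetool repair &', backgrounded=True)

for r in results:
    # each PodExecResult carries the derived log file path
    print(r.pod, r.log_file)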