kaqing 2.0.174__py3-none-any.whl → 2.0.188__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kaqing may be problematic.
- adam/app_session.py +2 -2
- adam/apps.py +18 -4
- adam/batch.py +1 -1
- adam/checks/check_utils.py +3 -1
- adam/commands/__init__.py +4 -2
- adam/commands/app/__init__.py +0 -0
- adam/commands/app/app.py +38 -0
- adam/commands/app/app_ping.py +38 -0
- adam/commands/app/show_app_actions.py +49 -0
- adam/commands/app/show_app_id.py +44 -0
- adam/commands/app/show_app_queues.py +38 -0
- adam/commands/app/utils_app.py +106 -0
- adam/commands/audit/audit.py +9 -27
- adam/commands/audit/audit_repair_tables.py +5 -7
- adam/commands/audit/audit_run.py +1 -1
- adam/commands/audit/completions_l.py +15 -0
- adam/commands/audit/show_last10.py +2 -14
- adam/commands/audit/show_slow10.py +2 -13
- adam/commands/audit/show_top10.py +2 -11
- adam/commands/audit/utils_show_top10.py +14 -1
- adam/commands/bash/bash.py +1 -1
- adam/commands/cat.py +3 -7
- adam/commands/check.py +2 -2
- adam/commands/cli_commands.py +6 -1
- adam/commands/{cp.py → clipboard_copy.py} +18 -12
- adam/commands/code.py +2 -2
- adam/commands/command.py +61 -11
- adam/commands/commands_utils.py +19 -12
- adam/commands/cql/completions_c.py +28 -0
- adam/commands/cql/cqlsh.py +3 -7
- adam/commands/cql/utils_cql.py +22 -58
- adam/commands/deploy/deploy_pg_agent.py +2 -2
- adam/commands/deploy/undeploy_pg_agent.py +2 -2
- adam/commands/devices/device.py +39 -8
- adam/commands/devices/device_app.py +18 -28
- adam/commands/devices/device_auit_log.py +3 -3
- adam/commands/devices/device_cass.py +16 -22
- adam/commands/devices/device_export.py +6 -3
- adam/commands/devices/device_postgres.py +79 -63
- adam/commands/download_file.py +47 -0
- adam/commands/export/clean_up_all_export_sessions.py +3 -3
- adam/commands/export/clean_up_export_sessions.py +5 -10
- adam/commands/export/completions_x.py +11 -0
- adam/commands/export/download_export_session.py +40 -0
- adam/commands/export/export.py +0 -16
- adam/commands/export/export_databases.py +26 -9
- adam/commands/export/export_select.py +9 -58
- adam/commands/export/export_sessions.py +90 -5
- adam/commands/export/export_use.py +13 -10
- adam/commands/export/export_x_select.py +48 -0
- adam/commands/export/exporter.py +60 -22
- adam/commands/export/import_files.py +44 -0
- adam/commands/export/import_session.py +8 -4
- adam/commands/export/importer.py +7 -0
- adam/commands/export/importer_athena.py +101 -34
- adam/commands/export/importer_sqlite.py +30 -5
- adam/commands/export/show_column_counts.py +11 -11
- adam/commands/export/show_export_databases.py +5 -3
- adam/commands/export/show_export_session.py +5 -6
- adam/commands/export/show_export_sessions.py +4 -11
- adam/commands/export/utils_export.py +42 -14
- adam/commands/find_files.py +51 -0
- adam/commands/find_processes.py +76 -0
- adam/commands/head.py +36 -0
- adam/commands/help.py +2 -2
- adam/commands/intermediate_command.py +6 -3
- adam/commands/ls.py +1 -1
- adam/commands/medusa/medusa_backup.py +12 -14
- adam/commands/medusa/medusa_restore.py +20 -15
- adam/commands/medusa/medusa_show_backupjobs.py +6 -4
- adam/commands/medusa/medusa_show_restorejobs.py +5 -3
- adam/commands/medusa/utils_medusa.py +15 -0
- adam/commands/nodetool.py +3 -8
- adam/commands/param_get.py +2 -3
- adam/commands/param_set.py +1 -1
- adam/commands/postgres/completions_p.py +22 -0
- adam/commands/postgres/postgres.py +14 -21
- adam/commands/postgres/postgres_databases.py +270 -0
- adam/commands/postgres/utils_postgres.py +29 -20
- adam/commands/preview_table.py +3 -1
- adam/commands/pwd.py +3 -3
- adam/commands/reaper/reaper_forward.py +2 -2
- adam/commands/reaper/reaper_runs.py +3 -3
- adam/commands/reaper/reaper_schedule_activate.py +6 -2
- adam/commands/reaper/reaper_schedule_start.py +1 -2
- adam/commands/reaper/reaper_schedule_stop.py +1 -2
- adam/commands/reaper/utils_reaper.py +13 -6
- adam/commands/repair/repair_scan.py +0 -2
- adam/commands/repair/repair_stop.py +0 -1
- adam/commands/shell.py +7 -5
- adam/commands/show/show.py +1 -1
- adam/commands/show/show_adam.py +3 -3
- adam/commands/show/show_cassandra_repairs.py +5 -3
- adam/commands/show/show_cassandra_status.py +27 -20
- adam/commands/show/{show_commands.py → show_cli_commands.py} +2 -2
- adam/commands/show/show_login.py +2 -2
- adam/commands/show/show_params.py +2 -5
- adam/commands/show/show_processes.py +15 -14
- adam/commands/show/show_storage.py +9 -8
- adam/config.py +1 -0
- adam/embedded_params.py +1 -1
- adam/repl.py +16 -9
- adam/repl_commands.py +16 -9
- adam/repl_session.py +8 -1
- adam/repl_state.py +33 -10
- adam/sql/lark_completer.py +284 -0
- adam/sql/lark_parser.py +604 -0
- adam/sql/sql_state_machine.py +8 -2
- adam/utils.py +116 -29
- adam/utils_athena.py +7 -8
- adam/utils_issues.py +2 -2
- adam/utils_k8s/app_clusters.py +2 -2
- adam/utils_k8s/app_pods.py +5 -2
- adam/utils_k8s/cassandra_clusters.py +11 -3
- adam/utils_k8s/cassandra_nodes.py +2 -2
- adam/utils_k8s/k8s.py +9 -0
- adam/utils_k8s/kube_context.py +2 -2
- adam/utils_k8s/pods.py +23 -5
- adam/utils_k8s/statefulsets.py +5 -2
- adam/utils_local.py +4 -0
- adam/utils_repl/appendable_completer.py +6 -0
- adam/utils_repl/repl_completer.py +128 -2
- adam/utils_sqlite.py +2 -2
- adam/version.py +1 -1
- {kaqing-2.0.174.dist-info → kaqing-2.0.188.dist-info}/METADATA +1 -1
- kaqing-2.0.188.dist-info/RECORD +253 -0
- kaqing-2.0.188.dist-info/top_level.txt +2 -0
- teddy/__init__.py +0 -0
- teddy/lark_parser.py +436 -0
- teddy/lark_parser2.py +618 -0
- adam/commands/cql/cql_completions.py +0 -32
- adam/commands/export/export_select_x.py +0 -54
- adam/commands/postgres/postgres_context.py +0 -272
- adam/commands/postgres/psql_completions.py +0 -10
- kaqing-2.0.174.dist-info/RECORD +0 -230
- kaqing-2.0.174.dist-info/top_level.txt +0 -1
- {kaqing-2.0.174.dist-info → kaqing-2.0.188.dist-info}/WHEEL +0 -0
- {kaqing-2.0.174.dist-info → kaqing-2.0.188.dist-info}/entry_points.txt +0 -0
adam/utils.py
CHANGED

@@ -20,19 +20,33 @@ import time
 import click
 import yaml
 
+from prompt_toolkit.completion import Completer
+
 from . import __version__
 
+T = TypeVar('T')
+
 log_state = threading.local()
 
 class LogConfig:
     is_debug = lambda: False
     is_debug_timing = lambda: False
+    is_debug_complete = lambda: False
     is_display_help = True
 
-
-
+NO_SORT = 0
+SORT = 1
+REVERSE_SORT = -1
+
+def tabulize(lines: list[T], fn: Callable[..., T] = None, header: str = None, dashed_line = False, separator = ' ', to: int = 1, sorted: int = NO_SORT):
+    if fn:
+        lines = list(map(fn, lines))
+
+    if sorted == SORT:
+        lines.sort()
+    elif sorted == REVERSE_SORT:
+        lines.sort(reverse=True)
 
-def lines_to_tabular(lines: list[str], header: str = None, dashed_line = False, separator = ' '):
     maxes = []
     nls = []
 
@@ -63,7 +77,14 @@ def lines_to_tabular(lines: list[str], header: str = None, dashed_line = False,
     for line in lines:
         format_line(line)
 
-    return '\n'.join(nls)
+    table = '\n'.join(nls)
+
+    if to == 1:
+        log(table)
+    elif to == 2:
+        log2(table)
+
+    return table
 
 def convert_seconds(total_seconds_float):
     total_seconds_int = int(total_seconds_float) # Convert float to integer seconds
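The new `tabulize` entry point replaces `lines_to_tabular`: it can map raw items to lines, sort them, and either log the table (`to=1` via `log`, `to=2` via `log2`) or just return it (any other value, as `utils_athena.py` below does with `to=0`). A minimal usage sketch based only on the signature shown in this diff; the sample rows are invented:

    from adam.utils import SORT, tabulize

    rows = [('pod-1', 'Pending'), ('pod-0', 'Running')]  # invented sample data
    table = tabulize(rows,
                     fn=lambda r: f'{r[0]}\t{r[1]}',  # map each item to one line
                     header='NAME\tSTATUS',
                     separator='\t',
                     sorted=SORT,                     # NO_SORT (default), SORT, or REVERSE_SORT
                     to=2)                            # 1 -> log(), 2 -> log2(), else return only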
@@ -117,7 +138,7 @@ def duration(start_time: float, end_time: float = None):
     d = convert_seconds(end_time - start_time)
     t = []
     if d:
-        t.append(f'{d}h')
+        t.append(f'{d[0]}h')
     if t or d[1]:
         t.append(f'{d[1]}m')
     t.append(f'{d[2]}s')
@@ -142,7 +163,10 @@ def deep_merge_dicts(dict1, dict2):
             merged_dict[key] = deep_merge_dicts(merged_dict[key], value)
         elif key not in merged_dict or value:
             # Otherwise, overwrite or add the value from dict2
-            merged_dict[key] = value
+            if key in merged_dict and isinstance(merged_dict[key], Completer):
+                print('SEAN completer found, ignoring', key, value)
+            else:
+                merged_dict[key] = value
     return merged_dict
 
 def deep_sort_dict(d):
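The `deep_merge_dicts` change above stops a dict2 value from clobbering an existing prompt_toolkit `Completer` during a merge. A small sketch of the surrounding merge semantics as implied by the conditionals shown; the values are illustrative only:

    from adam.utils import deep_merge_dicts

    base = {'a': {'x': 1}, 'b': 2}
    extra = {'a': {'y': 3}, 'b': None}  # falsy values never overwrite existing keys
    merged = deep_merge_dicts(base, extra)
    assert merged == {'a': {'x': 1, 'y': 3}, 'b': 2}  # nested dicts merge; 'b' is kept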
@@ -272,6 +296,10 @@ def debug(s = None):
     if LogConfig.is_debug():
         log2(f'DEBUG {s}')
 
+def debug_complete(s = None):
+    if LogConfig.is_debug_complete():
+        log2(f'DEBUG {s}')
+
 def debug_trace():
     if LogConfig.is_debug():
         log2(traceback.format_exc())

@@ -411,8 +439,11 @@ def log_timing(msg: str, body: Callable[[], None]=None, s0: time.time = None):
         return r
 
 def timing_log_line(depth: int, msg: str, s0: time.time):
+    # print('SEAN log timing', msg, threading.current_thread().name)
     elapsed = time.time() - s0
-
+    offloaded = '-' if threading.current_thread().name.startswith('offload') or threading.current_thread().name.startswith('async') else '+'
+    prefix = f'[{offloaded} timings] '
+
     if depth:
         if elapsed > 0.01:
             prefix = (' ' * (depth-1)) + '* '
@@ -432,6 +463,66 @@ def wait_log(msg: str):
 def clear_wait_log_flag():
     WaitLog.wait_log_flag = False
 
+def bytes_generator_from_file(file_path, chunk_size=4096):
+    with open(file_path, 'rb') as f:
+        while True:
+            chunk = f.read(chunk_size)
+            if not chunk:
+                break
+            yield chunk
+
+class GeneratorStream(io.RawIOBase):
+    def __init__(self, generator):
+        self._generator = generator
+        self._buffer = b''  # Buffer to store leftover bytes from generator yields
+
+    def readable(self):
+        return True
+
+    def _read_from_generator(self):
+        try:
+            chunk = next(self._generator)
+            if isinstance(chunk, str):
+                chunk = chunk.encode('utf-8')  # Encode if generator yields strings
+            self._buffer += chunk
+        except StopIteration:
+            pass  # Generator exhausted
+
+    def readinto(self, b):
+        # Fill the buffer if necessary
+        while len(self._buffer) < len(b):
+            old_buffer_len = len(self._buffer)
+            self._read_from_generator()
+            if len(self._buffer) == old_buffer_len:  # Generator exhausted and buffer empty
+                break
+
+        bytes_to_read = min(len(b), len(self._buffer))
+        b[:bytes_to_read] = self._buffer[:bytes_to_read]
+        self._buffer = self._buffer[bytes_to_read:]
+        return bytes_to_read
+
+    def read(self, size=-1):
+        if size == -1:  # Read all remaining data
+            while True:
+                old_buffer_len = len(self._buffer)
+                self._read_from_generator()
+                if len(self._buffer) == old_buffer_len:
+                    break
+            data = self._buffer
+            self._buffer = b''
+            return data
+        else:
+            # Ensure enough data in buffer
+            while len(self._buffer) < size:
+                old_buffer_len = len(self._buffer)
+                self._read_from_generator()
+                if len(self._buffer) == old_buffer_len:
+                    break
+
+            data = self._buffer[:size]
+            self._buffer = self._buffer[size:]
+            return data
+
 class LogTrace:
     def __init__(self, err_msg: Union[str, callable, bool] = None):
         self.err_msg = err_msg
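`GeneratorStream` adapts any iterator of byte (or str) chunks into a readable `io.RawIOBase`, which the new `Pods.download_file` in `adam/utils_k8s/pods.py` below uses to spool a remote file to disk. A self-contained sketch of the behavior implemented above:

    import io
    from adam.utils import GeneratorStream  # bytes_generator_from_file pairs with it for local files

    stream = GeneratorStream(iter([b'hello ', 'world']))  # str chunks are utf-8 encoded
    assert stream.read(5) == b'hello'                     # partial read from the buffer
    assert stream.read() == b' world'                     # read(-1) drains the generator

    # readinto() support means it also works behind stdlib buffered readers:
    assert io.BufferedReader(GeneratorStream(iter([b'a', b'b']))).read() == b'ab'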
@@ -457,8 +548,6 @@ class LogTrace:
 def log_exc(err_msg: Union[str, callable, bool] = None):
     return LogTrace(err_msg=err_msg)
 
-T = TypeVar('T')
-
 class ParallelService:
     def __init__(self, handler: 'ParallelMapHandler'):
         self.handler = handler

@@ -492,9 +581,9 @@ class ParallelService:
         return iterator
 
 class ParallelMapHandler:
-    def __init__(self, collection: list, max_workers: int, samples: int = sys.maxsize, msg: str = None, collect = True):
+    def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True):
         self.collection = collection
-        self.max_workers = max_workers
+        self.workers = workers
         self.executor = None
         self.samples = samples
         self.msg = msg

@@ -512,10 +601,10 @@ class ParallelMapHandler:
     def __enter__(self):
         self.calc_msgs()
 
-        if self.max_workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
+        if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
             self.start_time = time.time()
 
-            self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
+            self.executor = ThreadPoolExecutor(max_workers=self.workers)
             self.executor.__enter__()
 
         return ParallelService(self)

@@ -540,15 +629,13 @@ class ParallelMapHandler:
             return
 
         size = self.size()
-        # return
-
         offloaded = False
         serially = False
         sampling = False
         if size == 0:
             offloaded = True
             self.msg = self.msg.replace('{size}', '1')
-        elif self.max_workers > 1 and size > 1 and self.samples == sys.maxsize:
+        elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
             self.msg = self.msg.replace('{size}', f'{size}')
         elif self.samples < sys.maxsize:
             sampling = True

@@ -558,7 +645,6 @@ class ParallelMapHandler:
         else:
             serially = True
             self.msg = self.msg.replace('{size}', f'{size}')
-            # return
 
         for token in self.msg.split(' '):
             if '|' in token:
@@ -575,7 +661,10 @@ class ParallelMapHandler:
         elif sampling or serially:
             log2(f'{" ".join(self.begin)} serially...')
         else:
-            log2(f'{" ".join(self.begin)} with {self.max_workers} workers...')
+            log2(f'{" ".join(self.begin)} with {self.workers} workers...')
+
+def parallelize(collection: list, workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True):
+    return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect)
 
 class OffloadService:
     def __init__(self, handler: 'OffloadHandler'):
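`parallelize` now lives above the offload section and takes `workers` instead of `max_workers`. A hedged usage sketch: `svc.map` mirrors the `exec.map` call sites in `app_clusters.py` and `cassandra_clusters.py` below, and the worker function is a stand-in:

    from adam.utils import parallelize

    def probe(pod):  # stand-in for real per-pod work
        return f'{pod}: ok'

    pods = ['pod-0', 'pod-1', 'pod-2']
    # msg supports a {size} placeholder and the d`Doing|Done verb-pair form seen at the call sites.
    with parallelize(pods, workers=3, msg='d`Probing|Probed {size} pods') as svc:
        results = svc.map(probe)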
@@ -600,10 +689,11 @@ class OffloadHandler(ParallelMapHandler):
     def __enter__(self):
         self.calc_msgs()
 
-        if self.max_workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
+        if self.workers > 1:
+        # if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
             self.start_time = time.time()
 
-            self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
+            self.executor = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix='offload')
             self.executor.__enter__()
 
         return OffloadService(self)

@@ -617,11 +707,11 @@ class OffloadHandler(ParallelMapHandler):
 
         return False
 
-    def size(self):
-        if not self.collection:
-            return 0
+    # def size(self):
+    #     if not self.collection:
+    #         return 0
 
-        return len(self.collection)
+    #     return len(self.collection)
 
     def calc_msgs(self):
         if not self.msg:

@@ -636,7 +726,7 @@ class OffloadHandler(ParallelMapHandler):
         if size == 0:
             offloaded = True
             self.msg = self.msg.replace('{size}', '1')
-        elif self.max_workers > 1 and size > 1 and self.samples == sys.maxsize:
+        elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
             self.msg = self.msg.replace('{size}', f'{size}')
         elif self.samples < sys.maxsize:
             sampling = True

@@ -663,10 +753,7 @@ class OffloadHandler(ParallelMapHandler):
         elif sampling or serially:
             log2(f'{" ".join(self.begin)} serially...')
         else:
-            log2(f'{" ".join(self.begin)} with {self.max_workers} workers...')
-
-def parallelize(collection: list, max_workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True):
-    return ParallelMapHandler(collection, max_workers, samples = samples, msg = msg, collect = collect)
+            log2(f'{" ".join(self.begin)} with {self.workers} workers...')
 
 def offload(max_workers: int = 3, msg: str = None):
     return OffloadHandler(max_workers, msg = msg)
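The `thread_name_prefix='offload'` added to `OffloadHandler.__enter__` is what the new `timing_log_line` prefix keys on: work running on offload/async threads is tagged `[- timings]`, everything else `[+ timings]`. A standalone illustration of that naming check (stdlib only):

    import threading
    from concurrent.futures import ThreadPoolExecutor

    def marker():
        # Mirrors timing_log_line: offloaded/async threads are tagged '-'.
        name = threading.current_thread().name
        return '-' if name.startswith('offload') or name.startswith('async') else '+'

    with ThreadPoolExecutor(max_workers=1, thread_name_prefix='offload') as ex:
        assert ex.submit(marker).result() == '-'  # worker thread is named 'offload_0'
    assert marker() == '+'                        # main thread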
adam/utils_athena.py
CHANGED

@@ -5,7 +5,7 @@ import boto3
 import botocore
 
 from adam.config import Config
-from adam.utils import lines_to_tabular, log, log2, log_exc, wait_log
+from adam.utils import tabulize, log, log2, log_exc, wait_log
 
 # no state utility class
 class Athena:

@@ -90,18 +90,17 @@ class Athena:
             if rs:
                 column_info = rs[0]['Data']
                 columns = [col.get('VarCharValue') for col in column_info]
-
-
-
-
-
-                out = lines_to_tabular(lines, header='\t'.join(columns), separator='\t')
+                out = tabulize(rs[1:],
+                               lambda r: '\t'.join(col.get('VarCharValue') if col else '' for col in r['Data']),
+                               header='\t'.join(columns),
+                               separator='\t',
+                               to=0)
                 if output:
                     log_file = output(out)
                 else:
                     log(out)
 
-                return len(lines), log_file
+                return len(rs)-1, log_file
             else:
                 log2(f"Query failed or was cancelled. State: {state}")
                 log2(f"Reason: {reason}")
adam/utils_issues.py
CHANGED

@@ -1,7 +1,7 @@
 from adam.checks.check_result import CheckResult
 from adam.checks.issue import Issue
 from adam.repl_session import ReplSession
-from adam.utils import lines_to_tabular, log2
+from adam.utils import tabulize, log2
 
 class IssuesUtils:
     def show(check_results: list[CheckResult], in_repl = False):

@@ -26,7 +26,7 @@ class IssuesUtils:
             if in_repl:
                 ReplSession().prompt_session.history.append_string(issue.suggestion)
                 suggested += 1
-
+        tabulize(lines, separator='||', to=2)
         if suggested:
             log2()
             log2(f'* {suggested} suggested commands are added to history. Press <Up> arrow to access them.')
adam/utils_k8s/app_clusters.py
CHANGED

@@ -12,11 +12,11 @@ T = TypeVar('T')
 # utility collection on app clusters; methods are all static
 class AppClusters:
     def exec(pods: list[str], namespace: str, command: str, action: str = 'action',
-             max_workers=0, show_out=True, on_any = False, shell = '/bin/sh') -> list[PodExecResult]:
+             max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', backgrounded = False) -> list[PodExecResult]:
         samples = 1 if on_any else sys.maxsize
         msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
         with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
-            results: list[PodExecResult] = exec.map(lambda pod: AppPods.exec(pod, namespace, command, False, False, shell))
+            results: list[PodExecResult] = exec.map(lambda pod: AppPods.exec(pod, namespace, command, False, False, shell, backgrounded))
             for result in results:
                 if KubeContext.show_out(show_out):
                     log(result.command)
adam/utils_k8s/app_pods.py
CHANGED

@@ -11,6 +11,9 @@ from adam.repl_session import ReplSession
 class AppPods:
     @functools.lru_cache()
     def pod_names(namespace: str, env: str, app: str):
+        if not env or not app:
+            return []
+
         return [pod.metadata.name for pod in AppPods.app_pods(namespace, env, app)]
 
     def app_pods(namespace: str, env: str, app: str) -> List[client.V1Pod]:

@@ -22,9 +25,9 @@ class AppPods:
 
         return v1.list_namespaced_pod(namespace, label_selector=label_selector).items
 
-    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh') -> PodExecResult:
+    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh', backgrounded = False) -> PodExecResult:
         container = Config().get('app.container-name', 'c3-server')
-        r = Pods.exec(pod_name, container, namespace, command, show_out = show_out, throw_err = throw_err, shell = shell)
+        r = Pods.exec(pod_name, container, namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, backgrounded = backgrounded)
 
         if r and Config().get('repl.history.push-cat-remote-log-file', True):
             if r.log_file and ReplSession().prompt_session:
adam/utils_k8s/cassandra_clusters.py
CHANGED

@@ -12,14 +12,22 @@ T = TypeVar('T')
 
 # utility collection on cassandra clusters; methods are all static
 class CassandraClusters:
-    def exec(sts: str, namespace: str, command: str, action: str = 'action',
-             max_workers=0, show_out=True, on_any = False, shell = '/bin/sh') -> list[PodExecResult]:
+    def exec(sts: str,
+             namespace: str,
+             command: str,
+             action: str = 'action',
+             max_workers=0,
+             show_out=True,
+             on_any = False,
+             shell = '/bin/sh',
+             backgrounded = False,
+             log_file = None) -> list[PodExecResult]:
 
         pods = StatefulSets.pod_names(sts, namespace)
         samples = 1 if on_any else sys.maxsize
         msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
         with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
-            results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell))
+            results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, backgrounded, log_file))
             for result in results:
                 if show_out and not Config().is_debug():
                     log(result.command)
adam/utils_k8s/cassandra_nodes.py
CHANGED

@@ -6,8 +6,8 @@ from adam.repl_session import ReplSession
 
 # utility collection on cassandra nodes; methods are all static
 class CassandraNodes:
-    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh') -> PodExecResult:
-        r = Pods.exec(pod_name, "cassandra", namespace, command, show_out = show_out, throw_err = throw_err, shell = shell)
+    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh', backgrounded = False, log_file = None) -> PodExecResult:
+        r = Pods.exec(pod_name, "cassandra", namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, backgrounded = backgrounded, log_file=log_file)
 
         if r and Config().get('repl.history.push-cat-remote-log-file', True):
             if r.log_file and ReplSession().prompt_session:
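Several `exec` signatures in this release gain a `backgrounded` flag (and, on the Cassandra path, `log_file`); `Pods.exec` in `pods.py` below uses the flag to force `tty = False` when launching background processes. A hedged call sketch with invented pod, namespace, and command values:

    from adam.utils_k8s.cassandra_nodes import CassandraNodes

    result = CassandraNodes.exec(
        'cassandra-dc1-default-sts-0',                     # invented pod name
        'my-namespace',
        'nohup nodetool repair > /tmp/repair.log 2>&1 &',  # long-running background task
        backgrounded=True,                                 # disables TTY allocation
        log_file='/tmp/repair.log',
    )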
adam/utils_k8s/k8s.py
CHANGED

@@ -1,4 +1,5 @@
 from collections.abc import Callable
+import inspect
 import re
 import portforward
 

@@ -49,6 +50,14 @@ class PortForwardHandler:
             raise InvalidStateException(state)
 
         self.pod = pod
+
+        # pf = portforward.forward(state.namespace, pod, self.local_port + 1, self.target_port, log_level=portforward.LogLevel.DEBUG)
+        # print(inspect.getsource(pf.__enter__))
+        # print('test portforward START', state.namespace, pod, self.local_port + 1, self.target_port, pf.__enter__)
+        # with pf:
+        #     print('test portforward BODY')
+        # print('test portforward OK')
+
         self.forward_connection = portforward.forward(state.namespace, pod, self.local_port, self.target_port)
         if self.inc_connection_cnt() == 1:
             self.forward_connection.__enter__()
adam/utils_k8s/kube_context.py
CHANGED

@@ -3,7 +3,7 @@ import re
 from kubernetes import config as kconfig
 
 from adam.config import Config
-from adam.utils import idp_token_from_env, lines_to_tabular, log2
+from adam.utils import idp_token_from_env, log2, tabulize
 
 class KubeContext:
     _in_cluster = False

@@ -56,7 +56,7 @@ class KubeContext:
             log2('Use -v <key>=<value> format.')
             log2()
             lines = [f'{key}\t{Config().get(key, None)}' for key in Config().keys()]
-
+            tabulize(lines, separator='\t', to=2)
 
         for p in param_ovrs:
             tokens = p.split('=')
adam/utils_k8s/pods.py
CHANGED

@@ -1,5 +1,6 @@
 from collections.abc import Callable
 from datetime import datetime
+import os
 import sys
 import time
 from typing import TypeVar

@@ -10,7 +11,8 @@ from kubernetes.stream.ws_client import ERROR_CHANNEL, WSClient
 from adam.config import Config
 from adam.utils_k8s.volumes import ConfigMapMount
 from adam.pod_exec_result import PodExecResult
-from adam.utils import ParallelMapHandler, log2, debug, log_exc
+from adam.utils import GeneratorStream, ParallelMapHandler, log2, debug, log_exc
+from adam.utils_local import local_tmp_dir
 from .kube_context import KubeContext
 
 from websocket._core import WebSocket

@@ -48,9 +50,14 @@ class Pods:
 
         return ParallelMapHandler(collection, max_workers, samples = samples, msg = msg)
 
-    def exec(pod_name: str,
-
-
+    def exec(pod_name: str,
+             container: str,
+             namespace: str,
+             command: str,
+             show_out = True,
+             throw_err = False,
+             shell = '/bin/sh',
+             backgrounded = False,
              log_file = None,
              interaction: Callable[[any, list[str]], any] = None,
              env_prefix: str = None):

@@ -66,7 +73,7 @@ class Pods:
         if env_prefix:
             exec_command = [shell, '-c', f'{env_prefix} {command}']
 
-        if command.endswith(' &'):
+        if backgrounded or command.endswith(' &'):
             # should be false for starting a background process
             tty = False
 

@@ -166,6 +173,17 @@ class Pods:
         with log_exc():
             s.sock.close()
 
+    def download_file(pod_name: str, container: str, namespace: str, from_path: str, to_path: str = None):
+        if not to_path:
+            to_path = f'{local_tmp_dir()}/{os.path.basename(from_path)}'
+
+        bytes = Pods.read_file(pod_name, container, namespace, from_path)
+        with open(to_path, 'wb') as f:
+            for item in GeneratorStream(bytes):
+                f.write(item)
+
+        return to_path
+
     def get_container(namespace: str, pod_name: str, container_name: str):
         pod = Pods.get(namespace, pod_name)
         if not pod:
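The new `Pods.download_file` streams a file out of a pod through `GeneratorStream` and writes it locally, defaulting the destination to `local_tmp_dir()` plus the remote basename. A hedged usage sketch; pod, container, namespace, and path values are invented:

    from adam.utils_k8s.pods import Pods

    local_path = Pods.download_file(
        'cassandra-dc1-default-sts-0',    # invented pod name
        'cassandra',                      # container
        'my-namespace',
        '/var/log/cassandra/system.log',  # remote path; to_path omitted -> temp-dir default
    )
    print(local_path)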
adam/utils_k8s/statefulsets.py
CHANGED

@@ -56,8 +56,11 @@ class StatefulSets:
         return statefulset_pods
 
     @functools.lru_cache()
-    def pod_names(
-
+    def pod_names(sts: str, ns: str):
+        if not sts:
+            return []
+
+        return [pod.metadata.name for pod in StatefulSets.pods(sts, ns)]
 
     def restarted_at(ss: str, ns: str):
         # returns timestamp and if being rolled out
adam/utils_local.py
ADDED