kaqing 2.0.200__py3-none-any.whl → 2.0.211__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kaqing might be problematic.
- adam/batch.py +1 -1
- adam/commands/app/utils_app.py +1 -1
- adam/commands/cql/completions_c.py +1 -1
- adam/commands/cql/utils_cql.py +14 -13
- adam/commands/devices/device.py +1 -1
- adam/commands/download_cassandra_log.py +2 -2
- adam/commands/export/export_databases.py +13 -8
- adam/commands/export/export_sessions.py +12 -11
- adam/commands/export/exporter.py +140 -53
- adam/commands/export/import_session.py +0 -4
- adam/commands/export/importer.py +11 -11
- adam/commands/export/importer_athena.py +15 -6
- adam/commands/export/importer_sqlite.py +19 -8
- adam/commands/export/utils_export.py +37 -15
- adam/commands/postgres/postgres_databases.py +1 -1
- adam/commands/postgres/postgres_ls.py +1 -1
- adam/commands/postgres/utils_postgres.py +2 -1
- adam/commands/show/show_cassandra_status.py +3 -10
- adam/commands/show/show_processes.py +1 -1
- adam/commands/show/show_storage.py +2 -1
- adam/embedded_params.py +1 -1
- adam/repl_commands.py +13 -12
- adam/sso/cred_cache.py +2 -5
- adam/utils.py +122 -71
- adam/utils_k8s/app_clusters.py +10 -3
- adam/utils_k8s/app_pods.py +9 -3
- adam/utils_k8s/cassandra_clusters.py +4 -4
- adam/utils_k8s/cassandra_nodes.py +13 -7
- adam/{pod_exec_result.py → utils_k8s/pod_exec_result.py} +8 -2
- adam/utils_k8s/pods.py +34 -29
- adam/utils_local.py +78 -2
- adam/utils_repl/repl_completer.py +6 -2
- adam/utils_sqlite.py +3 -8
- adam/version.py +1 -1
- {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/METADATA +1 -1
- {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/RECORD +39 -61
- adam/commands/alter_tables.py +0 -66
- adam/commands/cassandra/download_cassandra_log.py +0 -45
- adam/commands/cassandra/nodetool.py +0 -64
- adam/commands/cassandra/nodetool_commands.py +0 -120
- adam/commands/cassandra/restart_cluster.py +0 -47
- adam/commands/cassandra/restart_node.py +0 -51
- adam/commands/cassandra/restart_nodes.py +0 -47
- adam/commands/cassandra/rollout.py +0 -88
- adam/commands/cat.py +0 -36
- adam/commands/cd.py +0 -41
- adam/commands/download_file.py +0 -47
- adam/commands/find_files.py +0 -51
- adam/commands/find_processes.py +0 -76
- adam/commands/head.py +0 -36
- adam/commands/ls.py +0 -41
- adam/commands/os/cat.py +0 -36
- adam/commands/os/download_file.py +0 -47
- adam/commands/os/find_files.py +0 -51
- adam/commands/os/find_processes.py +0 -76
- adam/commands/os/head.py +0 -36
- adam/commands/os/shell.py +0 -41
- adam/commands/shell.py +0 -41
- {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/WHEEL +0 -0
- {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/entry_points.txt +0 -0
- {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/top_level.txt +0 -0
adam/utils.py
CHANGED
@@ -1,3 +1,4 @@
+from abc import ABC
 from concurrent.futures import Future, ThreadPoolExecutor
 from contextlib import redirect_stdout
 import copy
@@ -12,7 +13,7 @@ import random
 import string
 import threading
 import traceback
-from typing import Callable, Iterator, TypeVar, Union
+from typing import Callable, Iterator, TextIO, TypeVar, Union
 from dateutil import parser
 import subprocess
 import sys
@@ -128,14 +129,24 @@ def log(s = None):
 
     return True
 
-def log2(s = None, nl = True):
+def log2(s = None, nl = True, file: str = None):
     if not loggable():
         return False
 
     if s:
-        click.echo(s, err=True, nl=nl)
+        if file:
+            with open(file, 'at') as f:
+                f.write(s)
+                if nl:
+                    f.write('\n')
+        else:
+            click.echo(s, err=True, nl=nl)
     else:
-        print(file=sys.stderr)
+        if file:
+            with open(file, 'at') as f:
+                f.write('\n')
+        else:
+            print(file=sys.stderr)
 
     return True
 
@@ -273,28 +284,10 @@ def json_to_csv(json_data: list[dict[any, any]], delimiter: str = ','):
     else:
         return None
 
-def log_to_file(config: dict[any, any]):
-    with log_exc():
-        base = f"/kaqing/logs"
-        os.makedirs(base, exist_ok=True)
-
-        now = datetime.now()
-        timestamp_str = now.strftime("%Y%m%d-%H%M%S")
-        filename = f"{base}/login.{timestamp_str}.txt"
-        with open(filename, 'w') as f:
-            if isinstance(config, dict):
-                try:
-                    json.dump(config, f, indent=4)
-                except:
-                    f.write(config)
-            else:
-                f.write(config)
-
 def copy_config_file(rel_path: str, module: str, suffix: str = '.yaml', show_out = True):
-    dir = f'{Path.home()}/.kaqing'
+    dir = creating_dir(f'{Path.home()}/.kaqing')
     path = f'{dir}/{rel_path}'
     if not os.path.exists(path):
-        os.makedirs(dir, exist_ok=True)
         module = importlib.import_module(module)
         with open(path, 'w') as f:
             yaml.dump(module.config(), f, default_flow_style=False)
@@ -336,34 +329,42 @@ def in_docker() -> bool:
     return False
 
 class Ing:
-    def __init__(self, msg: str, suppress_log=False):
+    def __init__(self, msg: str, suppress_log=False, job_log: str = None, condition = True):
         self.msg = msg
         self.suppress_log = suppress_log
+        self.job_log = job_log
+        self.condition = condition
 
     def __enter__(self):
+        if not self.condition:
+            return None
+
         if not hasattr(log_state, 'ing_cnt'):
             log_state.ing_cnt = 0
 
         try:
             if not log_state.ing_cnt:
                 if not self.suppress_log and not ConfigHolder().config.is_debug():
-                    log2(f'{self.msg}...', nl=False)
+                    log2(f'{self.msg}...', nl=False, file=self.job_log)
 
             return None
         finally:
             log_state.ing_cnt += 1
 
     def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.condition:
+            return False
+
        log_state.ing_cnt -= 1
         if not log_state.ing_cnt:
             if not self.suppress_log and not ConfigHolder().config.is_debug():
-                log2(' OK')
+                log2(' OK', file=self.job_log)
 
         return False
 
-def ing(msg: str, body: Callable[[], None]=None, suppress_log=False):
+def ing(msg: str, body: Callable[[], None]=None, suppress_log=False, job_log: str = None, condition = True):
     if not body:
-        return Ing(msg, suppress_log=suppress_log)
+        return Ing(msg, suppress_log=suppress_log, job_log=job_log, condition=condition)
 
     r = None
 
@@ -598,8 +599,11 @@ class ParallelService:
     else:
         return iterator
 
+thread_pools: dict[str, ThreadPoolExecutor] = {}
+thread_pool_lock = threading.Lock()
+
 class ParallelMapHandler:
-    def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True):
+    def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True, name = None):
         self.collection = collection
         self.workers = workers
         self.executor = None
@@ -611,24 +615,28 @@ class ParallelMapHandler:
         else:
             self.msg = None
         self.collect = collect
+        self.name = name
 
         self.begin = []
         self.end = []
         self.start_time = None
 
     def __enter__(self):
+        self.start_time = None
+
         self.calc_msgs()
 
         if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
             self.start_time = time.time()
 
-            self.executor = ThreadPoolExecutor(max_workers=self.workers)
+            self.executor = self.pool()
+            # self.executor = ThreadPoolExecutor(max_workers=self.workers)
             self.executor.__enter__()
 
         return ParallelService(self)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        if self.executor:
+        if not self.name and self.executor:
             self.executor.__exit__(exc_type, exc_val, exc_tb)
 
         if self.end:
@@ -636,6 +644,15 @@ class ParallelMapHandler:
 
         return False
 
+    def pool(self, thread_name_prefix: str = None):
+        if not self.name:
+            return ThreadPoolExecutor(max_workers=self.workers)
+
+        if self.name not in thread_pools:
+            thread_pools[self.name] = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix=thread_name_prefix)
+
+        return thread_pools[self.name]
+
     def size(self):
         if not self.collection:
             return 0
@@ -646,25 +663,28 @@ class ParallelMapHandler:
         if not self.msg:
             return
 
+        self.begin = []
+        self.end = []
         size = self.size()
         offloaded = False
         serially = False
         sampling = False
         if size == 0:
             offloaded = True
-            …
+            msg = self.msg.replace('{size}', '1')
         elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
-            …
+            msg = self.msg.replace('{size}', f'{size}')
         elif self.samples < sys.maxsize:
             sampling = True
+            samples = self.samples
             if self.samples > size:
-                …
-            …
+                samples = size
+            msg = self.msg.replace('{size}', f'{samples}/{size} sample')
         else:
             serially = True
-            …
+            msg = self.msg.replace('{size}', f'{size}')
 
-        for token in …
+        for token in msg.split(' '):
             if '|' in token:
                 self.begin.append(token.split('|')[0])
                 if not sampling and not serially and not offloaded:
@@ -681,8 +701,19 @@ class ParallelMapHandler:
         else:
             log2(f'{" ".join(self.begin)} with {self.workers} workers...')
 
-…
-…
+# parallelizers: dict[str, ParallelMapHandler] = {}
+# parallelizer_lock = threading.Lock()
+
+def parallelize(collection: list, workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True, name = None):
+    return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect, name = name)
+    # if not name:
+    #     return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect)
+
+    # with parallelizer_lock:
+    #     if name not in parallelizers:
+    #         parallelizers[name] = ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect, name = name)
+
+    #     return parallelizers[name]
 
 class OffloadService:
     def __init__(self, handler: 'OffloadHandler'):
@@ -701,23 +732,24 @@ class OffloadService:
         return future
 
 class OffloadHandler(ParallelMapHandler):
-    def __init__(self, max_workers: int, msg: str = None):
-        super().__init__(None, max_workers, msg=msg, collect=False )
+    def __init__(self, max_workers: int, msg: str = None, name: str = None):
+        super().__init__(None, max_workers, msg=msg, collect=False, name=f'offload-{name}')
 
     def __enter__(self):
+        self.start_time = None
         self.calc_msgs()
 
         if self.workers > 1:
-        # if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
             self.start_time = time.time()
 
-            self.executor = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix='offload')
+            self.executor = self.pool(thread_name_prefix='offload')
+            # self.executor = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix='offload')
             self.executor.__enter__()
 
         return OffloadService(self)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        if self.executor:
+        if not self.name and self.executor:
             self.executor.__exit__(exc_type, exc_val, exc_tb)
 
         if self.end:
@@ -725,38 +757,33 @@ class OffloadHandler(ParallelMapHandler):
 
         return False
 
-    # def size(self):
-    #     if not self.collection:
-    #         return 0
-
-    #     return len(self.collection)
-
     def calc_msgs(self):
         if not self.msg:
             return
 
+        self.begin = []
+        self.end = []
         size = self.size()
-        # return
 
         offloaded = False
         serially = False
         sampling = False
         if size == 0:
             offloaded = True
-            …
+            msg = self.msg.replace('{size}', '1')
         elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
-            …
+            msg = self.msg.replace('{size}', f'{size}')
         elif self.samples < sys.maxsize:
             sampling = True
-            …
-            …
-            …
+            samples = self.samples
+            if samples > size:
+                samples = size
+            msg = self.msg.replace('{size}', f'{samples}/{size} sample')
         else:
             serially = True
-            …
-            # return
+            msg = self.msg.replace('{size}', f'{size}')
 
-        for token in …
+        for token in msg.split(' '):
             if '|' in token:
                 self.begin.append(token.split('|')[0])
                 if not sampling and not serially and not offloaded:
@@ -773,32 +800,39 @@ class OffloadHandler(ParallelMapHandler):
         else:
             log2(f'{" ".join(self.begin)} with {self.workers} workers...')
 
-def offload(max_workers: int = 3, msg: str = None):
-    return OffloadHandler(max_workers, msg = msg)
+def offload(max_workers: int = 3, msg: str = None, name: str = None):
+    return OffloadHandler(max_workers, msg = msg, name = name)
 
 def kaqing_log_file_name(suffix = 'log'):
-    return f"{…
+    return f"{log_dir()}/{datetime.now().strftime('%d%H%M%S')}.{suffix}"
+
+def log_dir():
+    return creating_dir(ConfigHolder().config.get('log-dir', '/tmp/qing-db/q/logs'))
 
 class LogFileHandler:
-    def __init__(self, suffix = 'log'):
+    def __init__(self, suffix = 'log', condition=True):
         self.suffix = suffix
+        self.condition = condition
 
     def __enter__(self):
-        self.f = …
-        self.…
+        self.f = None
+        if self.condition:
+            self.f = open(kaqing_log_file_name(suffix=self.suffix), 'w')
+            self.f.__enter__()
 
         return self.f
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.f…
+        if self.f:
+            self.f.__exit__(exc_type, exc_val, exc_tb)
 
-            …
-            …
+            if ConfigHolder().append_command_history:
+                ConfigHolder().append_command_history(f':cat {self.f.name}')
 
         return False
 
-def kaqing_log_file(suffix = 'log'):
-    return LogFileHandler(suffix = suffix)
+def kaqing_log_file(suffix = 'log', condition=True):
+    return LogFileHandler(suffix = suffix, condition=condition)
 
 class CommandLog:
     log_file = None
@@ -826,6 +860,23 @@ class CommandLog:
             pass
 
         if ConfigHolder().append_command_history:
-            ConfigHolder().append_command_history(f':…
+            ConfigHolder().append_command_history(f':cat {CommandLog.log_file.name}')
 
         CommandLog.log_file = None
+
+class ExecResult(ABC):
+    def exit_code(self) -> int:
+        pass
+
+    def cat_log_file_cmd(self) -> str:
+        pass
+
+_dirs_created = set()
+
+def creating_dir(dir):
+    if dir not in _dirs_created:
+        _dirs_created.add(dir)
+        if not os.path.exists(dir):
+            os.makedirs(dir, exist_ok=True)
+
+    return dir
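
Reviewer note: the net effect in adam/utils.py is that log2 can tee to a file, Ing/ing gain job_log and condition parameters, thread pools can be memoized by name, and creating_dir caches directory creation. A minimal usage sketch based on the signatures above (the job-log path and do_backup are illustrative, not from the package):

    from adam.utils import ing, creating_dir

    # creating_dir makes the directory once, then memoizes it in _dirs_created
    job_log = f"{creating_dir('/tmp/qing-db/q/logs')}/job.log"

    # With job_log set, the 'Backing up keyspace...'/' OK' progress markers are
    # appended to the file via log2(file=...); condition=False would make the
    # whole context manager a no-op.
    with ing('Backing up keyspace', job_log=job_log):
        do_backup()  # placeholder for the actual work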
adam/utils_k8s/app_clusters.py
CHANGED
@@ -2,7 +2,7 @@ import sys
 from typing import TypeVar
 
 from adam.utils_k8s.app_pods import AppPods
-from adam.pod_exec_result import PodExecResult
+from adam.utils_k8s.pod_exec_result import PodExecResult
 from adam.utils import log, log2
 from adam.utils_k8s.pods import Pods
 from .kube_context import KubeContext
@@ -11,8 +11,15 @@ T = TypeVar('T')
 
 # utility collection on app clusters; methods are all static
 class AppClusters:
-    def exec(pods: list[str], …
-             …
+    def exec(pods: list[str],
+             namespace: str,
+             command: str,
+             action: str = 'action',
+             max_workers=0,
+             show_out=True,
+             on_any = False,
+             shell = '/bin/sh',
+             backgrounded = False) -> list[PodExecResult]:
         samples = 1 if on_any else sys.maxsize
         msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
         with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
adam/utils_k8s/app_pods.py
CHANGED
@@ -4,7 +4,7 @@ from kubernetes import client
 
 from adam.config import Config
 from adam.utils_k8s.pods import Pods
-from adam.pod_exec_result import PodExecResult
+from adam.utils_k8s.pod_exec_result import PodExecResult
 from adam.repl_session import ReplSession
 
 # utility collection on app pods; methods are all static
@@ -25,11 +25,17 @@ class AppPods:
 
         return v1.list_namespaced_pod(namespace, label_selector=label_selector).items
 
-    def exec(pod_name: str, …
+    def exec(pod_name: str,
+             namespace: str,
+             command: str,
+             show_out = True,
+             throw_err = False,
+             shell = '/bin/sh',
+             backgrounded = False) -> PodExecResult:
         container = Config().get('app.container-name', 'c3-server')
         r = Pods.exec(pod_name, container, namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, backgrounded = backgrounded)
 
         if r and r.log_file:
-            ReplSession().append_history(f'…
+            ReplSession().append_history(f':cat {r.log_file}')
 
         return r
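
As elsewhere in this release, the recorded history entry becomes a plain ':cat <log_file>' command. Sketch, assuming the signature above (pod and namespace names are invented):

    r = AppPods.exec('c3-server-0', 'my-namespace', 'tail -n 5 /var/log/app.log')
    # When the exec produced a log file, REPL history gains a replay entry of
    # the form ':cat <log_file>'.
    if r and r.log_file:
        print(r.log_file)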
adam/utils_k8s/cassandra_clusters.py
CHANGED
@@ -3,7 +3,7 @@ from typing import TypeVar
 
 from adam.config import Config
 from adam.utils_k8s.cassandra_nodes import CassandraNodes
-from adam.pod_exec_result import PodExecResult
+from adam.utils_k8s.pod_exec_result import PodExecResult
 from adam.utils import log, log2
 from adam.utils_k8s.pods import Pods
 from adam.utils_k8s.statefulsets import StatefulSets
@@ -22,16 +22,16 @@ class CassandraClusters:
              shell = '/bin/sh',
              backgrounded = False,
              log_file = None,
-             …
+             history=True) -> list[PodExecResult]:
 
         pods = StatefulSets.pod_names(sts, namespace)
         samples = 1 if on_any else sys.maxsize
-        if …
+        if (backgrounded or command.endswith(' &')) and not log_file:
             log_file = Pods.log_file(command)
 
         msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
         with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
-            results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, backgrounded, log_file, …
+            results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, backgrounded, log_file, history))
             for result in results:
                 if show_out and not Config().is_debug():
                     log(result.command)
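
The new guard allocates a shared log file only when the command is backgrounded (explicitly, or via a trailing ' &') and none was passed in, and the new history flag is threaded through to CassandraNodes.exec. A hedged sketch (statefulset and namespace names are invented, and the leading argument order is inferred from the body above):

    results = CassandraClusters.exec(
        'cassandra-dc1', 'my-namespace', 'nodetool compactionstats &',
        action='compaction-check',
        history=False)  # don't push ':cat <log>' entries into REPL history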
adam/utils_k8s/cassandra_nodes.py
CHANGED
@@ -1,18 +1,24 @@
 from adam.config import Config
 from adam.utils_k8s.pods import Pods
 from adam.utils_k8s.secrets import Secrets
-from adam.pod_exec_result import PodExecResult
+from adam.utils_k8s.pod_exec_result import PodExecResult
 from adam.repl_session import ReplSession
 
 # utility collection on cassandra nodes; methods are all static
 class CassandraNodes:
-    def exec(pod_name: str, …
-        …
+    def exec(pod_name: str,
+             namespace: str,
+             command: str,
+             show_out = True,
+             throw_err = False,
+             shell = '/bin/sh',
+             backgrounded = False,
+             log_file = None,
+             history = True) -> PodExecResult:
+        r = Pods.exec(pod_name, "cassandra", namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, backgrounded = backgrounded, log_file=log_file)
 
-        if …
-            entry = f'…
-            if Config().get('repl.background-process.via-sh', True):
-                entry = f':sh cat {r.log_file}'
+        if history and r and r.log_file:
+            entry = f':cat {r.log_file}'
 
             ReplSession().append_history(entry)
 
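
Sketch of the simplified history behavior, assuming the signature above (pod and namespace names are invented):

    r = CassandraNodes.exec('cassandra-dc1-0', 'my-namespace',
                            'nodetool status', show_out=False)
    # history=True (default): if r.log_file is set, a ':cat <log_file>' entry
    # is appended to REPL history; pass history=False to suppress it.
    print(r)  # __str__ renders 'OK <command>' when exit_code() == 0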
adam/{pod_exec_result.py → utils_k8s/pod_exec_result.py}
RENAMED
@@ -1,8 +1,8 @@
 import yaml
 
-from adam.utils import log_exc
+from adam.utils import ExecResult, log_exc
 
-class PodExecResult:
+class PodExecResult(ExecResult):
     # {
     #     'metadata': {},
     #     'status': 'Failure',
@@ -34,6 +34,12 @@ class PodExecResult:
 
         return code
 
+    def cat_log_file_cmd(self):
+        if self.pod and self.log_file:
+            return f'@{self.pod} cat {self.log_file}'
+
+        return None
+
     def __str__(self):
         return f'{"OK" if self.exit_code() == 0 else self.exit_code()} {self.command}'
 
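
The move under utils_k8s plus the new ExecResult base class (defined in adam/utils.py above) lets callers ask any result for a replay command without knowing the concrete type. A small sketch of that polymorphism, on the assumption that other ExecResult subclasses exist or will:

    from adam.utils import ExecResult

    def replay_hint(r: ExecResult):
        # For PodExecResult this is '@<pod> cat <log_file>'; other subclasses
        # can supply their own form, or None when there is no log file.
        return r.cat_log_file_cmd()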
adam/utils_k8s/pods.py
CHANGED
@@ -13,9 +13,9 @@ from kubernetes.stream.ws_client import ERROR_CHANNEL, WSClient
 from adam.config import Config
 from adam.repl_session import ReplSession
 from adam.utils_k8s.volumes import ConfigMapMount
-from adam.pod_exec_result import PodExecResult
-from adam.utils import GeneratorStream, ParallelMapHandler, log2, debug, log_exc
-from adam.utils_local import …
+from adam.utils_k8s.pod_exec_result import PodExecResult
+from adam.utils import GeneratorStream, ParallelMapHandler, log2, debug, log_dir, log_exc
+from adam.utils_local import local_downloads_dir
 from .kube_context import KubeContext
 
 from websocket._core import WebSocket
@@ -63,14 +63,13 @@ class Pods:
              backgrounded = False,
              log_file = None,
              interaction: Callable[[any, list[str]], any] = None,
-             env_prefix: str = None,
-             via_sh = True):
+             env_prefix: str = None):
         if _TEST_POD_EXEC_OUTS:
             return _TEST_POD_EXEC_OUTS
 
         show_out = KubeContext.show_out(show_out)
 
-        if …
+        if backgrounded or command.endswith(' &'):
             command = command.strip(' &')
 
             log_all_file = None
@@ -82,11 +81,17 @@ class Pods:
             else:
                 log_pod_file = Pods.log_file(command, pod_name=pod_name)
 
+            if env_prefix:
+                command = f'{env_prefix} {command}'
+
             command = command.replace('"', '\\"')
             cmd = f'nohup kubectl exec {pod_name} -c {container} -- {shell} -c "{command} &" > {log_pod_file} 2>&1 &'
             if log_all_file:
                 cmd = f'{cmd} >> {log_all_file}'
 
+            if show_out:
+                log2(cmd)
+
             result = subprocess.run(cmd, capture_output=True, text=True, shell=True)
 
             return PodExecResult(result.stdout, result.stderr, cmd, None, pod=pod_name, log_file=log_pod_file)
@@ -98,22 +103,22 @@ class Pods:
         if env_prefix:
             exec_command = [shell, '-c', f'{env_prefix} {command}']
 
-        if backgrounded or command.endswith(' &'):
-            # should be false for starting a background process
-            tty = False
-
-            if Config().get('repl.background-process.auto-nohup', True):
-                command = command.strip(' &')
-                cmd_name = ''
-                if command.startswith('nodetool '):
-                    cmd_name = f".{'_'.join(command.split(' ')[5:])}"
-
-                if not log_file:
-                    log_file = f'{log_prefix()}-{datetime.now().strftime("%d%H%M%S")}{cmd_name}.log'
-                command = f"nohup {command} > {log_file} 2>&1 &"
-                if env_prefix:
-                    command = f'{env_prefix} {command}'
-                exec_command = [shell, '-c', command]
+        # if backgrounded or command.endswith(' &'):
+        #     # should be false for starting a background process
+        #     tty = False
+
+        #     if Config().get('repl.background-process.auto-nohup', True):
+        #         command = command.strip(' &')
+        #         cmd_name = ''
+        #         if command.startswith('nodetool '):
+        #             cmd_name = f".{'_'.join(command.split(' ')[5:])}"
+
+        #         if not log_file:
+        #             log_file = f'{log_prefix()}-{datetime.now().strftime("%d%H%M%S")}{cmd_name}.log'
+        #         command = f"nohup {command} > {log_file} 2>&1 &"
+        #         if env_prefix:
+        #             command = f'{env_prefix} {command}'
+        #         exec_command = [shell, '-c', command]
 
         k_command = f'kubectl exec {pod_name} -c {container} -n {namespace} -- {shell} -c "{command}"'
         debug(k_command)
@@ -178,10 +183,13 @@ class Pods:
         if groups := re.match(r'.*-(.*)', pod_name):
             pod_suffix = f'-{groups[1]}'
 
+        return f'{log_dir()}/{Pods.job_id()}{cmd_name}{pod_suffix}.log'
+
+    def job_id(dt: datetime = None):
         if not dt:
             dt = datetime.now()
 
-        return …
+        return dt.strftime("%d%H%M%S")
 
     def log_file_from_template(log_file: str, pod_name: str):
         pod_suffix = pod_name
@@ -227,14 +235,14 @@ class Pods:
 
     def download_file(pod_name: str, container: str, namespace: str, from_path: str, to_path: str = None):
         if not to_path:
-            to_path = f'{…
+            to_path = f'{local_downloads_dir()}/{os.path.basename(from_path)}'
 
         bytes = Pods.read_file(pod_name, container, namespace, from_path)
         with open(to_path, 'wb') as f:
             for item in GeneratorStream(bytes):
                 f.write(item)
 
-        ReplSession().append_history(f':…
+        ReplSession().append_history(f':cat {to_path}')
 
         return to_path
 
@@ -346,7 +354,4 @@ class Pods:
             log2(' Timed Out')
 
     def completed(namespace: str, pod_name: str):
-        return Pods.get(namespace, pod_name).status.phase in ['Succeeded', 'Failed']
-
-    def log_prefix():
-        return Config().get('log-prefix', '/tmp/qing')
+        return Pods.get(namespace, pod_name).status.phase in ['Succeeded', 'Failed']