kaqing 2.0.188__py3-none-any.whl → 2.0.211__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kaqing might be problematic.

Files changed (78)
  1. adam/batch.py +7 -7
  2. adam/commands/app/utils_app.py +1 -1
  3. adam/commands/bash/bash.py +1 -1
  4. adam/commands/bash/utils_bash.py +1 -1
  5. adam/commands/cassandra/__init__.py +0 -0
  6. adam/commands/command.py +1 -1
  7. adam/commands/commands_utils.py +8 -13
  8. adam/commands/{alter_tables.py → cql/alter_tables.py} +1 -1
  9. adam/commands/cql/completions_c.py +1 -0
  10. adam/commands/cql/utils_cql.py +14 -13
  11. adam/commands/debug/__init__.py +0 -0
  12. adam/commands/debug/debug.py +22 -0
  13. adam/commands/debug/debug_completes.py +35 -0
  14. adam/commands/debug/debug_timings.py +35 -0
  15. adam/commands/devices/device.py +1 -1
  16. adam/commands/devices/devices.py +1 -1
  17. adam/commands/download_cassandra_log.py +45 -0
  18. adam/commands/export/export_databases.py +13 -8
  19. adam/commands/export/export_sessions.py +12 -11
  20. adam/commands/export/exporter.py +140 -53
  21. adam/commands/export/import_session.py +0 -4
  22. adam/commands/export/importer.py +11 -11
  23. adam/commands/export/importer_athena.py +15 -6
  24. adam/commands/export/importer_sqlite.py +19 -8
  25. adam/commands/export/utils_export.py +37 -15
  26. adam/commands/generate_report.py +52 -0
  27. adam/commands/medusa/medusa_restore.py +0 -16
  28. adam/commands/nodetool.py +1 -1
  29. adam/commands/os/__init__.py +0 -0
  30. adam/commands/postgres/postgres_databases.py +2 -3
  31. adam/commands/postgres/postgres_ls.py +1 -1
  32. adam/commands/postgres/utils_postgres.py +2 -1
  33. adam/commands/preview_table.py +1 -1
  34. adam/commands/restart_cluster.py +47 -0
  35. adam/commands/restart_node.py +51 -0
  36. adam/commands/restart_nodes.py +47 -0
  37. adam/commands/show/show_cassandra_status.py +3 -10
  38. adam/commands/show/show_cli_commands.py +1 -1
  39. adam/commands/show/show_processes.py +1 -1
  40. adam/commands/show/show_storage.py +2 -1
  41. adam/config.py +4 -6
  42. adam/embedded_params.py +1 -1
  43. adam/repl.py +5 -3
  44. adam/repl_commands.py +23 -17
  45. adam/repl_session.py +4 -3
  46. adam/repl_state.py +6 -0
  47. adam/sql/async_executor.py +44 -0
  48. adam/sql/lark_completer.py +6 -4
  49. adam/sql/qingl.lark +1076 -0
  50. adam/sso/cred_cache.py +2 -5
  51. adam/utils.py +206 -83
  52. adam/utils_k8s/app_clusters.py +11 -4
  53. adam/utils_k8s/app_pods.py +10 -5
  54. adam/utils_k8s/cassandra_clusters.py +8 -4
  55. adam/utils_k8s/cassandra_nodes.py +14 -5
  56. adam/utils_k8s/kube_context.py +1 -4
  57. adam/{pod_exec_result.py → utils_k8s/pod_exec_result.py} +8 -2
  58. adam/utils_k8s/pods.py +83 -24
  59. adam/utils_local.py +78 -2
  60. adam/utils_repl/repl_completer.py +10 -89
  61. adam/utils_sqlite.py +3 -8
  62. adam/version.py +1 -1
  63. {kaqing-2.0.188.dist-info → kaqing-2.0.211.dist-info}/METADATA +1 -1
  64. {kaqing-2.0.188.dist-info → kaqing-2.0.211.dist-info}/RECORD +67 -65
  65. adam/commands/cat.py +0 -36
  66. adam/commands/cd.py +0 -41
  67. adam/commands/download_file.py +0 -47
  68. adam/commands/find_files.py +0 -51
  69. adam/commands/find_processes.py +0 -76
  70. adam/commands/head.py +0 -36
  71. adam/commands/logs.py +0 -37
  72. adam/commands/ls.py +0 -41
  73. adam/commands/report.py +0 -61
  74. adam/commands/restart.py +0 -60
  75. adam/commands/shell.py +0 -41
  76. {kaqing-2.0.188.dist-info → kaqing-2.0.211.dist-info}/WHEEL +0 -0
  77. {kaqing-2.0.188.dist-info → kaqing-2.0.211.dist-info}/entry_points.txt +0 -0
  78. {kaqing-2.0.188.dist-info → kaqing-2.0.211.dist-info}/top_level.txt +0 -0
adam/sso/cred_cache.py CHANGED
@@ -2,8 +2,7 @@ import os
 from pathlib import Path
 from dotenv import load_dotenv
 
-from adam.config import Config
-from adam.utils import debug, log_exc
+from adam.utils import creating_dir, debug, log_exc
 from adam.utils_k8s.kube_context import KubeContext
 
 class CredCache:
@@ -15,7 +14,7 @@ class CredCache:
 
     def __init__(self):
         if not hasattr(self, 'env_f'):
-            self.dir = f'{Path.home()}/.kaqing'
+            self.dir = creating_dir(f'{Path.home()}/.kaqing')
             self.env_f = f'{self.dir}/.credentials'
             # immutable - cannot reload with different file content
             load_dotenv(dotenv_path=self.env_f)
@@ -44,8 +43,6 @@ class CredCache:
             updated.append(f'IDP_PASSWORD={password}')
 
         if updated:
-            if not os.path.exists(self.env_f):
-                os.makedirs(self.dir, exist_ok=True)
            with open(self.env_f, 'w') as file:
                file.write('\n'.join(updated))
 
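The cred_cache.py change replaces ad-hoc os.makedirs calls with the new creating_dir helper (added to adam/utils.py later in this diff), which ensures a directory once per process and returns the path. A minimal sketch of the idea:

    import os

    _dirs_created = set()  # directories already ensured in this process

    def creating_dir(dir: str) -> str:
        # create the directory on first use; later calls are cheap no-ops
        if dir not in _dirs_created:
            _dirs_created.add(dir)
            os.makedirs(dir, exist_ok=True)
        return dir

    # usage mirroring CredCache.__init__: the path is ensured and returned in one step
    cache_dir = creating_dir(os.path.expanduser('~/.kaqing'))
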
adam/utils.py CHANGED
@@ -1,3 +1,4 @@
+from abc import ABC
 from concurrent.futures import Future, ThreadPoolExecutor
 from contextlib import redirect_stdout
 import copy
@@ -12,14 +13,13 @@ import random
 import string
 import threading
 import traceback
-from typing import Callable, Iterator, TypeVar, Union
+from typing import Callable, Iterator, TextIO, TypeVar, Union
 from dateutil import parser
 import subprocess
 import sys
 import time
 import click
 import yaml
-
 from prompt_toolkit.completion import Completer
 
 from . import __version__
@@ -28,11 +28,28 @@ T = TypeVar('T')
 
 log_state = threading.local()
 
-class LogConfig:
-    is_debug = lambda: False
-    is_debug_timing = lambda: False
-    is_debug_complete = lambda: False
-    is_display_help = True
+class ConfigReadable:
+    def is_debug() -> bool:
+        pass
+
+    def get(self, key: str, default: T) -> T:
+        pass
+
+class ConfigHolder:
+    # the singleton pattern
+    def __new__(cls, *args, **kwargs):
+        if not hasattr(cls, 'instance'): cls.instance = super(ConfigHolder, cls).__new__(cls)
+
+        return cls.instance
+
+    def __init__(self):
+        if not hasattr(self, 'config'):
+            # set by Config
+            self.config: 'ConfigReadable' = None
+            # only for testing
+            self.is_display_help = True
+            # set by ReplSession
+            self.append_command_history = lambda entry: None
 
 NO_SORT = 0
 SORT = 1
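
ConfigHolder replaces the old LogConfig class attributes with a __new__-based singleton whose config object is injected at startup (by Config, per the comments). A standalone demo of the pattern; DummyConfig is a hypothetical stand-in for the injected adam.config.Config:

    class ConfigHolder:
        # __new__ hands back the one shared instance; __init__ guards against re-init
        def __new__(cls, *args, **kwargs):
            if not hasattr(cls, 'instance'):
                cls.instance = super().__new__(cls)
            return cls.instance

        def __init__(self):
            if not hasattr(self, 'config'):
                self.config = None  # injected later by the real Config

    class DummyConfig:  # hypothetical ConfigReadable implementation
        def is_debug(self) -> bool:
            return True

        def get(self, key, default):
            return default

    ConfigHolder().config = DummyConfig()    # wired once at startup
    assert ConfigHolder() is ConfigHolder()  # every call sees the same instance
    assert ConfigHolder().config.is_debug()  # consumers read through the holder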
@@ -112,14 +129,24 @@ def log(s = None):
 
     return True
 
-def log2(s = None, nl = True):
+def log2(s = None, nl = True, file: str = None):
     if not loggable():
         return False
 
     if s:
-        click.echo(s, err=True, nl=nl)
+        if file:
+            with open(file, 'at') as f:
+                f.write(s)
+                if nl:
+                    f.write('\n')
+        else:
+            click.echo(s, err=True, nl=nl)
     else:
-        print(file=sys.stderr)
+        if file:
+            with open(file, 'at') as f:
+                f.write('\n')
+        else:
+            print(file=sys.stderr)
 
     return True
 
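log2 now takes an optional file argument: when set, output is appended to that path instead of echoed to stderr, which is what lets Ing route progress messages into a per-job log. A simplified sketch of the same either/or routing (the real function goes through click.echo and the loggable() guard):

    import sys

    def log2(s=None, nl=True, file=None):
        # append to the given log file, or fall back to stderr
        if file:
            with open(file, 'at') as f:
                f.write((s or '') + ('\n' if nl else ''))
        else:
            print(s or '', end='\n' if nl else '', file=sys.stderr)

    log2('restarting node', file='/tmp/job.log')  # appended to the job log
    log2('restarting node')                       # written to stderr
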
@@ -164,7 +191,8 @@ def deep_merge_dicts(dict1, dict2):
         elif key not in merged_dict or value:
             # Otherwise, overwrite or add the value from dict2
             if key in merged_dict and isinstance(merged_dict[key], Completer):
-                print('SEAN completer found, ignoring', key, value)
+                pass
+                # print('SEAN completer found, ignoring', key, value)
             else:
                 merged_dict[key] = value
     return merged_dict
@@ -204,7 +232,7 @@ def get_deep_keys(d, current_path=""):
     return keys
 
 def display_help(replace_arg = False):
-    if not LogConfig.is_display_help:
+    if not ConfigHolder().is_display_help:
         return
 
     args = copy.copy(sys.argv)
@@ -256,28 +284,10 @@ def json_to_csv(json_data: list[dict[any, any]], delimiter: str = ','):
     else:
         return None
 
-def log_to_file(config: dict[any, any]):
-    with log_exc():
-        base = f"/kaqing/logs"
-        os.makedirs(base, exist_ok=True)
-
-        now = datetime.now()
-        timestamp_str = now.strftime("%Y%m%d-%H%M%S")
-        filename = f"{base}/login.{timestamp_str}.txt"
-        with open(filename, 'w') as f:
-            if isinstance(config, dict):
-                try:
-                    json.dump(config, f, indent=4)
-                except:
-                    f.write(config)
-            else:
-                f.write(config)
-
 def copy_config_file(rel_path: str, module: str, suffix: str = '.yaml', show_out = True):
-    dir = f'{Path.home()}/.kaqing'
+    dir = creating_dir(f'{Path.home()}/.kaqing')
     path = f'{dir}/{rel_path}'
     if not os.path.exists(path):
-        os.makedirs(dir, exist_ok=True)
         module = importlib.import_module(module)
         with open(path, 'w') as f:
             yaml.dump(module.config(), f, default_flow_style=False)
@@ -293,15 +303,15 @@ def is_lambda(func):
     return callable(func) and hasattr(func, '__name__') and func.__name__ == '<lambda>'
 
 def debug(s = None):
-    if LogConfig.is_debug():
+    if ConfigHolder().config.is_debug():
         log2(f'DEBUG {s}')
 
 def debug_complete(s = None):
-    if LogConfig.is_debug_complete():
-        log2(f'DEBUG {s}')
+    CommandLog.log(f'DEBUG {s}', config=ConfigHolder().config.get('debugs.complete', 'off'))
 
 def debug_trace():
-    if LogConfig.is_debug():
+    if ConfigHolder().config.is_debug():
+    # if LogConfig.is_debug():
         log2(traceback.format_exc())
 
 def in_docker() -> bool:
@@ -319,34 +329,42 @@ def in_docker() -> bool:
     return False
 
 class Ing:
-    def __init__(self, msg: str, suppress_log=False):
+    def __init__(self, msg: str, suppress_log=False, job_log: str = None, condition = True):
         self.msg = msg
         self.suppress_log = suppress_log
+        self.job_log = job_log
+        self.condition = condition
 
     def __enter__(self):
+        if not self.condition:
+            return None
+
         if not hasattr(log_state, 'ing_cnt'):
             log_state.ing_cnt = 0
 
         try:
             if not log_state.ing_cnt:
-                if not self.suppress_log and not LogConfig.is_debug():
-                    log2(f'{self.msg}...', nl=False)
+                if not self.suppress_log and not ConfigHolder().config.is_debug():
+                    log2(f'{self.msg}...', nl=False, file=self.job_log)
 
             return None
         finally:
             log_state.ing_cnt += 1
 
     def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.condition:
+            return False
+
         log_state.ing_cnt -= 1
         if not log_state.ing_cnt:
-            if not self.suppress_log and not LogConfig.is_debug():
-                log2(' OK')
+            if not self.suppress_log and not ConfigHolder().config.is_debug():
+                log2(' OK', file=self.job_log)
 
         return False
 
-def ing(msg: str, body: Callable[[], None]=None, suppress_log=False):
+def ing(msg: str, body: Callable[[], None]=None, suppress_log=False, job_log: str = None, condition = True):
     if not body:
-        return Ing(msg, suppress_log=suppress_log)
+        return Ing(msg, suppress_log=suppress_log, job_log=job_log, condition=condition)
 
     r = None
 
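Ing gained job_log (forwarded to log2's new file argument) and condition (which turns the whole wrapper into a no-op when falsy); ing() still works both as a context manager and with a callable body. A usage sketch; do_drain and collect_status are hypothetical placeholders:

    # context-manager form: prints 'draining node...' on entry, ' OK' on exit
    with ing('draining node'):
        do_drain()

    # progress goes to a per-job log file instead of stderr
    with ing('draining node', job_log='/tmp/job.log'):
        do_drain()

    # condition=False skips the messages entirely
    with ing('draining node', condition=False):
        do_drain()

    # callable form: runs the body and returns its result
    r = ing('collecting status', lambda: collect_status())
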
@@ -360,7 +378,7 @@ def ing(msg: str, body: Callable[[], None]=None, suppress_log=False):
     return r
 
 def loggable():
-    return LogConfig.is_debug() or not hasattr(log_state, 'ing_cnt') or not log_state.ing_cnt
+    return ConfigHolder().config and ConfigHolder().config.is_debug() or not hasattr(log_state, 'ing_cnt') or not log_state.ing_cnt
 
 class TimingNode:
     def __init__(self, depth: int, s0: time.time = time.time(), line: str = None):
@@ -388,7 +406,7 @@ class LogTiming:
         self.s0 = s0
 
     def __enter__(self):
-        if not LogConfig.is_debug_timing():
+        if (config := ConfigHolder().config.get('debugs.timings', 'off')) not in ['on', 'file']:
            return
 
        if not hasattr(log_state, 'timings'):
@@ -400,7 +418,7 @@ class LogTiming:
         self.s0 = time.time()
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        if not LogConfig.is_debug_timing():
+        if (config := ConfigHolder().config.get('debugs.timings', 'off')) not in ['on', 'file']:
            return False
 
        child = log_state.timings
@@ -411,7 +429,9 @@ class LogTiming:
         log_state.timings = self.me
 
         if not self.me.depth:
-            log2(self.me.tree())
+            # log timings finally
+            CommandLog.log(self.me.tree(), config)
+
            log_state.timings = TimingNode(0)
 
        return False
@@ -420,7 +440,7 @@ def log_timing(msg: str, body: Callable[[], None]=None, s0: time.time = None):
     if not s0 and not body:
         return LogTiming(msg, s0=s0)
 
-    if not LogConfig.is_debug_timing():
+    if not ConfigHolder().config.get('debugs.timings', False):
        if body:
            return body()
 
@@ -439,7 +459,6 @@ def log_timing(msg: str, body: Callable[[], None]=None, s0: time.time = None):
     return r
 
 def timing_log_line(depth: int, msg: str, s0: time.time):
-    # print('SEAN log timing', msg, threading.current_thread().name)
     elapsed = time.time() - s0
     offloaded = '-' if threading.current_thread().name.startswith('offload') or threading.current_thread().name.startswith('async') else '+'
     prefix = f'[{offloaded} timings] '
@@ -539,7 +558,7 @@ class LogTrace:
         elif self.err_msg is not False and self.err_msg:
             log2(self.err_msg)
 
-        if self.err_msg is not False and LogConfig.is_debug():
+        if self.err_msg is not False and ConfigHolder().config.is_debug():
            traceback.print_exception(exc_type, exc_val, exc_tb, file=sys.stderr)
 
        # swallow exception
@@ -580,37 +599,44 @@ class ParallelService:
         else:
             return iterator
 
+thread_pools: dict[str, ThreadPoolExecutor] = {}
+thread_pool_lock = threading.Lock()
+
 class ParallelMapHandler:
-    def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True):
+    def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True, name = None):
         self.collection = collection
         self.workers = workers
         self.executor = None
         self.samples = samples
         self.msg = msg
         if msg and msg.startswith('d`'):
-            if LogConfig.is_debug():
+            if ConfigHolder().config.is_debug():
                self.msg = msg.replace('d`', '', 1)
            else:
                self.msg = None
         self.collect = collect
+        self.name = name
 
         self.begin = []
         self.end = []
         self.start_time = None
 
     def __enter__(self):
+        self.start_time = None
+
         self.calc_msgs()
 
         if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
            self.start_time = time.time()
 
-            self.executor = ThreadPoolExecutor(max_workers=self.workers)
+            self.executor = self.pool()
+            # self.executor = ThreadPoolExecutor(max_workers=self.workers)
            self.executor.__enter__()
 
        return ParallelService(self)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        if self.executor:
+        if not self.name and self.executor:
            self.executor.__exit__(exc_type, exc_val, exc_tb)
 
        if self.end:
@@ -618,6 +644,15 @@ class ParallelMapHandler:
 
         return False
 
+    def pool(self, thread_name_prefix: str = None):
+        if not self.name:
+            return ThreadPoolExecutor(max_workers=self.workers)
+
+        if self.name not in thread_pools:
+            thread_pools[self.name] = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix=thread_name_prefix)
+
+        return thread_pools[self.name]
+
     def size(self):
         if not self.collection:
             return 0
@@ -628,25 +663,28 @@
         if not self.msg:
             return
 
+        self.begin = []
+        self.end = []
         size = self.size()
         offloaded = False
         serially = False
         sampling = False
         if size == 0:
             offloaded = True
-            self.msg = self.msg.replace('{size}', '1')
+            msg = self.msg.replace('{size}', '1')
         elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
-            self.msg = self.msg.replace('{size}', f'{size}')
+            msg = self.msg.replace('{size}', f'{size}')
         elif self.samples < sys.maxsize:
             sampling = True
+            samples = self.samples
             if self.samples > size:
-                self.samples = size
-            self.msg = self.msg.replace('{size}', f'{self.samples}/{size} sample')
+                samples = size
+            msg = self.msg.replace('{size}', f'{samples}/{size} sample')
         else:
             serially = True
-            self.msg = self.msg.replace('{size}', f'{size}')
+            msg = self.msg.replace('{size}', f'{size}')
 
-        for token in self.msg.split(' '):
+        for token in msg.split(' '):
             if '|' in token:
                 self.begin.append(token.split('|')[0])
                 if not sampling and not serially and not offloaded:
@@ -663,8 +701,19 @@ class ParallelMapHandler:
         else:
             log2(f'{" ".join(self.begin)} with {self.workers} workers...')
 
-def parallelize(collection: list, workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True):
-    return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect)
+# parallelizers: dict[str, ParallelMapHandler] = {}
+# parallelizer_lock = threading.Lock()
+
+def parallelize(collection: list, workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True, name = None):
+    return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect, name = name)
+    # if not name:
+    #     return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect)
+
+    # with parallelizer_lock:
+    #     if name not in parallelizers:
+    #         parallelizers[name] = ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect, name = name)
+
+    # return parallelizers[name]
 
 class OffloadService:
     def __init__(self, handler: 'OffloadHandler'):
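
parallelize() keeps its old call shape and adds the name pass-through; the commented-out block sketches a handler cache that is not active in this version. The message template is worth noting: 'present|past' tokens are split into begin/end phrases, '{size}' is replaced with the workload size, and a 'd`' prefix shows the message only in debug mode. A hypothetical call (the pod names and restart() are illustrative; map() on the returned service is assumed from the exec.map usage elsewhere in this diff):

    pods = ['pod-0', 'pod-1', 'pod-2']
    # begin phrase is printed up front ('Restarting 3 pods with 3 workers...');
    # the past-tense phrase is used for the completion message.
    # name= reuses a shared executor across calls instead of a throwaway pool.
    with parallelize(pods, workers=3, msg='Restarting|Restarted {size} pods', name='restart') as svc:
        results = svc.map(lambda pod: restart(pod))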
@@ -683,23 +732,24 @@ class OffloadService:
         return future
 
 class OffloadHandler(ParallelMapHandler):
-    def __init__(self, max_workers: int, msg: str = None):
-        super().__init__(None, max_workers, msg=msg, collect=False )
+    def __init__(self, max_workers: int, msg: str = None, name: str = None):
+        super().__init__(None, max_workers, msg=msg, collect=False, name=f'offload-{name}')
 
     def __enter__(self):
+        self.start_time = None
         self.calc_msgs()
 
         if self.workers > 1:
-            # if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
            self.start_time = time.time()
 
-            self.executor = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix='offload')
+            self.executor = self.pool(thread_name_prefix='offload')
+            # self.executor = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix='offload')
            self.executor.__enter__()
 
        return OffloadService(self)
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        if self.executor:
+        if not self.name and self.executor:
            self.executor.__exit__(exc_type, exc_val, exc_tb)
 
        if self.end:
@@ -707,38 +757,33 @@ class OffloadHandler(ParallelMapHandler):
 
         return False
 
-    # def size(self):
-    #     if not self.collection:
-    #         return 0
-
-    #     return len(self.collection)
-
     def calc_msgs(self):
         if not self.msg:
             return
 
+        self.begin = []
+        self.end = []
         size = self.size()
-        # return
 
         offloaded = False
         serially = False
         sampling = False
         if size == 0:
             offloaded = True
-            self.msg = self.msg.replace('{size}', '1')
+            msg = self.msg.replace('{size}', '1')
         elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
-            self.msg = self.msg.replace('{size}', f'{size}')
+            msg = self.msg.replace('{size}', f'{size}')
         elif self.samples < sys.maxsize:
             sampling = True
-            if self.samples > size:
-                self.samples = size
-            self.msg = self.msg.replace('{size}', f'{self.samples}/{size} sample')
+            samples = self.samples
+            if samples > size:
+                samples = size
+            msg = self.msg.replace('{size}', f'{samples}/{size} sample')
         else:
             serially = True
-            self.msg = self.msg.replace('{size}', f'{size}')
-            # return
+            msg = self.msg.replace('{size}', f'{size}')
 
-        for token in self.msg.split(' '):
+        for token in msg.split(' '):
             if '|' in token:
                 self.begin.append(token.split('|')[0])
                 if not sampling and not serially and not offloaded:
@@ -755,5 +800,83 @@ class OffloadHandler(ParallelMapHandler):
         else:
             log2(f'{" ".join(self.begin)} with {self.workers} workers...')
 
-def offload(max_workers: int = 3, msg: str = None):
-    return OffloadHandler(max_workers, msg = msg)
+def offload(max_workers: int = 3, msg: str = None, name: str = None):
+    return OffloadHandler(max_workers, msg = msg, name = name)
+
+def kaqing_log_file_name(suffix = 'log'):
+    return f"{log_dir()}/{datetime.now().strftime('%d%H%M%S')}.{suffix}"
+
+def log_dir():
+    return creating_dir(ConfigHolder().config.get('log-dir', '/tmp/qing-db/q/logs'))
+
+class LogFileHandler:
+    def __init__(self, suffix = 'log', condition=True):
+        self.suffix = suffix
+        self.condition = condition
+
+    def __enter__(self):
+        self.f = None
+        if self.condition:
+            self.f = open(kaqing_log_file_name(suffix=self.suffix), 'w')
+            self.f.__enter__()
+
+        return self.f
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.f:
+            self.f.__exit__(exc_type, exc_val, exc_tb)
+
+            if ConfigHolder().append_command_history:
+                ConfigHolder().append_command_history(f':cat {self.f.name}')
+
+        return False
+
+def kaqing_log_file(suffix = 'log', condition=True):
+    return LogFileHandler(suffix = suffix, condition=condition)
+
+class CommandLog:
+    log_file = None
+
+    def log(line: str, config: str = 'off'):
+        if config == 'file':
+            if not CommandLog.log_file:
+                try:
+                    CommandLog.log_file = open(kaqing_log_file_name(suffix='cmd.log'), 'w')
+                except:
+                    pass
+
+            try:
+                CommandLog.log_file.write(line + '\n')
+            except:
+                pass
+        elif config == 'on':
+            log2(line)
+
+    def close_log_file():
+        if CommandLog.log_file:
+            try:
+                CommandLog.log_file.close()
+            except:
+                pass
+
+            if ConfigHolder().append_command_history:
+                ConfigHolder().append_command_history(f':cat {CommandLog.log_file.name}')
+
+            CommandLog.log_file = None
+
+class ExecResult(ABC):
+    def exit_code(self) -> int:
+        pass
+
+    def cat_log_file_cmd(self) -> str:
+        pass
+
+_dirs_created = set()
+
+def creating_dir(dir):
+    if dir not in _dirs_created:
+        _dirs_created.add(dir)
+        if not os.path.exists(dir):
+            os.makedirs(dir, exist_ok=True)
+
+    return dir
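
CommandLog gives the debug channels three config-driven modes: 'off' drops the line, 'on' routes it through log2 to stderr, and 'file' lazily opens a timestamped file under log_dir() and appends until close_log_file(), which also pushes a ':cat <file>' entry into the REPL history. How the new debug_complete wiring behaves under each setting:

    line = 'DEBUG candidate completions: 12'
    CommandLog.log(line, config='off')   # dropped
    CommandLog.log(line, config='on')    # echoed via log2 (stderr)
    CommandLog.log(line, config='file')  # appended to <log-dir>/<timestamp>.cmd.log
    CommandLog.close_log_file()          # close, then push ':cat <file>' into history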
adam/utils_k8s/app_clusters.py CHANGED
@@ -2,7 +2,7 @@ import sys
 from typing import TypeVar
 
 from adam.utils_k8s.app_pods import AppPods
-from adam.pod_exec_result import PodExecResult
+from adam.utils_k8s.pod_exec_result import PodExecResult
 from adam.utils import log, log2
 from adam.utils_k8s.pods import Pods
 from .kube_context import KubeContext
@@ -11,8 +11,15 @@ T = TypeVar('T')
 
 # utility collection on app clusters; methods are all static
 class AppClusters:
-    def exec(pods: list[str], namespace: str, command: str, action: str = 'action',
-             max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', backgrounded = False) -> list[PodExecResult]:
+    def exec(pods: list[str],
+             namespace: str,
+             command: str,
+             action: str = 'action',
+             max_workers=0,
+             show_out=True,
+             on_any = False,
+             shell = '/bin/sh',
+             backgrounded = False) -> list[PodExecResult]:
         samples = 1 if on_any else sys.maxsize
         msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
         with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
@@ -23,6 +30,6 @@ class AppClusters:
             if result.stdout:
                 log(result.stdout)
             if result.stderr:
-                log2(result.stderr, file=sys.stderr)
+                log2(result.stderr)
 
         return results
adam/utils_k8s/app_pods.py CHANGED
@@ -4,7 +4,7 @@ from kubernetes import client
 
 from adam.config import Config
 from adam.utils_k8s.pods import Pods
-from adam.pod_exec_result import PodExecResult
+from adam.utils_k8s.pod_exec_result import PodExecResult
 from adam.repl_session import ReplSession
 
 # utility collection on app pods; methods are all static
@@ -25,12 +25,17 @@ class AppPods:
 
         return v1.list_namespaced_pod(namespace, label_selector=label_selector).items
 
-    def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh', backgrounded = False) -> PodExecResult:
+    def exec(pod_name: str,
+             namespace: str,
+             command: str,
+             show_out = True,
+             throw_err = False,
+             shell = '/bin/sh',
+             backgrounded = False) -> PodExecResult:
         container = Config().get('app.container-name', 'c3-server')
         r = Pods.exec(pod_name, container, namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, backgrounded = backgrounded)
 
-        if r and Config().get('repl.history.push-cat-remote-log-file', True):
-            if r.log_file and ReplSession().prompt_session:
-                ReplSession().prompt_session.history.append_string(f'@{r.pod} cat {r.log_file}')
+        if r and r.log_file:
+            ReplSession().append_history(f':cat {r.log_file}')
 
         return r
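
AppPods.exec now funnels the history push through a single ReplSession().append_history call instead of reaching into prompt_session and checking config inline. A sketch of what such a facade plausibly looks like (the repl_session.py +4/-3 change itself is not shown in this diff, so the guard placement is an assumption):

    class ReplSession:
        def append_history(self, entry: str):
            # assumed behavior: silently skip when no prompt session exists
            # or when the config flag disables history pushes
            if self.prompt_session and Config().get('repl.history.push-cat-remote-log-file', True):
                self.prompt_session.history.append_string(entry)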
adam/utils_k8s/cassandra_clusters.py CHANGED
@@ -3,7 +3,7 @@ from typing import TypeVar
 
 from adam.config import Config
 from adam.utils_k8s.cassandra_nodes import CassandraNodes
-from adam.pod_exec_result import PodExecResult
+from adam.utils_k8s.pod_exec_result import PodExecResult
 from adam.utils import log, log2
 from adam.utils_k8s.pods import Pods
 from adam.utils_k8s.statefulsets import StatefulSets
@@ -21,20 +21,24 @@ class CassandraClusters:
                 on_any = False,
                 shell = '/bin/sh',
                 backgrounded = False,
-                log_file = None) -> list[PodExecResult]:
+                log_file = None,
+                history=True) -> list[PodExecResult]:
 
         pods = StatefulSets.pod_names(sts, namespace)
         samples = 1 if on_any else sys.maxsize
+        if (backgrounded or command.endswith(' &')) and not log_file:
+            log_file = Pods.log_file(command)
+
         msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
         with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
-            results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, backgrounded, log_file))
+            results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, backgrounded, log_file, history))
             for result in results:
                 if show_out and not Config().is_debug():
                     log(result.command)
                 if result.stdout:
                     log(result.stdout)
                 if result.stderr:
-                    log2(result.stderr, file=sys.stderr)
+                    log2(result.stderr)
 
         return results
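
CassandraClusters.exec now derives a log file automatically for fire-and-forget work: if the call is backgrounded (explicitly, or because the command ends in ' &') and no log_file was supplied, one is generated via Pods.log_file(command), and the new history flag is threaded through to CassandraNodes.exec. A hypothetical invocation (statefulset/namespace names are illustrative; the leading sts/namespace parameters are inferred from this hunk's context lines):

    results = CassandraClusters.exec(
        'cassandra-dc1-default-sts', 'my-namespace',
        'nohup ./restart-agent.sh &',   # trailing ' &' triggers log_file derivation
        action='restart', max_workers=4, backgrounded=True)
    for r in results:
        print(r.pod, r.log_file)  # PodExecResult fields as used elsewhere in this diff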