kaqing 2.0.184__py3-none-any.whl → 2.0.214__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kaqing might be problematic; see the registry's advisory page for more details.

Files changed (152)
  1. adam/app_session.py +1 -1
  2. adam/batch.py +15 -15
  3. adam/commands/app/app.py +2 -2
  4. adam/commands/app/show_app_actions.py +1 -1
  5. adam/commands/{show → app}/show_login.py +1 -1
  6. adam/commands/app/utils_app.py +9 -1
  7. adam/commands/audit/audit.py +6 -20
  8. adam/commands/audit/audit_repair_tables.py +1 -1
  9. adam/commands/audit/audit_run.py +1 -1
  10. adam/commands/audit/completions_l.py +15 -0
  11. adam/commands/audit/show_last10.py +0 -1
  12. adam/commands/bash/bash.py +1 -1
  13. adam/commands/bash/utils_bash.py +1 -1
  14. adam/commands/cassandra/download_cassandra_log.py +45 -0
  15. adam/commands/cassandra/restart_cluster.py +47 -0
  16. adam/commands/cassandra/restart_node.py +51 -0
  17. adam/commands/cassandra/restart_nodes.py +47 -0
  18. adam/commands/{rollout.py → cassandra/rollout.py} +1 -1
  19. adam/commands/{show → cassandra}/show_cassandra_repairs.py +5 -3
  20. adam/commands/{show → cassandra}/show_cassandra_status.py +22 -15
  21. adam/commands/cassandra/show_processes.py +50 -0
  22. adam/commands/{show → cassandra}/show_storage.py +10 -8
  23. adam/commands/cli/__init__.py +0 -0
  24. adam/commands/{cli_commands.py → cli/cli_commands.py} +6 -1
  25. adam/commands/{clipboard_copy.py → cli/clipboard_copy.py} +2 -2
  26. adam/commands/{show/show_commands.py → cli/show_cli_commands.py} +2 -2
  27. adam/commands/command.py +22 -9
  28. adam/commands/commands_utils.py +14 -6
  29. adam/commands/config/__init__.py +0 -0
  30. adam/commands/{show → config}/show_params.py +1 -1
  31. adam/commands/{alter_tables.py → cql/alter_tables.py} +1 -1
  32. adam/commands/cql/completions_c.py +29 -0
  33. adam/commands/cql/cqlsh.py +2 -6
  34. adam/commands/cql/utils_cql.py +26 -17
  35. adam/commands/debug/__init__.py +0 -0
  36. adam/commands/debug/debug.py +22 -0
  37. adam/commands/debug/debug_completes.py +35 -0
  38. adam/commands/debug/debug_timings.py +35 -0
  39. adam/commands/debug/show_offloaded_completes.py +45 -0
  40. adam/commands/devices/device.py +30 -4
  41. adam/commands/devices/device_app.py +1 -1
  42. adam/commands/devices/device_export.py +5 -2
  43. adam/commands/devices/device_postgres.py +13 -3
  44. adam/commands/devices/devices.py +1 -1
  45. adam/commands/diag/__init__.py +0 -0
  46. adam/commands/{check.py → diag/check.py} +1 -1
  47. adam/commands/diag/generate_report.py +52 -0
  48. adam/commands/export/completions_x.py +11 -0
  49. adam/commands/export/download_export_session.py +2 -1
  50. adam/commands/export/export.py +0 -16
  51. adam/commands/export/export_databases.py +16 -10
  52. adam/commands/export/export_select.py +8 -33
  53. adam/commands/export/export_sessions.py +12 -11
  54. adam/commands/export/export_use.py +3 -3
  55. adam/commands/export/export_x_select.py +48 -0
  56. adam/commands/export/exporter.py +140 -53
  57. adam/commands/export/import_files.py +2 -2
  58. adam/commands/export/import_session.py +0 -4
  59. adam/commands/export/importer.py +11 -11
  60. adam/commands/export/importer_athena.py +15 -35
  61. adam/commands/export/importer_sqlite.py +19 -8
  62. adam/commands/export/show_column_counts.py +10 -10
  63. adam/commands/export/show_export_databases.py +2 -1
  64. adam/commands/export/show_export_session.py +1 -1
  65. adam/commands/export/show_export_sessions.py +1 -1
  66. adam/commands/export/utils_export.py +38 -15
  67. adam/commands/fs/__init__.py +0 -0
  68. adam/commands/{cat.py → fs/cat.py} +2 -2
  69. adam/commands/fs/cat_local.py +42 -0
  70. adam/commands/{cd.py → fs/cd.py} +2 -2
  71. adam/commands/{download_file.py → fs/download_file.py} +5 -5
  72. adam/commands/{find_files.py → fs/find_files.py} +4 -4
  73. adam/commands/{find_processes.py → fs/find_processes.py} +3 -3
  74. adam/commands/{head.py → fs/head.py} +2 -2
  75. adam/commands/{ls.py → fs/ls.py} +2 -2
  76. adam/commands/fs/ls_local.py +40 -0
  77. adam/commands/fs/rm.py +18 -0
  78. adam/commands/fs/rm_downloads.py +39 -0
  79. adam/commands/fs/rm_logs.py +38 -0
  80. adam/commands/{show → fs}/show_adam.py +1 -1
  81. adam/commands/intermediate_command.py +3 -0
  82. adam/commands/medusa/medusa_restore.py +2 -16
  83. adam/commands/medusa/utils_medusa.py +15 -0
  84. adam/commands/nodetool/__init__.py +0 -0
  85. adam/commands/{nodetool.py → nodetool/nodetool.py} +3 -8
  86. adam/commands/postgres/completions_p.py +22 -0
  87. adam/commands/postgres/postgres.py +7 -14
  88. adam/commands/postgres/postgres_databases.py +3 -3
  89. adam/commands/postgres/postgres_ls.py +1 -1
  90. adam/commands/postgres/utils_postgres.py +12 -2
  91. adam/commands/preview_table.py +1 -1
  92. adam/commands/reaper/reaper_schedule_activate.py +6 -2
  93. adam/commands/reaper/reaper_schedule_start.py +1 -2
  94. adam/commands/reaper/reaper_schedule_stop.py +1 -2
  95. adam/commands/reaper/utils_reaper.py +10 -1
  96. adam/commands/repair/repair_scan.py +0 -2
  97. adam/commands/repair/repair_stop.py +0 -1
  98. adam/commands/{show/show.py → show.py} +12 -11
  99. adam/config.py +4 -5
  100. adam/embedded_params.py +1 -1
  101. adam/repl.py +22 -9
  102. adam/repl_commands.py +50 -42
  103. adam/repl_session.py +9 -1
  104. adam/repl_state.py +16 -1
  105. adam/sql/async_executor.py +62 -0
  106. adam/sql/lark_completer.py +286 -0
  107. adam/sql/lark_parser.py +604 -0
  108. adam/sql/qingl.lark +1076 -0
  109. adam/sso/cred_cache.py +2 -5
  110. adam/utils.py +216 -79
  111. adam/utils_k8s/app_clusters.py +11 -4
  112. adam/utils_k8s/app_pods.py +10 -5
  113. adam/utils_k8s/cassandra_clusters.py +8 -4
  114. adam/utils_k8s/cassandra_nodes.py +14 -5
  115. adam/utils_k8s/k8s.py +9 -0
  116. adam/utils_k8s/kube_context.py +1 -4
  117. adam/{pod_exec_result.py → utils_k8s/pod_exec_result.py} +8 -2
  118. adam/utils_k8s/pods.py +83 -24
  119. adam/utils_k8s/statefulsets.py +5 -2
  120. adam/utils_local.py +78 -2
  121. adam/utils_repl/appendable_completer.py +6 -0
  122. adam/utils_repl/repl_completer.py +51 -4
  123. adam/utils_sqlite.py +3 -8
  124. adam/version.py +1 -1
  125. {kaqing-2.0.184.dist-info → kaqing-2.0.214.dist-info}/METADATA +1 -1
  126. kaqing-2.0.214.dist-info/RECORD +272 -0
  127. kaqing-2.0.214.dist-info/top_level.txt +2 -0
  128. teddy/__init__.py +0 -0
  129. teddy/lark_parser.py +436 -0
  130. teddy/lark_parser2.py +618 -0
  131. adam/commands/cql/cql_completions.py +0 -32
  132. adam/commands/export/export_select_x.py +0 -54
  133. adam/commands/logs.py +0 -37
  134. adam/commands/postgres/psql_completions.py +0 -11
  135. adam/commands/report.py +0 -61
  136. adam/commands/restart.py +0 -60
  137. adam/commands/show/show_processes.py +0 -49
  138. kaqing-2.0.184.dist-info/RECORD +0 -244
  139. kaqing-2.0.184.dist-info/top_level.txt +0 -1
  140. /adam/commands/{login.py → app/login.py} +0 -0
  141. /adam/commands/{show → cassandra}/__init__.py +0 -0
  142. /adam/commands/{show → cassandra}/show_cassandra_version.py +0 -0
  143. /adam/commands/{watch.py → cassandra/watch.py} +0 -0
  144. /adam/commands/{param_get.py → config/param_get.py} +0 -0
  145. /adam/commands/{param_set.py → config/param_set.py} +0 -0
  146. /adam/commands/{issues.py → diag/issues.py} +0 -0
  147. /adam/commands/{pwd.py → fs/pwd.py} +0 -0
  148. /adam/commands/{shell.py → fs/shell.py} +0 -0
  149. /adam/commands/{show → fs}/show_host.py +0 -0
  150. /adam/commands/{nodetool_commands.py → nodetool/nodetool_commands.py} +0 -0
  151. {kaqing-2.0.184.dist-info → kaqing-2.0.214.dist-info}/WHEEL +0 -0
  152. {kaqing-2.0.184.dist-info → kaqing-2.0.214.dist-info}/entry_points.txt +0 -0
adam/sso/cred_cache.py CHANGED
@@ -2,8 +2,7 @@ import os
2
2
  from pathlib import Path
3
3
  from dotenv import load_dotenv
4
4
 
5
- from adam.config import Config
6
- from adam.utils import debug, log_exc
5
+ from adam.utils import creating_dir, debug, log_exc
7
6
  from adam.utils_k8s.kube_context import KubeContext
8
7
 
9
8
  class CredCache:
@@ -15,7 +14,7 @@ class CredCache:
15
14
 
16
15
  def __init__(self):
17
16
  if not hasattr(self, 'env_f'):
18
- self.dir = f'{Path.home()}/.kaqing'
17
+ self.dir = creating_dir(f'{Path.home()}/.kaqing')
19
18
  self.env_f = f'{self.dir}/.credentials'
20
19
  # immutable - cannot reload with different file content
21
20
  load_dotenv(dotenv_path=self.env_f)
@@ -44,8 +43,6 @@ class CredCache:
44
43
  updated.append(f'IDP_PASSWORD={password}')
45
44
 
46
45
  if updated:
47
- if not os.path.exists(self.env_f):
48
- os.makedirs(self.dir, exist_ok=True)
49
46
  with open(self.env_f, 'w') as file:
50
47
  file.write('\n'.join(updated))
51
48
 
adam/utils.py CHANGED
@@ -1,3 +1,4 @@
1
+ from abc import ABC
1
2
  from concurrent.futures import Future, ThreadPoolExecutor
2
3
  from contextlib import redirect_stdout
3
4
  import copy
@@ -12,13 +13,14 @@ import random
12
13
  import string
13
14
  import threading
14
15
  import traceback
15
- from typing import Callable, Iterator, TypeVar, Union
16
+ from typing import Callable, Iterator, TextIO, TypeVar, Union
16
17
  from dateutil import parser
17
18
  import subprocess
18
19
  import sys
19
20
  import time
20
21
  import click
21
22
  import yaml
23
+ from prompt_toolkit.completion import Completer
22
24
 
23
25
  from . import __version__
24
26
 
@@ -26,10 +28,28 @@ T = TypeVar('T')
26
28
 
27
29
  log_state = threading.local()
28
30
 
29
- class LogConfig:
30
- is_debug = lambda: False
31
- is_debug_timing = lambda: False
32
- is_display_help = True
31
+ class ConfigReadable:
32
+ def is_debug() -> bool:
33
+ pass
34
+
35
+ def get(self, key: str, default: T) -> T:
36
+ pass
37
+
38
+ class ConfigHolder:
39
+ # the singleton pattern
40
+ def __new__(cls, *args, **kwargs):
41
+ if not hasattr(cls, 'instance'): cls.instance = super(ConfigHolder, cls).__new__(cls)
42
+
43
+ return cls.instance
44
+
45
+ def __init__(self):
46
+ if not hasattr(self, 'config'):
47
+ # set by Config
48
+ self.config: 'ConfigReadable' = None
49
+ # only for testing
50
+ self.is_display_help = True
51
+ # set by ReplSession
52
+ self.append_command_history = lambda entry: None
33
53
 
34
54
  NO_SORT = 0
35
55
  SORT = 1
@@ -109,14 +129,24 @@ def log(s = None):
109
129
 
110
130
  return True
111
131
 
112
- def log2(s = None, nl = True):
132
+ def log2(s = None, nl = True, file: str = None):
113
133
  if not loggable():
114
134
  return False
115
135
 
116
136
  if s:
117
- click.echo(s, err=True, nl=nl)
137
+ if file:
138
+ with open(file, 'at') as f:
139
+ f.write(s)
140
+ if nl:
141
+ f.write('\n')
142
+ else:
143
+ click.echo(s, err=True, nl=nl)
118
144
  else:
119
- print(file=sys.stderr)
145
+ if file:
146
+ with open(file, 'at') as f:
147
+ f.write('\n')
148
+ else:
149
+ print(file=sys.stderr)
120
150
 
121
151
  return True
122
152
 
@@ -160,7 +190,11 @@ def deep_merge_dicts(dict1, dict2):
160
190
  merged_dict[key] = deep_merge_dicts(merged_dict[key], value)
161
191
  elif key not in merged_dict or value:
162
192
  # Otherwise, overwrite or add the value from dict2
163
- merged_dict[key] = value
193
+ if key in merged_dict and isinstance(merged_dict[key], Completer):
194
+ pass
195
+ # print('SEAN completer found, ignoring', key, value)
196
+ else:
197
+ merged_dict[key] = value
164
198
  return merged_dict
165
199
 
166
200
  def deep_sort_dict(d):
@@ -198,7 +232,7 @@ def get_deep_keys(d, current_path=""):
198
232
  return keys
199
233
 
200
234
  def display_help(replace_arg = False):
201
- if not LogConfig.is_display_help:
235
+ if not ConfigHolder().is_display_help:
202
236
  return
203
237
 
204
238
  args = copy.copy(sys.argv)
@@ -250,28 +284,10 @@ def json_to_csv(json_data: list[dict[any, any]], delimiter: str = ','):
250
284
  else:
251
285
  return None
252
286
 
253
- def log_to_file(config: dict[any, any]):
254
- with log_exc():
255
- base = f"/kaqing/logs"
256
- os.makedirs(base, exist_ok=True)
257
-
258
- now = datetime.now()
259
- timestamp_str = now.strftime("%Y%m%d-%H%M%S")
260
- filename = f"{base}/login.{timestamp_str}.txt"
261
- with open(filename, 'w') as f:
262
- if isinstance(config, dict):
263
- try:
264
- json.dump(config, f, indent=4)
265
- except:
266
- f.write(config)
267
- else:
268
- f.write(config)
269
-
270
287
  def copy_config_file(rel_path: str, module: str, suffix: str = '.yaml', show_out = True):
271
- dir = f'{Path.home()}/.kaqing'
288
+ dir = creating_dir(f'{Path.home()}/.kaqing')
272
289
  path = f'{dir}/{rel_path}'
273
290
  if not os.path.exists(path):
274
- os.makedirs(dir, exist_ok=True)
275
291
  module = importlib.import_module(module)
276
292
  with open(path, 'w') as f:
277
293
  yaml.dump(module.config(), f, default_flow_style=False)
@@ -287,11 +303,15 @@ def is_lambda(func):
287
303
  return callable(func) and hasattr(func, '__name__') and func.__name__ == '<lambda>'
288
304
 
289
305
  def debug(s = None):
290
- if LogConfig.is_debug():
306
+ if ConfigHolder().config.is_debug():
291
307
  log2(f'DEBUG {s}')
292
308
 
309
+ def debug_complete(s = None):
310
+ CommandLog.log(f'DEBUG {s}', config=ConfigHolder().config.get('debugs.complete', 'off'))
311
+
293
312
  def debug_trace():
294
- if LogConfig.is_debug():
313
+ if ConfigHolder().config.is_debug():
314
+ # if LogConfig.is_debug():
295
315
  log2(traceback.format_exc())
296
316
 
297
317
  def in_docker() -> bool:
@@ -309,34 +329,42 @@ def in_docker() -> bool:
309
329
  return False
310
330
 
311
331
  class Ing:
312
- def __init__(self, msg: str, suppress_log=False):
332
+ def __init__(self, msg: str, suppress_log=False, job_log: str = None, condition = True):
313
333
  self.msg = msg
314
334
  self.suppress_log = suppress_log
335
+ self.job_log = job_log
336
+ self.condition = condition
315
337
 
316
338
  def __enter__(self):
339
+ if not self.condition:
340
+ return None
341
+
317
342
  if not hasattr(log_state, 'ing_cnt'):
318
343
  log_state.ing_cnt = 0
319
344
 
320
345
  try:
321
346
  if not log_state.ing_cnt:
322
- if not self.suppress_log and not LogConfig.is_debug():
323
- log2(f'{self.msg}...', nl=False)
347
+ if not self.suppress_log and not ConfigHolder().config.is_debug():
348
+ log2(f'{self.msg}...', nl=False, file=self.job_log)
324
349
 
325
350
  return None
326
351
  finally:
327
352
  log_state.ing_cnt += 1
328
353
 
329
354
  def __exit__(self, exc_type, exc_val, exc_tb):
355
+ if not self.condition:
356
+ return False
357
+
330
358
  log_state.ing_cnt -= 1
331
359
  if not log_state.ing_cnt:
332
- if not self.suppress_log and not LogConfig.is_debug():
333
- log2(' OK')
360
+ if not self.suppress_log and not ConfigHolder().config.is_debug():
361
+ log2(' OK', file=self.job_log)
334
362
 
335
363
  return False
336
364
 
337
- def ing(msg: str, body: Callable[[], None]=None, suppress_log=False):
365
+ def ing(msg: str, body: Callable[[], None]=None, suppress_log=False, job_log: str = None, condition = True):
338
366
  if not body:
339
- return Ing(msg, suppress_log=suppress_log)
367
+ return Ing(msg, suppress_log=suppress_log, job_log=job_log, condition=condition)
340
368
 
341
369
  r = None
342
370
 
@@ -350,7 +378,7 @@ def ing(msg: str, body: Callable[[], None]=None, suppress_log=False):
350
378
  return r
351
379
 
352
380
  def loggable():
353
- return LogConfig.is_debug() or not hasattr(log_state, 'ing_cnt') or not log_state.ing_cnt
381
+ return ConfigHolder().config and ConfigHolder().config.is_debug() or not hasattr(log_state, 'ing_cnt') or not log_state.ing_cnt
354
382
 
355
383
  class TimingNode:
356
384
  def __init__(self, depth: int, s0: time.time = time.time(), line: str = None):
@@ -378,7 +406,7 @@ class LogTiming:
378
406
  self.s0 = s0
379
407
 
380
408
  def __enter__(self):
381
- if not LogConfig.is_debug_timing():
409
+ if (config := ConfigHolder().config.get('debugs.timings', 'off')) not in ['on', 'file']:
382
410
  return
383
411
 
384
412
  if not hasattr(log_state, 'timings'):
@@ -390,7 +418,7 @@ class LogTiming:
390
418
  self.s0 = time.time()
391
419
 
392
420
  def __exit__(self, exc_type, exc_val, exc_tb):
393
- if not LogConfig.is_debug_timing():
421
+ if (config := ConfigHolder().config.get('debugs.timings', 'off')) not in ['on', 'file']:
394
422
  return False
395
423
 
396
424
  child = log_state.timings
@@ -401,7 +429,9 @@ class LogTiming:
401
429
  log_state.timings = self.me
402
430
 
403
431
  if not self.me.depth:
404
- log2(self.me.tree())
432
+ # log timings finally
433
+ CommandLog.log(self.me.tree(), config)
434
+
405
435
  log_state.timings = TimingNode(0)
406
436
 
407
437
  return False
@@ -410,7 +440,7 @@ def log_timing(msg: str, body: Callable[[], None]=None, s0: time.time = None):
410
440
  if not s0 and not body:
411
441
  return LogTiming(msg, s0=s0)
412
442
 
413
- if not LogConfig.is_debug_timing():
443
+ if not ConfigHolder().config.get('debugs.timings', False):
414
444
  if body:
415
445
  return body()
416
446
 
@@ -430,7 +460,9 @@ def log_timing(msg: str, body: Callable[[], None]=None, s0: time.time = None):
430
460
 
431
461
  def timing_log_line(depth: int, msg: str, s0: time.time):
432
462
  elapsed = time.time() - s0
433
- prefix = '[timings] '
463
+ offloaded = '-' if threading.current_thread().name.startswith('offload') or threading.current_thread().name.startswith('async') else '+'
464
+ prefix = f'[{offloaded} timings] '
465
+
434
466
  if depth:
435
467
  if elapsed > 0.01:
436
468
  prefix = (' ' * (depth-1)) + '* '
@@ -526,7 +558,7 @@ class LogTrace:
526
558
  elif self.err_msg is not False and self.err_msg:
527
559
  log2(self.err_msg)
528
560
 
529
- if self.err_msg is not False and LogConfig.is_debug():
561
+ if self.err_msg is not False and ConfigHolder().config.is_debug():
530
562
  traceback.print_exception(exc_type, exc_val, exc_tb, file=sys.stderr)
531
563
 
532
564
  # swallow exception
@@ -567,37 +599,44 @@ class ParallelService:
567
599
  else:
568
600
  return iterator
569
601
 
602
+ thread_pools: dict[str, ThreadPoolExecutor] = {}
603
+ thread_pool_lock = threading.Lock()
604
+
570
605
  class ParallelMapHandler:
571
- def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True):
606
+ def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True, name = None):
572
607
  self.collection = collection
573
608
  self.workers = workers
574
609
  self.executor = None
575
610
  self.samples = samples
576
611
  self.msg = msg
577
612
  if msg and msg.startswith('d`'):
578
- if LogConfig.is_debug():
613
+ if ConfigHolder().config.is_debug():
579
614
  self.msg = msg.replace('d`', '', 1)
580
615
  else:
581
616
  self.msg = None
582
617
  self.collect = collect
618
+ self.name = name
583
619
 
584
620
  self.begin = []
585
621
  self.end = []
586
622
  self.start_time = None
587
623
 
588
624
  def __enter__(self):
625
+ self.start_time = None
626
+
589
627
  self.calc_msgs()
590
628
 
591
629
  if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
592
630
  self.start_time = time.time()
593
631
 
594
- self.executor = ThreadPoolExecutor(max_workers=self.workers)
632
+ self.executor = self.pool()
633
+ # self.executor = ThreadPoolExecutor(max_workers=self.workers)
595
634
  self.executor.__enter__()
596
635
 
597
636
  return ParallelService(self)
598
637
 
599
638
  def __exit__(self, exc_type, exc_val, exc_tb):
600
- if self.executor:
639
+ if not self.name and self.executor:
601
640
  self.executor.__exit__(exc_type, exc_val, exc_tb)
602
641
 
603
642
  if self.end:
@@ -605,6 +644,15 @@ class ParallelMapHandler:
605
644
 
606
645
  return False
607
646
 
647
+ def pool(self, thread_name_prefix: str = None):
648
+ if not self.name:
649
+ return ThreadPoolExecutor(max_workers=self.workers)
650
+
651
+ if self.name not in thread_pools:
652
+ thread_pools[self.name] = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix=thread_name_prefix)
653
+
654
+ return thread_pools[self.name]
655
+
608
656
  def size(self):
609
657
  if not self.collection:
610
658
  return 0
@@ -615,25 +663,28 @@ class ParallelMapHandler:
615
663
  if not self.msg:
616
664
  return
617
665
 
666
+ self.begin = []
667
+ self.end = []
618
668
  size = self.size()
619
669
  offloaded = False
620
670
  serially = False
621
671
  sampling = False
622
672
  if size == 0:
623
673
  offloaded = True
624
- self.msg = self.msg.replace('{size}', '1')
674
+ msg = self.msg.replace('{size}', '1')
625
675
  elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
626
- self.msg = self.msg.replace('{size}', f'{size}')
676
+ msg = self.msg.replace('{size}', f'{size}')
627
677
  elif self.samples < sys.maxsize:
628
678
  sampling = True
679
+ samples = self.samples
629
680
  if self.samples > size:
630
- self.samples = size
631
- self.msg = self.msg.replace('{size}', f'{self.samples}/{size} sample')
681
+ samples = size
682
+ msg = self.msg.replace('{size}', f'{samples}/{size} sample')
632
683
  else:
633
684
  serially = True
634
- self.msg = self.msg.replace('{size}', f'{size}')
685
+ msg = self.msg.replace('{size}', f'{size}')
635
686
 
636
- for token in self.msg.split(' '):
687
+ for token in msg.split(' '):
637
688
  if '|' in token:
638
689
  self.begin.append(token.split('|')[0])
639
690
  if not sampling and not serially and not offloaded:
@@ -650,8 +701,19 @@ class ParallelMapHandler:
650
701
  else:
651
702
  log2(f'{" ".join(self.begin)} with {self.workers} workers...')
652
703
 
653
- def parallelize(collection: list, workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True):
654
- return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect)
704
+ # parallelizers: dict[str, ParallelMapHandler] = {}
705
+ # parallelizer_lock = threading.Lock()
706
+
707
+ def parallelize(collection: list, workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True, name = None):
708
+ return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect, name = name)
709
+ # if not name:
710
+ # return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect)
711
+
712
+ # with parallelizer_lock:
713
+ # if name not in parallelizers:
714
+ # parallelizers[name] = ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect, name = name)
715
+
716
+ # return parallelizers[name]
655
717
 
656
718
  class OffloadService:
657
719
  def __init__(self, handler: 'OffloadHandler'):
@@ -670,22 +732,24 @@ class OffloadService:
670
732
  return future
671
733
 
672
734
  class OffloadHandler(ParallelMapHandler):
673
- def __init__(self, max_workers: int, msg: str = None):
674
- super().__init__(None, max_workers, msg=msg, collect=False )
735
+ def __init__(self, max_workers: int, msg: str = None, name: str = None):
736
+ super().__init__(None, max_workers, msg=msg, collect=False, name=f'offload-{name}')
675
737
 
676
738
  def __enter__(self):
739
+ self.start_time = None
677
740
  self.calc_msgs()
678
741
 
679
- if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
742
+ if self.workers > 1:
680
743
  self.start_time = time.time()
681
744
 
682
- self.executor = ThreadPoolExecutor(max_workers=self.workers)
745
+ self.executor = self.pool(thread_name_prefix='offload')
746
+ # self.executor = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix='offload')
683
747
  self.executor.__enter__()
684
748
 
685
749
  return OffloadService(self)
686
750
 
687
751
  def __exit__(self, exc_type, exc_val, exc_tb):
688
- if self.executor:
752
+ if not self.name and self.executor:
689
753
  self.executor.__exit__(exc_type, exc_val, exc_tb)
690
754
 
691
755
  if self.end:
@@ -693,38 +757,33 @@ class OffloadHandler(ParallelMapHandler):
693
757
 
694
758
  return False
695
759
 
696
- def size(self):
697
- if not self.collection:
698
- return 0
699
-
700
- return len(self.collection)
701
-
702
760
  def calc_msgs(self):
703
761
  if not self.msg:
704
762
  return
705
763
 
764
+ self.begin = []
765
+ self.end = []
706
766
  size = self.size()
707
- # return
708
767
 
709
768
  offloaded = False
710
769
  serially = False
711
770
  sampling = False
712
771
  if size == 0:
713
772
  offloaded = True
714
- self.msg = self.msg.replace('{size}', '1')
773
+ msg = self.msg.replace('{size}', '1')
715
774
  elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
716
- self.msg = self.msg.replace('{size}', f'{size}')
775
+ msg = self.msg.replace('{size}', f'{size}')
717
776
  elif self.samples < sys.maxsize:
718
777
  sampling = True
719
- if self.samples > size:
720
- self.samples = size
721
- self.msg = self.msg.replace('{size}', f'{self.samples}/{size} sample')
778
+ samples = self.samples
779
+ if samples > size:
780
+ samples = size
781
+ msg = self.msg.replace('{size}', f'{samples}/{size} sample')
722
782
  else:
723
783
  serially = True
724
- self.msg = self.msg.replace('{size}', f'{size}')
725
- # return
784
+ msg = self.msg.replace('{size}', f'{size}')
726
785
 
727
- for token in self.msg.split(' '):
786
+ for token in msg.split(' '):
728
787
  if '|' in token:
729
788
  self.begin.append(token.split('|')[0])
730
789
  if not sampling and not serially and not offloaded:
@@ -741,5 +800,83 @@ class OffloadHandler(ParallelMapHandler):
741
800
  else:
742
801
  log2(f'{" ".join(self.begin)} with {self.workers} workers...')
743
802
 
744
- def offload(max_workers: int = 3, msg: str = None):
745
- return OffloadHandler(max_workers, msg = msg)
803
+ def offload(max_workers: int = 3, msg: str = None, name: str = None):
804
+ return OffloadHandler(max_workers, msg = msg, name = name)
805
+
806
+ def kaqing_log_file_name(suffix = 'log'):
807
+ return f"{log_dir()}/{datetime.now().strftime('%d%H%M%S')}.{suffix}"
808
+
809
+ def log_dir():
810
+ return creating_dir(ConfigHolder().config.get('log-dir', '/tmp/qing-db/q/logs'))
811
+
812
+ class LogFileHandler:
813
+ def __init__(self, suffix = 'log', condition=True):
814
+ self.suffix = suffix
815
+ self.condition = condition
816
+
817
+ def __enter__(self):
818
+ self.f = None
819
+ if self.condition:
820
+ self.f = open(kaqing_log_file_name(suffix=self.suffix), 'w')
821
+ self.f.__enter__()
822
+
823
+ return self.f
824
+
825
+ def __exit__(self, exc_type, exc_val, exc_tb):
826
+ if self.f:
827
+ self.f.__exit__(exc_type, exc_val, exc_tb)
828
+
829
+ if ConfigHolder().append_command_history:
830
+ ConfigHolder().append_command_history(f':cat {self.f.name}')
831
+
832
+ return False
833
+
834
+ def kaqing_log_file(suffix = 'log', condition=True):
835
+ return LogFileHandler(suffix = suffix, condition=condition)
836
+
837
+ class CommandLog:
838
+ log_file = None
839
+
840
+ def log(line: str, config: str = 'off'):
841
+ if config == 'file':
842
+ if not CommandLog.log_file:
843
+ try:
844
+ CommandLog.log_file = open(kaqing_log_file_name(suffix='cmd.log'), 'w')
845
+ except:
846
+ pass
847
+
848
+ try:
849
+ CommandLog.log_file.write(line + '\n')
850
+ except:
851
+ pass
852
+ elif config == 'on':
853
+ log2(line)
854
+
855
+ def close_log_file():
856
+ if CommandLog.log_file:
857
+ try:
858
+ CommandLog.log_file.close()
859
+ except:
860
+ pass
861
+
862
+ if ConfigHolder().append_command_history:
863
+ ConfigHolder().append_command_history(f':cat {CommandLog.log_file.name}')
864
+
865
+ CommandLog.log_file = None
866
+
867
+ class ExecResult(ABC):
868
+ def exit_code(self) -> int:
869
+ pass
870
+
871
+ def cat_log_file_cmd(self) -> str:
872
+ pass
873
+
874
+ _dirs_created = set()
875
+
876
+ def creating_dir(dir):
877
+ if dir not in _dirs_created:
878
+ _dirs_created.add(dir)
879
+ if not os.path.exists(dir):
880
+ os.makedirs(dir, exist_ok=True)
881
+
882
+ return dir
@@ -2,7 +2,7 @@ import sys
2
2
  from typing import TypeVar
3
3
 
4
4
  from adam.utils_k8s.app_pods import AppPods
5
- from adam.pod_exec_result import PodExecResult
5
+ from adam.utils_k8s.pod_exec_result import PodExecResult
6
6
  from adam.utils import log, log2
7
7
  from adam.utils_k8s.pods import Pods
8
8
  from .kube_context import KubeContext
@@ -11,8 +11,15 @@ T = TypeVar('T')
11
11
 
12
12
  # utility collection on app clusters; methods are all static
13
13
  class AppClusters:
14
- def exec(pods: list[str], namespace: str, command: str, action: str = 'action',
15
- max_workers=0, show_out=True, on_any = False, shell = '/bin/sh', backgrounded = False) -> list[PodExecResult]:
14
+ def exec(pods: list[str],
15
+ namespace: str,
16
+ command: str,
17
+ action: str = 'action',
18
+ max_workers=0,
19
+ show_out=True,
20
+ on_any = False,
21
+ shell = '/bin/sh',
22
+ backgrounded = False) -> list[PodExecResult]:
16
23
  samples = 1 if on_any else sys.maxsize
17
24
  msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
18
25
  with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
@@ -23,6 +30,6 @@ class AppClusters:
23
30
  if result.stdout:
24
31
  log(result.stdout)
25
32
  if result.stderr:
26
- log2(result.stderr, file=sys.stderr)
33
+ log2(result.stderr)
27
34
 
28
35
  return results
@@ -4,7 +4,7 @@ from kubernetes import client
4
4
 
5
5
  from adam.config import Config
6
6
  from adam.utils_k8s.pods import Pods
7
- from adam.pod_exec_result import PodExecResult
7
+ from adam.utils_k8s.pod_exec_result import PodExecResult
8
8
  from adam.repl_session import ReplSession
9
9
 
10
10
  # utility collection on app pods; methods are all static
@@ -25,12 +25,17 @@ class AppPods:
25
25
 
26
26
  return v1.list_namespaced_pod(namespace, label_selector=label_selector).items
27
27
 
28
- def exec(pod_name: str, namespace: str, command: str, show_out = True, throw_err = False, shell = '/bin/sh', backgrounded = False) -> PodExecResult:
28
+ def exec(pod_name: str,
29
+ namespace: str,
30
+ command: str,
31
+ show_out = True,
32
+ throw_err = False,
33
+ shell = '/bin/sh',
34
+ backgrounded = False) -> PodExecResult:
29
35
  container = Config().get('app.container-name', 'c3-server')
30
36
  r = Pods.exec(pod_name, container, namespace, command, show_out = show_out, throw_err = throw_err, shell = shell, backgrounded = backgrounded)
31
37
 
32
- if r and Config().get('repl.history.push-cat-remote-log-file', True):
33
- if r.log_file and ReplSession().prompt_session:
34
- ReplSession().prompt_session.history.append_string(f'@{r.pod} cat {r.log_file}')
38
+ if r and r.log_file:
39
+ ReplSession().append_history(f':cat {r.log_file}')
35
40
 
36
41
  return r
@@ -3,7 +3,7 @@ from typing import TypeVar
3
3
 
4
4
  from adam.config import Config
5
5
  from adam.utils_k8s.cassandra_nodes import CassandraNodes
6
- from adam.pod_exec_result import PodExecResult
6
+ from adam.utils_k8s.pod_exec_result import PodExecResult
7
7
  from adam.utils import log, log2
8
8
  from adam.utils_k8s.pods import Pods
9
9
  from adam.utils_k8s.statefulsets import StatefulSets
@@ -21,20 +21,24 @@ class CassandraClusters:
21
21
  on_any = False,
22
22
  shell = '/bin/sh',
23
23
  backgrounded = False,
24
- log_file = None) -> list[PodExecResult]:
24
+ log_file = None,
25
+ history=True) -> list[PodExecResult]:
25
26
 
26
27
  pods = StatefulSets.pod_names(sts, namespace)
27
28
  samples = 1 if on_any else sys.maxsize
29
+ if (backgrounded or command.endswith(' &')) and not log_file:
30
+ log_file = Pods.log_file(command)
31
+
28
32
  msg = 'd`Running|Ran ' + action + ' command onto {size} pods'
29
33
  with Pods.parallelize(pods, max_workers, samples, msg, action=action) as exec:
30
- results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, backgrounded, log_file))
34
+ results: list[PodExecResult] = exec.map(lambda pod: CassandraNodes.exec(pod, namespace, command, False, False, shell, backgrounded, log_file, history))
31
35
  for result in results:
32
36
  if show_out and not Config().is_debug():
33
37
  log(result.command)
34
38
  if result.stdout:
35
39
  log(result.stdout)
36
40
  if result.stderr:
37
- log2(result.stderr, file=sys.stderr)
41
+ log2(result.stderr)
38
42
 
39
43
  return results
40
44