kaqing 2.0.145__py3-none-any.whl → 2.0.172__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kaqing might be problematic; see the registry's advisory page for more details.

Files changed (172)
  1. adam/__init__.py +0 -2
  2. adam/app_session.py +8 -11
  3. adam/batch.py +3 -3
  4. adam/checks/check_utils.py +14 -46
  5. adam/checks/cpu.py +7 -1
  6. adam/checks/cpu_metrics.py +52 -0
  7. adam/checks/disk.py +2 -3
  8. adam/columns/columns.py +3 -1
  9. adam/columns/cpu.py +3 -1
  10. adam/columns/cpu_metrics.py +22 -0
  11. adam/columns/memory.py +3 -4
  12. adam/commands/__init__.py +18 -0
  13. adam/commands/alter_tables.py +43 -47
  14. adam/commands/audit/audit.py +22 -23
  15. adam/commands/audit/audit_repair_tables.py +14 -17
  16. adam/commands/audit/audit_run.py +15 -23
  17. adam/commands/audit/show_last10.py +10 -13
  18. adam/commands/audit/show_slow10.py +10 -13
  19. adam/commands/audit/show_top10.py +10 -13
  20. adam/commands/audit/utils_show_top10.py +2 -3
  21. adam/commands/bash/__init__.py +5 -0
  22. adam/commands/bash/bash.py +7 -104
  23. adam/commands/bash/utils_bash.py +16 -0
  24. adam/commands/cat.py +13 -19
  25. adam/commands/cd.py +8 -10
  26. adam/commands/check.py +20 -21
  27. adam/commands/cli_commands.py +2 -3
  28. adam/commands/code.py +20 -23
  29. adam/commands/command.py +120 -39
  30. adam/commands/commands_utils.py +8 -17
  31. adam/commands/cp.py +33 -39
  32. adam/commands/cql/cql_completions.py +9 -4
  33. adam/commands/cql/cqlsh.py +10 -30
  34. adam/commands/cql/{cql_utils.py → utils_cql.py} +149 -15
  35. adam/commands/deploy/code_start.py +7 -10
  36. adam/commands/deploy/code_stop.py +4 -21
  37. adam/commands/deploy/code_utils.py +3 -3
  38. adam/commands/deploy/deploy.py +4 -27
  39. adam/commands/deploy/deploy_frontend.py +14 -17
  40. adam/commands/deploy/deploy_pg_agent.py +2 -5
  41. adam/commands/deploy/deploy_pod.py +64 -68
  42. adam/commands/deploy/undeploy.py +4 -27
  43. adam/commands/deploy/undeploy_frontend.py +4 -7
  44. adam/commands/deploy/undeploy_pg_agent.py +4 -7
  45. adam/commands/deploy/undeploy_pod.py +9 -12
  46. adam/commands/devices/device.py +93 -2
  47. adam/commands/devices/device_app.py +37 -10
  48. adam/commands/devices/device_auit_log.py +8 -2
  49. adam/commands/devices/device_cass.py +47 -7
  50. adam/commands/devices/device_export.py +2 -2
  51. adam/commands/devices/device_postgres.py +41 -6
  52. adam/commands/exit.py +1 -4
  53. adam/commands/export/clean_up_all_export_sessions.py +37 -0
  54. adam/commands/export/clean_up_export_sessions.py +18 -7
  55. adam/commands/export/drop_export_database.py +15 -18
  56. adam/commands/export/drop_export_databases.py +6 -9
  57. adam/commands/export/export.py +8 -38
  58. adam/commands/export/export_databases.py +16 -12
  59. adam/commands/export/export_handlers.py +71 -0
  60. adam/commands/export/export_select.py +33 -24
  61. adam/commands/export/export_use.py +12 -15
  62. adam/commands/export/exporter.py +37 -48
  63. adam/commands/export/import_session.py +4 -32
  64. adam/commands/export/importer_athena.py +4 -7
  65. adam/commands/export/importer_sqlite.py +19 -27
  66. adam/commands/export/show_column_counts.py +13 -22
  67. adam/commands/export/show_export_databases.py +3 -6
  68. adam/commands/export/show_export_session.py +10 -13
  69. adam/commands/export/show_export_sessions.py +8 -11
  70. adam/commands/export/utils_export.py +24 -1
  71. adam/commands/intermediate_command.py +49 -0
  72. adam/commands/issues.py +11 -43
  73. adam/commands/kubectl.py +3 -6
  74. adam/commands/login.py +22 -24
  75. adam/commands/logs.py +3 -6
  76. adam/commands/ls.py +8 -9
  77. adam/commands/medusa/medusa.py +4 -22
  78. adam/commands/medusa/medusa_backup.py +20 -24
  79. adam/commands/medusa/medusa_restore.py +29 -33
  80. adam/commands/medusa/medusa_show_backupjobs.py +14 -18
  81. adam/commands/medusa/medusa_show_restorejobs.py +11 -18
  82. adam/commands/nodetool.py +6 -15
  83. adam/commands/param_get.py +11 -12
  84. adam/commands/param_set.py +9 -10
  85. adam/commands/postgres/postgres.py +29 -37
  86. adam/commands/postgres/postgres_context.py +47 -23
  87. adam/commands/postgres/postgres_ls.py +4 -8
  88. adam/commands/postgres/postgres_preview.py +5 -9
  89. adam/commands/postgres/psql_completions.py +1 -1
  90. adam/commands/postgres/utils_postgres.py +66 -0
  91. adam/commands/preview_table.py +5 -44
  92. adam/commands/pwd.py +13 -16
  93. adam/commands/reaper/reaper.py +4 -27
  94. adam/commands/reaper/reaper_forward.py +48 -55
  95. adam/commands/reaper/reaper_forward_session.py +6 -0
  96. adam/commands/reaper/reaper_forward_stop.py +10 -16
  97. adam/commands/reaper/reaper_restart.py +7 -14
  98. adam/commands/reaper/reaper_run_abort.py +11 -30
  99. adam/commands/reaper/reaper_runs.py +42 -57
  100. adam/commands/reaper/reaper_runs_abort.py +29 -49
  101. adam/commands/reaper/reaper_schedule_activate.py +11 -30
  102. adam/commands/reaper/reaper_schedule_start.py +10 -29
  103. adam/commands/reaper/reaper_schedule_stop.py +10 -29
  104. adam/commands/reaper/reaper_schedules.py +4 -14
  105. adam/commands/reaper/reaper_status.py +8 -16
  106. adam/commands/reaper/utils_reaper.py +196 -0
  107. adam/commands/repair/repair.py +4 -22
  108. adam/commands/repair/repair_log.py +5 -11
  109. adam/commands/repair/repair_run.py +27 -34
  110. adam/commands/repair/repair_scan.py +32 -38
  111. adam/commands/repair/repair_stop.py +5 -11
  112. adam/commands/report.py +27 -29
  113. adam/commands/restart.py +25 -26
  114. adam/commands/rollout.py +19 -24
  115. adam/commands/shell.py +10 -4
  116. adam/commands/show/show.py +10 -26
  117. adam/commands/show/show_cassandra_repairs.py +35 -0
  118. adam/commands/show/show_cassandra_status.py +32 -43
  119. adam/commands/show/show_cassandra_version.py +5 -18
  120. adam/commands/show/show_commands.py +19 -24
  121. adam/commands/show/show_host.py +1 -1
  122. adam/commands/show/show_login.py +20 -27
  123. adam/commands/show/show_processes.py +15 -19
  124. adam/commands/show/show_storage.py +10 -20
  125. adam/commands/watch.py +26 -29
  126. adam/config.py +4 -16
  127. adam/embedded_params.py +1 -1
  128. adam/log.py +4 -4
  129. adam/pod_exec_result.py +3 -3
  130. adam/repl.py +29 -32
  131. adam/repl_commands.py +11 -11
  132. adam/repl_state.py +52 -26
  133. adam/sql/sql_completer.py +4 -6
  134. adam/sql/sql_state_machine.py +21 -14
  135. adam/sso/authn_ad.py +6 -8
  136. adam/sso/authn_okta.py +4 -6
  137. adam/sso/cred_cache.py +3 -5
  138. adam/sso/idp.py +9 -12
  139. adam/utils.py +393 -33
  140. adam/utils_athena.py +14 -13
  141. adam/utils_audits.py +12 -12
  142. adam/utils_issues.py +32 -0
  143. adam/utils_k8s/app_clusters.py +13 -18
  144. adam/utils_k8s/app_pods.py +2 -0
  145. adam/utils_k8s/cassandra_clusters.py +21 -18
  146. adam/utils_k8s/custom_resources.py +16 -17
  147. adam/utils_k8s/ingresses.py +2 -2
  148. adam/utils_k8s/jobs.py +7 -11
  149. adam/utils_k8s/k8s.py +87 -0
  150. adam/utils_k8s/pods.py +14 -76
  151. adam/utils_k8s/secrets.py +4 -4
  152. adam/utils_k8s/service_accounts.py +5 -4
  153. adam/utils_k8s/services.py +2 -2
  154. adam/utils_k8s/statefulsets.py +1 -12
  155. adam/utils_repl/state_machine.py +3 -3
  156. adam/utils_sqlite.py +78 -42
  157. adam/version.py +1 -1
  158. {kaqing-2.0.145.dist-info → kaqing-2.0.172.dist-info}/METADATA +1 -1
  159. kaqing-2.0.172.dist-info/RECORD +230 -0
  160. adam/commands/app.py +0 -67
  161. adam/commands/app_ping.py +0 -44
  162. adam/commands/export/clean_up_export_session.py +0 -53
  163. adam/commands/postgres/postgres_utils.py +0 -31
  164. adam/commands/reaper/reaper_session.py +0 -159
  165. adam/commands/show/show_app_actions.py +0 -56
  166. adam/commands/show/show_app_id.py +0 -47
  167. adam/commands/show/show_app_queues.py +0 -45
  168. adam/commands/show/show_repairs.py +0 -47
  169. kaqing-2.0.145.dist-info/RECORD +0 -227
  170. {kaqing-2.0.145.dist-info → kaqing-2.0.172.dist-info}/WHEEL +0 -0
  171. {kaqing-2.0.145.dist-info → kaqing-2.0.172.dist-info}/entry_points.txt +0 -0
  172. {kaqing-2.0.145.dist-info → kaqing-2.0.172.dist-info}/top_level.txt +0 -0
adam/sso/cred_cache.py CHANGED
@@ -1,9 +1,9 @@
1
1
  import os
2
2
  from pathlib import Path
3
- import traceback
4
3
  from dotenv import load_dotenv
5
4
 
6
5
  from adam.config import Config
6
+ from adam.utils import debug, log_exc
7
7
  from adam.utils_k8s.kube_context import KubeContext
8
8
 
9
9
  class CredCache:
@@ -34,10 +34,8 @@ class CredCache:
34
34
  def cache(self, username: str, password: str = None):
35
35
  if os.path.exists(self.env_f):
36
36
  with open(self.env_f, 'w') as file:
37
- try:
37
+ with log_exc():
38
38
  file.truncate()
39
- except:
40
- Config().debug(traceback.format_exc())
41
39
 
42
40
  updated = []
43
41
  updated.append(f'IDP_USERNAME={username}')
@@ -56,4 +54,4 @@ class CredCache:
56
54
  if password:
57
55
  self.overrides['IDP_PASSWORD'] = password
58
56
 
59
- Config().debug(f'Cached username: {username}, password: {password}, try load: {self.get_username()}')
57
+ debug(f'Cached username: {username}, password: {password}, try load: {self.get_username()}')
adam/sso/idp.py CHANGED
@@ -3,7 +3,6 @@ import getpass
3
3
  import os
4
4
  import sys
5
5
  import termios
6
- import traceback
7
6
  from typing import Callable, TypeVar
8
7
  import requests
9
8
  from kubernetes import config
@@ -15,7 +14,7 @@ from .cred_cache import CredCache
15
14
  from .idp_session import IdpSession
16
15
  from .idp_login import IdpLogin
17
16
  from adam.config import Config
18
- from adam.utils import log, log2
17
+ from adam.utils import debug, log, log_exc
19
18
 
20
19
  T = TypeVar('T')
21
20
 
@@ -31,6 +30,8 @@ class Idp:
31
30
  def login(app_host: str, username: str = None, idp_uri: str = None, forced = False, use_token_from_env = True, use_cached_creds = True, verify = True) -> IdpLogin:
32
31
  session: IdpSession = IdpSession.create(username, app_host, app_host, idp_uri=idp_uri)
33
32
 
33
+ debug(f'Idp.login({username})')
34
+
34
35
  if use_token_from_env:
35
36
  if l0 := session.login_from_env_var():
36
37
  return l0
@@ -39,11 +40,9 @@ class Idp:
39
40
  token_server = Config().get('app.login.token-server-url', 'http://localhost:{port}').replace('{port}', port)
40
41
  res: requests.Response = requests.get(token_server)
41
42
  if res.status_code == 200 and res.text:
42
- try:
43
+ with log_exc():
43
44
  # may fail if the idp token is not complete
44
45
  return session.login_from_token(res.text)
45
- except:
46
- pass
47
46
 
48
47
  r: IdpLogin = None
49
48
  try:
@@ -57,10 +56,11 @@ class Idp:
57
56
  default_user: str = None
58
57
  if use_cached_creds:
59
58
  default_user = CredCache().get_username()
60
- Config().debug(f'User read from cache: {default_user}')
59
+ debug(f'User read from cache: {default_user}')
61
60
 
62
- if from_env := os.getenv('USERNAME'):
63
- default_user = from_env
61
+ # no value in using USERNAME
62
+ # if from_env := os.getenv('USERNAME') and in_docker():
63
+ # default_user = from_env
64
64
  if default_user and default_user != username:
65
65
  session = IdpSession.create(default_user, app_host, app_host)
66
66
 
@@ -125,7 +125,7 @@ class Idp:
125
125
  termios.tcsetattr(fd, termios.TCSADRAIN, old)
126
126
 
127
127
  def try_kubeconfig(username: str, kubeconfig: str):
128
- try:
128
+ with log_exc():
129
129
  if kubeconfig[0] == '\t':
130
130
  kubeconfig = kubeconfig[1:]
131
131
  kubeconfig_string = base64.b64decode(kubeconfig.encode('ascii') + b'==').decode('utf-8')
@@ -136,8 +136,5 @@ class Idp:
136
136
  Secrets.list_secrets(os.getenv('NAMESPACE'))
137
137
 
138
138
  return IdpLogin(None, None, None, username)
139
- except:
140
- Config().debug(traceback.format_exc())
141
- pass
142
139
 
143
140
  return None
adam/utils.py CHANGED
@@ -1,3 +1,4 @@
1
+ from concurrent.futures import Future, ThreadPoolExecutor
1
2
  from contextlib import redirect_stdout
2
3
  import copy
3
4
  import csv
@@ -10,7 +11,8 @@ from pathlib import Path
10
11
  import random
11
12
  import string
12
13
  import threading
13
- from typing import Callable
14
+ import traceback
15
+ from typing import Callable, Iterator, TypeVar, Union
14
16
  from dateutil import parser
15
17
  import subprocess
16
18
  import sys
@@ -20,7 +22,12 @@ import yaml
20
22
 
21
23
  from . import __version__
22
24
 
23
- is_debug_holder = [lambda: False]
25
+ log_state = threading.local()
26
+
27
+ class LogConfig:
28
+ is_debug = lambda: False
29
+ is_debug_timing = lambda: False
30
+ is_display_help = True
24
31
 
25
32
  def to_tabular(lines: str, header: str = None, dashed_line = False):
26
33
  return lines_to_tabular(lines.split('\n'), header, dashed_line)
@@ -74,7 +81,7 @@ def epoch(timestamp_string: str):
74
81
 
75
82
  def log(s = None):
76
83
  if not loggable():
77
- return
84
+ return False
78
85
 
79
86
  # want to print empty line for False or empty collection
80
87
  if s == None:
@@ -82,15 +89,19 @@ def log(s = None):
82
89
  else:
83
90
  click.echo(s)
84
91
 
92
+ return True
93
+
85
94
  def log2(s = None, nl = True):
86
95
  if not loggable():
87
- return
96
+ return False
88
97
 
89
98
  if s:
90
99
  click.echo(s, err=True, nl=nl)
91
100
  else:
92
101
  print(file=sys.stderr)
93
102
 
103
+ return True
104
+
94
105
  def elapsed_time(start_time: float):
95
106
  end_time = time.time()
96
107
  elapsed_time = end_time - start_time
@@ -105,8 +116,8 @@ def duration(start_time: float, end_time: float = None):
105
116
  end_time = time.time()
106
117
  d = convert_seconds(end_time - start_time)
107
118
  t = []
108
- if d[0]:
109
- t.append(f'{d[0]}h')
119
+ if d:
120
+ t.append(f'{d}h')
110
121
  if t or d[1]:
111
122
  t.append(f'{d[1]}m')
112
123
  t.append(f'{d[2]}s')
@@ -169,6 +180,9 @@ def get_deep_keys(d, current_path=""):
169
180
  return keys
170
181
 
171
182
  def display_help(replace_arg = False):
183
+ if not LogConfig.is_display_help:
184
+ return
185
+
172
186
  args = copy.copy(sys.argv)
173
187
  if replace_arg:
174
188
  args[len(args) - 1] = '--help'
@@ -213,12 +227,13 @@ def json_to_csv(json_data: list[dict[any, any]], delimiter: str = ','):
213
227
  with redirect_stdout(body) as f:
214
228
  dict_writer = csv.DictWriter(f, keys, delimiter=delimiter)
215
229
  dict_writer.writerows(flattened_data)
230
+
216
231
  return header.getvalue().strip('\r\n'), [l.strip('\r') for l in body.getvalue().split('\n')]
217
232
  else:
218
233
  return None
219
234
 
220
235
  def log_to_file(config: dict[any, any]):
221
- try:
236
+ with log_exc():
222
237
  base = f"/kaqing/logs"
223
238
  os.makedirs(base, exist_ok=True)
224
239
 
@@ -233,8 +248,6 @@ def log_to_file(config: dict[any, any]):
233
248
  f.write(config)
234
249
  else:
235
250
  f.write(config)
236
- except:
237
- pass
238
251
 
239
252
  def copy_config_file(rel_path: str, module: str, suffix: str = '.yaml', show_out = True):
240
253
  dir = f'{Path.home()}/.kaqing'
@@ -255,31 +268,50 @@ def idp_token_from_env():
255
268
  def is_lambda(func):
256
269
  return callable(func) and hasattr(func, '__name__') and func.__name__ == '<lambda>'
257
270
 
258
- class Ing:
259
- state = threading.local()
271
+ def debug(s = None):
272
+ if LogConfig.is_debug():
273
+ log2(f'DEBUG {s}')
274
+
275
+ def debug_trace():
276
+ if LogConfig.is_debug():
277
+ log2(traceback.format_exc())
260
278
 
279
+ def in_docker() -> bool:
280
+ if os.path.exists('/.dockerenv'):
281
+ return True
282
+
283
+ try:
284
+ with open('/proc/1/cgroup', 'rt') as f:
285
+ for line in f:
286
+ if 'docker' in line or 'lxc' in line:
287
+ return True
288
+ except FileNotFoundError:
289
+ pass
290
+
291
+ return False
292
+
293
+ class Ing:
261
294
  def __init__(self, msg: str, suppress_log=False):
262
295
  self.msg = msg
263
296
  self.suppress_log = suppress_log
264
- self.nested = False
265
297
 
266
298
  def __enter__(self):
267
- if not hasattr(Ing.state, 'ing_cnt'):
268
- Ing.state.ing_cnt = 0
299
+ if not hasattr(log_state, 'ing_cnt'):
300
+ log_state.ing_cnt = 0
269
301
 
270
302
  try:
271
- if not Ing.state.ing_cnt:
272
- if not self.suppress_log and not is_debug_holder[0]():
303
+ if not log_state.ing_cnt:
304
+ if not self.suppress_log and not LogConfig.is_debug():
273
305
  log2(f'{self.msg}...', nl=False)
274
306
 
275
307
  return None
276
308
  finally:
277
- Ing.state.ing_cnt += 1
309
+ log_state.ing_cnt += 1
278
310
 
279
311
  def __exit__(self, exc_type, exc_val, exc_tb):
280
- Ing.state.ing_cnt -= 1
281
- if not Ing.state.ing_cnt:
282
- if not self.suppress_log and not is_debug_holder[0]():
312
+ log_state.ing_cnt -= 1
313
+ if not log_state.ing_cnt:
314
+ if not self.suppress_log and not LogConfig.is_debug():
283
315
  log2(' OK')
284
316
 
285
317
  return False
@@ -290,23 +322,351 @@ def ing(msg: str, body: Callable[[], None]=None, suppress_log=False):
290
322
 
291
323
  r = None
292
324
 
293
- if not hasattr(Ing.state, 'ing_cnt'):
294
- Ing.state.ing_cnt = 0
295
-
296
- if not Ing.state.ing_cnt:
297
- if not suppress_log and not is_debug_holder[0]():
298
- log2(f'{msg}...', nl=False)
299
-
300
- Ing.state.ing_cnt += 1
325
+ t = Ing(msg, suppress_log=suppress_log)
326
+ t.__enter__()
301
327
  try:
302
328
  r = body()
303
329
  finally:
304
- Ing.state.ing_cnt -= 1
305
- if not Ing.state.ing_cnt:
306
- if not suppress_log and not is_debug_holder[0]():
307
- log2(' OK')
330
+ t.__exit__(None, None, None)
308
331
 
309
332
  return r
310
333
 
311
334
  def loggable():
312
- return is_debug_holder[0]() or not hasattr(Ing.state, 'ing_cnt') or not Ing.state.ing_cnt
335
+ return LogConfig.is_debug() or not hasattr(log_state, 'ing_cnt') or not log_state.ing_cnt
336
+
337
+ class TimingNode:
338
+ def __init__(self, depth: int, s0: time.time = time.time(), line: str = None):
339
+ self.depth = depth
340
+ self.s0 = s0
341
+ self.line = line
342
+ self.children = []
343
+
344
+ def __str__(self):
345
+ return f'[{self.depth}: {self.line}, children={len(self.children)}]'
346
+
347
+ def tree(self):
348
+ lines = []
349
+ if self.line:
350
+ lines.append(self.line)
351
+
352
+ for child in self.children:
353
+ if child.line:
354
+ lines.append(child.tree())
355
+ return '\n'.join(lines)
356
+
357
+ class LogTiming:
358
+ def __init__(self, msg: str, s0: time.time = None):
359
+ self.msg = msg
360
+ self.s0 = s0
361
+
362
+ def __enter__(self):
363
+ if not LogConfig.is_debug_timing():
364
+ return
365
+
366
+ if not hasattr(log_state, 'timings'):
367
+ log_state.timings = TimingNode(0)
368
+
369
+ self.me = log_state.timings
370
+ log_state.timings = TimingNode(self.me.depth+1)
371
+ if not self.s0:
372
+ self.s0 = time.time()
373
+
374
+ def __exit__(self, exc_type, exc_val, exc_tb):
375
+ if not LogConfig.is_debug_timing():
376
+ return False
377
+
378
+ child = log_state.timings
379
+ log_state.timings.line = timing_log_line(self.me.depth, self.msg, self.s0)
380
+
381
+ if child and child.line:
382
+ self.me.children.append(child)
383
+ log_state.timings = self.me
384
+
385
+ if not self.me.depth:
386
+ log2(self.me.tree())
387
+ log_state.timings = TimingNode(0)
388
+
389
+ return False
390
+
391
+ def log_timing(msg: str, body: Callable[[], None]=None, s0: time.time = None):
392
+ if not s0 and not body:
393
+ return LogTiming(msg, s0=s0)
394
+
395
+ if not LogConfig.is_debug_timing():
396
+ if body:
397
+ return body()
398
+
399
+ return
400
+
401
+ r = None
402
+
403
+ t = LogTiming(msg, s0=s0)
404
+ t.__enter__()
405
+ try:
406
+ if body:
407
+ r = body()
408
+ finally:
409
+ t.__exit__(None, None, None)
410
+
411
+ return r
412
+
413
+ def timing_log_line(depth: int, msg: str, s0: time.time):
414
+ elapsed = time.time() - s0
415
+ prefix = '[timings] '
416
+ if depth:
417
+ if elapsed > 0.01:
418
+ prefix = (' ' * (depth-1)) + '* '
419
+ else:
420
+ prefix = ' ' * depth
421
+
422
+ return f'{prefix}{msg}: {elapsed:.2f} sec'
423
+
424
+ class WaitLog:
425
+ wait_log_flag = False
426
+
427
+ def wait_log(msg: str):
428
+ if not WaitLog.wait_log_flag:
429
+ log2(msg)
430
+ WaitLog.wait_log_flag = True
431
+
432
+ def clear_wait_log_flag():
433
+ WaitLog.wait_log_flag = False
434
+
435
+ class LogTrace:
436
+ def __init__(self, err_msg: Union[str, callable, bool] = None):
437
+ self.err_msg = err_msg
438
+
439
+ def __enter__(self):
440
+ return None
441
+
442
+ def __exit__(self, exc_type, exc_val, exc_tb):
443
+ if exc_type is not None:
444
+ if self.err_msg is True:
445
+ log2(str(exc_val))
446
+ elif callable(self.err_msg):
447
+ log2(self.err_msg(exc_val))
448
+ elif self.err_msg is not False and self.err_msg:
449
+ log2(self.err_msg)
450
+
451
+ if self.err_msg is not False and LogConfig.is_debug():
452
+ traceback.print_exception(exc_type, exc_val, exc_tb, file=sys.stderr)
453
+
454
+ # swallow exception
455
+ return True
456
+
457
+ def log_exc(err_msg: Union[str, callable, bool] = None):
458
+ return LogTrace(err_msg=err_msg)
459
+
460
+ T = TypeVar('T')
461
+
462
+ class ParallelService:
463
+ def __init__(self, handler: 'ParallelMapHandler'):
464
+ self.handler = handler
465
+
466
+ def map(self, fn: Callable[..., T]) -> Iterator[T]:
467
+ executor = self.handler.executor
468
+ collection = self.handler.collection
469
+ collect = self.handler.collect
470
+ samples_cnt = self.handler.samples
471
+
472
+ iterator = None
473
+ if executor:
474
+ iterator = executor.map(fn, collection)
475
+ elif samples_cnt < sys.maxsize:
476
+ samples = []
477
+
478
+ for elem in collection:
479
+ if not samples_cnt:
480
+ break
481
+
482
+ samples.append(fn(elem))
483
+ samples_cnt -= 1
484
+
485
+ iterator = iter(samples)
486
+ else:
487
+ iterator = map(fn, collection)
488
+
489
+ if collect:
490
+ return list(iterator)
491
+ else:
492
+ return iterator
493
+
494
+ class ParallelMapHandler:
495
+ def __init__(self, collection: list, max_workers: int, samples: int = sys.maxsize, msg: str = None, collect = True):
496
+ self.collection = collection
497
+ self.max_workers = max_workers
498
+ self.executor = None
499
+ self.samples = samples
500
+ self.msg = msg
501
+ if msg and msg.startswith('d`'):
502
+ if LogConfig.is_debug():
503
+ self.msg = msg.replace('d`', '', 1)
504
+ else:
505
+ self.msg = None
506
+ self.collect = collect
507
+
508
+ self.begin = []
509
+ self.end = []
510
+ self.start_time = None
511
+
512
+ def __enter__(self):
513
+ self.calc_msgs()
514
+
515
+ if self.max_workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
516
+ self.start_time = time.time()
517
+
518
+ self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
519
+ self.executor.__enter__()
520
+
521
+ return ParallelService(self)
522
+
523
+ def __exit__(self, exc_type, exc_val, exc_tb):
524
+ if self.executor:
525
+ self.executor.__exit__(exc_type, exc_val, exc_tb)
526
+
527
+ if self.end:
528
+ log2(f'{" ".join(self.end)} in {elapsed_time(self.start_time)}.')
529
+
530
+ return False
531
+
532
+ def size(self):
533
+ if not self.collection:
534
+ return 0
535
+
536
+ return len(self.collection)
537
+
538
+ def calc_msgs(self):
539
+ if not self.msg:
540
+ return
541
+
542
+ size = self.size()
543
+ # return
544
+
545
+ offloaded = False
546
+ serially = False
547
+ sampling = False
548
+ if size == 0:
549
+ offloaded = True
550
+ self.msg = self.msg.replace('{size}', '1')
551
+ elif self.max_workers > 1 and size > 1 and self.samples == sys.maxsize:
552
+ self.msg = self.msg.replace('{size}', f'{size}')
553
+ elif self.samples < sys.maxsize:
554
+ sampling = True
555
+ if self.samples > size:
556
+ self.samples = size
557
+ self.msg = self.msg.replace('{size}', f'{self.samples}/{size} sample')
558
+ else:
559
+ serially = True
560
+ self.msg = self.msg.replace('{size}', f'{size}')
561
+ # return
562
+
563
+ for token in self.msg.split(' '):
564
+ if '|' in token:
565
+ self.begin.append(token.split('|')[0])
566
+ if not sampling and not serially and not offloaded:
567
+ self.end.append(token.split('|')[1])
568
+ else:
569
+ self.begin.append(token)
570
+ if not sampling and not serially and not offloaded:
571
+ self.end.append(token)
572
+
573
+ if offloaded:
574
+ log2(f'{" ".join(self.begin)} offloaded...')
575
+ elif sampling or serially:
576
+ log2(f'{" ".join(self.begin)} serially...')
577
+ else:
578
+ log2(f'{" ".join(self.begin)} with {self.max_workers} workers...')
579
+
580
+ class OffloadService:
581
+ def __init__(self, handler: 'OffloadHandler'):
582
+ self.handler = handler
583
+
584
+ def submit(self, fn: Callable[..., T], /, *args, **kwargs) -> Future[T]:
585
+ executor = self.handler.executor
586
+
587
+ if executor:
588
+ return executor.submit(fn, *args, **kwargs)
589
+ else:
590
+ future = Future()
591
+
592
+ future.set_result(fn(*args, **kwargs))
593
+
594
+ return future
595
+
596
+ class OffloadHandler(ParallelMapHandler):
597
+ def __init__(self, max_workers: int, msg: str = None):
598
+ super().__init__(None, max_workers, msg=msg, collect=False )
599
+
600
+ def __enter__(self):
601
+ self.calc_msgs()
602
+
603
+ if self.max_workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
604
+ self.start_time = time.time()
605
+
606
+ self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
607
+ self.executor.__enter__()
608
+
609
+ return OffloadService(self)
610
+
611
+ def __exit__(self, exc_type, exc_val, exc_tb):
612
+ if self.executor:
613
+ self.executor.__exit__(exc_type, exc_val, exc_tb)
614
+
615
+ if self.end:
616
+ log2(f'{" ".join(self.end)} in {elapsed_time(self.start_time)}.')
617
+
618
+ return False
619
+
620
+ def size(self):
621
+ if not self.collection:
622
+ return 0
623
+
624
+ return len(self.collection)
625
+
626
+ def calc_msgs(self):
627
+ if not self.msg:
628
+ return
629
+
630
+ size = self.size()
631
+ # return
632
+
633
+ offloaded = False
634
+ serially = False
635
+ sampling = False
636
+ if size == 0:
637
+ offloaded = True
638
+ self.msg = self.msg.replace('{size}', '1')
639
+ elif self.max_workers > 1 and size > 1 and self.samples == sys.maxsize:
640
+ self.msg = self.msg.replace('{size}', f'{size}')
641
+ elif self.samples < sys.maxsize:
642
+ sampling = True
643
+ if self.samples > size:
644
+ self.samples = size
645
+ self.msg = self.msg.replace('{size}', f'{self.samples}/{size} sample')
646
+ else:
647
+ serially = True
648
+ self.msg = self.msg.replace('{size}', f'{size}')
649
+ # return
650
+
651
+ for token in self.msg.split(' '):
652
+ if '|' in token:
653
+ self.begin.append(token.split('|')[0])
654
+ if not sampling and not serially and not offloaded:
655
+ self.end.append(token.split('|')[1])
656
+ else:
657
+ self.begin.append(token)
658
+ if not sampling and not serially and not offloaded:
659
+ self.end.append(token)
660
+
661
+ if offloaded:
662
+ log2(f'{" ".join(self.begin)} offloaded...')
663
+ elif sampling or serially:
664
+ log2(f'{" ".join(self.begin)} serially...')
665
+ else:
666
+ log2(f'{" ".join(self.begin)} with {self.max_workers} workers...')
667
+
668
+ def parallelize(collection: list, max_workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True):
669
+ return ParallelMapHandler(collection, max_workers, samples = samples, msg = msg, collect = collect)
670
+
671
+ def offload(max_workers: int = 3, msg: str = None):
672
+ return OffloadHandler(max_workers, msg = msg)