kaqing 2.0.145__py3-none-any.whl → 2.0.189__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kaqing might be problematic.
- adam/__init__.py +0 -2
- adam/app_session.py +9 -12
- adam/apps.py +18 -4
- adam/batch.py +4 -4
- adam/checks/check_utils.py +16 -46
- adam/checks/cpu.py +7 -1
- adam/checks/cpu_metrics.py +52 -0
- adam/checks/disk.py +2 -3
- adam/columns/columns.py +3 -1
- adam/columns/cpu.py +3 -1
- adam/columns/cpu_metrics.py +22 -0
- adam/columns/memory.py +3 -4
- adam/commands/__init__.py +24 -0
- adam/commands/alter_tables.py +33 -48
- adam/commands/app/__init__.py +0 -0
- adam/commands/app/app.py +38 -0
- adam/commands/{app_ping.py → app/app_ping.py} +7 -13
- adam/commands/app/show_app_actions.py +49 -0
- adam/commands/{show → app}/show_app_id.py +8 -11
- adam/commands/{show → app}/show_app_queues.py +7 -14
- adam/commands/app/utils_app.py +106 -0
- adam/commands/audit/audit.py +21 -40
- adam/commands/audit/audit_repair_tables.py +14 -19
- adam/commands/audit/audit_run.py +14 -22
- adam/commands/audit/completions_l.py +15 -0
- adam/commands/audit/show_last10.py +4 -19
- adam/commands/audit/show_slow10.py +4 -18
- adam/commands/audit/show_top10.py +4 -16
- adam/commands/audit/utils_show_top10.py +15 -3
- adam/commands/bash/__init__.py +5 -0
- adam/commands/bash/bash.py +7 -104
- adam/commands/bash/utils_bash.py +16 -0
- adam/commands/cat.py +7 -27
- adam/commands/cd.py +7 -11
- adam/commands/check.py +15 -24
- adam/commands/cli_commands.py +8 -4
- adam/commands/clipboard_copy.py +87 -0
- adam/commands/code.py +21 -24
- adam/commands/command.py +207 -42
- adam/commands/commands_utils.py +25 -27
- adam/commands/cql/completions_c.py +28 -0
- adam/commands/cql/cqlsh.py +9 -33
- adam/commands/cql/{cql_utils.py → utils_cql.py} +111 -15
- adam/commands/deploy/code_start.py +7 -10
- adam/commands/deploy/code_stop.py +4 -21
- adam/commands/deploy/code_utils.py +3 -3
- adam/commands/deploy/deploy.py +4 -27
- adam/commands/deploy/deploy_frontend.py +14 -17
- adam/commands/deploy/deploy_pg_agent.py +3 -6
- adam/commands/deploy/deploy_pod.py +64 -68
- adam/commands/deploy/undeploy.py +4 -27
- adam/commands/deploy/undeploy_frontend.py +4 -7
- adam/commands/deploy/undeploy_pg_agent.py +5 -8
- adam/commands/deploy/undeploy_pod.py +9 -12
- adam/commands/devices/device.py +124 -2
- adam/commands/devices/device_app.py +41 -24
- adam/commands/devices/device_auit_log.py +10 -4
- adam/commands/devices/device_cass.py +48 -14
- adam/commands/devices/device_export.py +13 -12
- adam/commands/devices/device_postgres.py +105 -54
- adam/commands/download_file.py +47 -0
- adam/commands/exit.py +1 -4
- adam/commands/export/clean_up_all_export_sessions.py +37 -0
- adam/commands/export/clean_up_export_sessions.py +9 -10
- adam/commands/export/completions_x.py +11 -0
- adam/commands/export/download_export_session.py +40 -0
- adam/commands/export/drop_export_database.py +7 -26
- adam/commands/export/drop_export_databases.py +5 -14
- adam/commands/export/export.py +6 -52
- adam/commands/export/export_databases.py +108 -32
- adam/commands/export/export_select.py +8 -59
- adam/commands/export/export_sessions.py +209 -0
- adam/commands/export/export_use.py +14 -20
- adam/commands/export/export_x_select.py +48 -0
- adam/commands/export/exporter.py +135 -167
- adam/commands/export/import_files.py +44 -0
- adam/commands/export/import_session.py +11 -35
- adam/commands/export/importer.py +19 -5
- adam/commands/export/importer_athena.py +112 -44
- adam/commands/export/importer_sqlite.py +42 -22
- adam/commands/export/show_column_counts.py +13 -31
- adam/commands/export/show_export_databases.py +7 -7
- adam/commands/export/show_export_session.py +8 -20
- adam/commands/export/show_export_sessions.py +6 -16
- adam/commands/export/utils_export.py +64 -11
- adam/commands/find_files.py +51 -0
- adam/commands/find_processes.py +76 -0
- adam/commands/head.py +36 -0
- adam/commands/help.py +2 -2
- adam/commands/intermediate_command.py +52 -0
- adam/commands/issues.py +11 -43
- adam/commands/kubectl.py +3 -6
- adam/commands/login.py +22 -24
- adam/commands/logs.py +3 -6
- adam/commands/ls.py +9 -10
- adam/commands/medusa/medusa.py +4 -22
- adam/commands/medusa/medusa_backup.py +20 -27
- adam/commands/medusa/medusa_restore.py +49 -46
- adam/commands/medusa/medusa_show_backupjobs.py +16 -18
- adam/commands/medusa/medusa_show_restorejobs.py +13 -18
- adam/commands/medusa/utils_medusa.py +15 -0
- adam/commands/nodetool.py +7 -21
- adam/commands/param_get.py +11 -14
- adam/commands/param_set.py +8 -12
- adam/commands/postgres/completions_p.py +22 -0
- adam/commands/postgres/postgres.py +34 -57
- adam/commands/postgres/postgres_databases.py +270 -0
- adam/commands/postgres/postgres_ls.py +4 -8
- adam/commands/postgres/postgres_preview.py +5 -9
- adam/commands/postgres/utils_postgres.py +79 -0
- adam/commands/preview_table.py +8 -45
- adam/commands/pwd.py +13 -16
- adam/commands/reaper/reaper.py +4 -27
- adam/commands/reaper/reaper_forward.py +49 -56
- adam/commands/reaper/reaper_forward_session.py +6 -0
- adam/commands/reaper/reaper_forward_stop.py +10 -16
- adam/commands/reaper/reaper_restart.py +7 -14
- adam/commands/reaper/reaper_run_abort.py +8 -33
- adam/commands/reaper/reaper_runs.py +43 -58
- adam/commands/reaper/reaper_runs_abort.py +29 -49
- adam/commands/reaper/reaper_schedule_activate.py +14 -33
- adam/commands/reaper/reaper_schedule_start.py +9 -33
- adam/commands/reaper/reaper_schedule_stop.py +9 -33
- adam/commands/reaper/reaper_schedules.py +4 -14
- adam/commands/reaper/reaper_status.py +8 -16
- adam/commands/reaper/utils_reaper.py +203 -0
- adam/commands/repair/repair.py +4 -22
- adam/commands/repair/repair_log.py +5 -11
- adam/commands/repair/repair_run.py +27 -34
- adam/commands/repair/repair_scan.py +32 -40
- adam/commands/repair/repair_stop.py +5 -12
- adam/commands/report.py +27 -29
- adam/commands/restart.py +25 -26
- adam/commands/rollout.py +19 -24
- adam/commands/shell.py +12 -4
- adam/commands/show/show.py +11 -27
- adam/commands/show/show_adam.py +3 -3
- adam/commands/show/show_cassandra_repairs.py +37 -0
- adam/commands/show/show_cassandra_status.py +47 -51
- adam/commands/show/show_cassandra_version.py +5 -18
- adam/commands/show/show_cli_commands.py +56 -0
- adam/commands/show/show_host.py +1 -1
- adam/commands/show/show_login.py +20 -27
- adam/commands/show/show_params.py +2 -5
- adam/commands/show/show_processes.py +18 -21
- adam/commands/show/show_storage.py +11 -20
- adam/commands/watch.py +26 -29
- adam/config.py +5 -16
- adam/embedded_params.py +1 -1
- adam/log.py +4 -4
- adam/pod_exec_result.py +3 -3
- adam/repl.py +45 -39
- adam/repl_commands.py +26 -19
- adam/repl_session.py +8 -1
- adam/repl_state.py +85 -36
- adam/sql/lark_completer.py +284 -0
- adam/sql/lark_parser.py +604 -0
- adam/sql/sql_completer.py +4 -6
- adam/sql/sql_state_machine.py +29 -16
- adam/sso/authn_ad.py +6 -8
- adam/sso/authn_okta.py +4 -6
- adam/sso/cred_cache.py +3 -5
- adam/sso/idp.py +9 -12
- adam/utils.py +484 -37
- adam/utils_athena.py +19 -19
- adam/utils_audits.py +12 -12
- adam/utils_issues.py +32 -0
- adam/utils_k8s/app_clusters.py +14 -19
- adam/utils_k8s/app_pods.py +7 -2
- adam/utils_k8s/cassandra_clusters.py +30 -19
- adam/utils_k8s/cassandra_nodes.py +2 -2
- adam/utils_k8s/custom_resources.py +16 -17
- adam/utils_k8s/ingresses.py +2 -2
- adam/utils_k8s/jobs.py +7 -11
- adam/utils_k8s/k8s.py +96 -0
- adam/utils_k8s/kube_context.py +2 -2
- adam/utils_k8s/pods.py +37 -81
- adam/utils_k8s/secrets.py +4 -4
- adam/utils_k8s/service_accounts.py +5 -4
- adam/utils_k8s/services.py +2 -2
- adam/utils_k8s/statefulsets.py +6 -14
- adam/utils_local.py +4 -0
- adam/utils_repl/appendable_completer.py +6 -0
- adam/utils_repl/repl_completer.py +128 -2
- adam/utils_repl/state_machine.py +3 -3
- adam/utils_sqlite.py +78 -42
- adam/version.py +1 -1
- {kaqing-2.0.145.dist-info → kaqing-2.0.189.dist-info}/METADATA +1 -1
- kaqing-2.0.189.dist-info/RECORD +253 -0
- kaqing-2.0.189.dist-info/top_level.txt +2 -0
- teddy/__init__.py +0 -0
- teddy/lark_parser.py +436 -0
- teddy/lark_parser2.py +618 -0
- adam/commands/app.py +0 -67
- adam/commands/cp.py +0 -95
- adam/commands/cql/cql_completions.py +0 -28
- adam/commands/export/clean_up_export_session.py +0 -53
- adam/commands/export/export_select_x.py +0 -54
- adam/commands/postgres/postgres_context.py +0 -248
- adam/commands/postgres/postgres_utils.py +0 -31
- adam/commands/postgres/psql_completions.py +0 -10
- adam/commands/reaper/reaper_session.py +0 -159
- adam/commands/show/show_app_actions.py +0 -56
- adam/commands/show/show_commands.py +0 -61
- adam/commands/show/show_repairs.py +0 -47
- kaqing-2.0.145.dist-info/RECORD +0 -227
- kaqing-2.0.145.dist-info/top_level.txt +0 -1
- {kaqing-2.0.145.dist-info → kaqing-2.0.189.dist-info}/WHEEL +0 -0
- {kaqing-2.0.145.dist-info → kaqing-2.0.189.dist-info}/entry_points.txt +0 -0
adam/utils.py
CHANGED
@@ -1,3 +1,4 @@
+from concurrent.futures import Future, ThreadPoolExecutor
 from contextlib import redirect_stdout
 import copy
 import csv
@@ -10,7 +11,8 @@ from pathlib import Path
 import random
 import string
 import threading
-
+import traceback
+from typing import Callable, Iterator, TypeVar, Union
 from dateutil import parser
 import subprocess
 import sys
@@ -18,14 +20,33 @@ import time
 import click
 import yaml

+from prompt_toolkit.completion import Completer
+
 from . import __version__

-
+T = TypeVar('T')
+
+log_state = threading.local()
+
+class LogConfig:
+    is_debug = lambda: False
+    is_debug_timing = lambda: False
+    is_debug_complete = lambda: False
+    is_display_help = True
+
+NO_SORT = 0
+SORT = 1
+REVERSE_SORT = -1

-def
-
+def tabulize(lines: list[T], fn: Callable[..., T] = None, header: str = None, dashed_line = False, separator = ' ', to: int = 1, sorted: int = NO_SORT):
+    if fn:
+        lines = list(map(fn, lines))
+
+    if sorted == SORT:
+        lines.sort()
+    elif sorted == REVERSE_SORT:
+        lines.sort(reverse=True)

-def lines_to_tabular(lines: list[str], header: str = None, dashed_line = False, separator = ' '):
     maxes = []
     nls = []

@@ -56,7 +77,14 @@ def lines_to_tabular(lines: list[str], header: str = None, dashed_line = False,
     for line in lines:
         format_line(line)

-
+    table = '\n'.join(nls)
+
+    if to == 1:
+        log(table)
+    elif to == 2:
+        log2(table)
+
+    return table

 def convert_seconds(total_seconds_float):
     total_seconds_int = int(total_seconds_float) # Convert float to integer seconds
@@ -74,7 +102,7 @@ def epoch(timestamp_string: str):

 def log(s = None):
     if not loggable():
-        return
+        return False

     # want to print empty line for False or empty collection
     if s == None:
@@ -82,15 +110,19 @@ def log(s = None):
     else:
         click.echo(s)

+    return True
+
 def log2(s = None, nl = True):
     if not loggable():
-        return
+        return False

     if s:
         click.echo(s, err=True, nl=nl)
     else:
         print(file=sys.stderr)

+    return True
+
 def elapsed_time(start_time: float):
     end_time = time.time()
     elapsed_time = end_time - start_time
@@ -105,7 +137,7 @@ def duration(start_time: float, end_time: float = None):
         end_time = time.time()
     d = convert_seconds(end_time - start_time)
     t = []
-    if d
+    if d:
         t.append(f'{d[0]}h')
     if t or d[1]:
         t.append(f'{d[1]}m')
@@ -131,7 +163,10 @@ def deep_merge_dicts(dict1, dict2):
             merged_dict[key] = deep_merge_dicts(merged_dict[key], value)
         elif key not in merged_dict or value:
             # Otherwise, overwrite or add the value from dict2
-            merged_dict[key]
+            if key in merged_dict and isinstance(merged_dict[key], Completer):
+                print('SEAN completer found, ignoring', key, value)
+            else:
+                merged_dict[key] = value
     return merged_dict

 def deep_sort_dict(d):
@@ -169,6 +204,9 @@ def get_deep_keys(d, current_path=""):
     return keys

 def display_help(replace_arg = False):
+    if not LogConfig.is_display_help:
+        return
+
     args = copy.copy(sys.argv)
     if replace_arg:
         args[len(args) - 1] = '--help'
@@ -213,12 +251,13 @@ def json_to_csv(json_data: list[dict[any, any]], delimiter: str = ','):
         with redirect_stdout(body) as f:
             dict_writer = csv.DictWriter(f, keys, delimiter=delimiter)
             dict_writer.writerows(flattened_data)
+
         return header.getvalue().strip('\r\n'), [l.strip('\r') for l in body.getvalue().split('\n')]
     else:
         return None

 def log_to_file(config: dict[any, any]):
-
+    with log_exc():
         base = f"/kaqing/logs"
         os.makedirs(base, exist_ok=True)
@@ -233,8 +272,6 @@ def log_to_file(config: dict[any, any]):
                 f.write(config)
             else:
                 f.write(config)
-    except:
-        pass

 def copy_config_file(rel_path: str, module: str, suffix: str = '.yaml', show_out = True):
     dir = f'{Path.home()}/.kaqing'
@@ -255,31 +292,54 @@ def idp_token_from_env():
 def is_lambda(func):
     return callable(func) and hasattr(func, '__name__') and func.__name__ == '<lambda>'

-
-
+def debug(s = None):
+    if LogConfig.is_debug():
+        log2(f'DEBUG {s}')
+
+def debug_complete(s = None):
+    if LogConfig.is_debug_complete():
+        log2(f'DEBUG {s}')
+
+def debug_trace():
+    if LogConfig.is_debug():
+        log2(traceback.format_exc())
+
+def in_docker() -> bool:
+    if os.path.exists('/.dockerenv'):
+        return True

+    try:
+        with open('/proc/1/cgroup', 'rt') as f:
+            for line in f:
+                if 'docker' in line or 'lxc' in line:
+                    return True
+    except FileNotFoundError:
+        pass
+
+    return False
+
+class Ing:
     def __init__(self, msg: str, suppress_log=False):
         self.msg = msg
         self.suppress_log = suppress_log
-        self.nested = False

     def __enter__(self):
-        if not hasattr(
-
+        if not hasattr(log_state, 'ing_cnt'):
+            log_state.ing_cnt = 0

         try:
-            if not
-            if not self.suppress_log and not
+            if not log_state.ing_cnt:
+                if not self.suppress_log and not LogConfig.is_debug():
                     log2(f'{self.msg}...', nl=False)

             return None
         finally:
-
+            log_state.ing_cnt += 1

     def __exit__(self, exc_type, exc_val, exc_tb):
-
-        if not
-        if not self.suppress_log and not
+        log_state.ing_cnt -= 1
+        if not log_state.ing_cnt:
+            if not self.suppress_log and not LogConfig.is_debug():
                 log2(' OK')

         return False
@@ -290,23 +350,410 @@ def ing(msg: str, body: Callable[[], None]=None, suppress_log=False):

     r = None

-
-
-
-    if not Ing.state.ing_cnt:
-        if not suppress_log and not is_debug_holder[0]():
-            log2(f'{msg}...', nl=False)
-
-    Ing.state.ing_cnt += 1
+    t = Ing(msg, suppress_log=suppress_log)
+    t.__enter__()
     try:
         r = body()
     finally:
-
-        if not Ing.state.ing_cnt:
-            if not suppress_log and not is_debug_holder[0]():
-                log2(' OK')
+        t.__exit__(None, None, None)

     return r

 def loggable():
-    return
+    return LogConfig.is_debug() or not hasattr(log_state, 'ing_cnt') or not log_state.ing_cnt
+
+class TimingNode:
+    def __init__(self, depth: int, s0: time.time = time.time(), line: str = None):
+        self.depth = depth
+        self.s0 = s0
+        self.line = line
+        self.children = []
+
+    def __str__(self):
+        return f'[{self.depth}: {self.line}, children={len(self.children)}]'
+
+    def tree(self):
+        lines = []
+        if self.line:
+            lines.append(self.line)
+
+        for child in self.children:
+            if child.line:
+                lines.append(child.tree())
+        return '\n'.join(lines)
+
+class LogTiming:
+    def __init__(self, msg: str, s0: time.time = None):
+        self.msg = msg
+        self.s0 = s0
+
+    def __enter__(self):
+        if not LogConfig.is_debug_timing():
+            return
+
+        if not hasattr(log_state, 'timings'):
+            log_state.timings = TimingNode(0)
+
+        self.me = log_state.timings
+        log_state.timings = TimingNode(self.me.depth+1)
+        if not self.s0:
+            self.s0 = time.time()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not LogConfig.is_debug_timing():
+            return False
+
+        child = log_state.timings
+        log_state.timings.line = timing_log_line(self.me.depth, self.msg, self.s0)
+
+        if child and child.line:
+            self.me.children.append(child)
+        log_state.timings = self.me
+
+        if not self.me.depth:
+            log2(self.me.tree())
+            log_state.timings = TimingNode(0)
+
+        return False
+
+def log_timing(msg: str, body: Callable[[], None]=None, s0: time.time = None):
+    if not s0 and not body:
+        return LogTiming(msg, s0=s0)
+
+    if not LogConfig.is_debug_timing():
+        if body:
+            return body()
+
+        return
+
+    r = None
+
+    t = LogTiming(msg, s0=s0)
+    t.__enter__()
+    try:
+        if body:
+            r = body()
+    finally:
+        t.__exit__(None, None, None)
+
+    return r
+
+def timing_log_line(depth: int, msg: str, s0: time.time):
+    # print('SEAN log timing', msg, threading.current_thread().name)
+    elapsed = time.time() - s0
+    offloaded = '-' if threading.current_thread().name.startswith('offload') or threading.current_thread().name.startswith('async') else '+'
+    prefix = f'[{offloaded} timings] '
+
+    if depth:
+        if elapsed > 0.01:
+            prefix = (' ' * (depth-1)) + '* '
+        else:
+            prefix = ' ' * depth
+
+    return f'{prefix}{msg}: {elapsed:.2f} sec'
+
+class WaitLog:
+    wait_log_flag = False
+
+def wait_log(msg: str):
+    if not WaitLog.wait_log_flag:
+        log2(msg)
+        WaitLog.wait_log_flag = True
+
+def clear_wait_log_flag():
+    WaitLog.wait_log_flag = False
+
+def bytes_generator_from_file(file_path, chunk_size=4096):
+    with open(file_path, 'rb') as f:
+        while True:
+            chunk = f.read(chunk_size)
+            if not chunk:
+                break
+            yield chunk
+
+class GeneratorStream(io.RawIOBase):
+    def __init__(self, generator):
+        self._generator = generator
+        self._buffer = b'' # Buffer to store leftover bytes from generator yields
+
+    def readable(self):
+        return True
+
+    def _read_from_generator(self):
+        try:
+            chunk = next(self._generator)
+            if isinstance(chunk, str):
+                chunk = chunk.encode('utf-8') # Encode if generator yields strings
+            self._buffer += chunk
+        except StopIteration:
+            pass # Generator exhausted
+
+    def readinto(self, b):
+        # Fill the buffer if necessary
+        while len(self._buffer) < len(b):
+            old_buffer_len = len(self._buffer)
+            self._read_from_generator()
+            if len(self._buffer) == old_buffer_len: # Generator exhausted and buffer empty
+                break
+
+        bytes_to_read = min(len(b), len(self._buffer))
+        b[:bytes_to_read] = self._buffer[:bytes_to_read]
+        self._buffer = self._buffer[bytes_to_read:]
+        return bytes_to_read
+
+    def read(self, size=-1):
+        if size == -1: # Read all remaining data
+            while True:
+                old_buffer_len = len(self._buffer)
+                self._read_from_generator()
+                if len(self._buffer) == old_buffer_len:
+                    break
+            data = self._buffer
+            self._buffer = b''
+            return data
+        else:
+            # Ensure enough data in buffer
+            while len(self._buffer) < size:
+                old_buffer_len = len(self._buffer)
+                self._read_from_generator()
+                if len(self._buffer) == old_buffer_len:
+                    break
+
+            data = self._buffer[:size]
+            self._buffer = self._buffer[size:]
+            return data
+
+class LogTrace:
+    def __init__(self, err_msg: Union[str, callable, bool] = None):
+        self.err_msg = err_msg
+
+    def __enter__(self):
+        return None
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            if self.err_msg is True:
+                log2(str(exc_val))
+            elif callable(self.err_msg):
+                log2(self.err_msg(exc_val))
+            elif self.err_msg is not False and self.err_msg:
+                log2(self.err_msg)
+
+            if self.err_msg is not False and LogConfig.is_debug():
+                traceback.print_exception(exc_type, exc_val, exc_tb, file=sys.stderr)
+
+        # swallow exception
+        return True
+
+def log_exc(err_msg: Union[str, callable, bool] = None):
+    return LogTrace(err_msg=err_msg)
+
+class ParallelService:
+    def __init__(self, handler: 'ParallelMapHandler'):
+        self.handler = handler
+
+    def map(self, fn: Callable[..., T]) -> Iterator[T]:
+        executor = self.handler.executor
+        collection = self.handler.collection
+        collect = self.handler.collect
+        samples_cnt = self.handler.samples
+
+        iterator = None
+        if executor:
+            iterator = executor.map(fn, collection)
+        elif samples_cnt < sys.maxsize:
+            samples = []
+
+            for elem in collection:
+                if not samples_cnt:
+                    break
+
+                samples.append(fn(elem))
+                samples_cnt -= 1
+
+            iterator = iter(samples)
+        else:
+            iterator = map(fn, collection)
+
+        if collect:
+            return list(iterator)
+        else:
+            return iterator
+
+class ParallelMapHandler:
+    def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True):
+        self.collection = collection
+        self.workers = workers
+        self.executor = None
+        self.samples = samples
+        self.msg = msg
+        if msg and msg.startswith('d`'):
+            if LogConfig.is_debug():
+                self.msg = msg.replace('d`', '', 1)
+            else:
+                self.msg = None
+        self.collect = collect
+
+        self.begin = []
+        self.end = []
+        self.start_time = None
+
+    def __enter__(self):
+        self.calc_msgs()
+
+        if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
+            self.start_time = time.time()
+
+            self.executor = ThreadPoolExecutor(max_workers=self.workers)
+            self.executor.__enter__()
+
+        return ParallelService(self)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.executor:
+            self.executor.__exit__(exc_type, exc_val, exc_tb)
+
+        if self.end:
+            log2(f'{" ".join(self.end)} in {elapsed_time(self.start_time)}.')
+
+        return False
+
+    def size(self):
+        if not self.collection:
+            return 0
+
+        return len(self.collection)
+
+    def calc_msgs(self):
+        if not self.msg:
+            return
+
+        size = self.size()
+        offloaded = False
+        serially = False
+        sampling = False
+        if size == 0:
+            offloaded = True
+            self.msg = self.msg.replace('{size}', '1')
+        elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
+            self.msg = self.msg.replace('{size}', f'{size}')
+        elif self.samples < sys.maxsize:
+            sampling = True
+            if self.samples > size:
+                self.samples = size
+            self.msg = self.msg.replace('{size}', f'{self.samples}/{size} sample')
+        else:
+            serially = True
+            self.msg = self.msg.replace('{size}', f'{size}')
+
+        for token in self.msg.split(' '):
+            if '|' in token:
+                self.begin.append(token.split('|')[0])
+                if not sampling and not serially and not offloaded:
+                    self.end.append(token.split('|')[1])
+            else:
+                self.begin.append(token)
+                if not sampling and not serially and not offloaded:
+                    self.end.append(token)
+
+        if offloaded:
+            log2(f'{" ".join(self.begin)} offloaded...')
+        elif sampling or serially:
+            log2(f'{" ".join(self.begin)} serially...')
+        else:
+            log2(f'{" ".join(self.begin)} with {self.workers} workers...')
+
+def parallelize(collection: list, workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True):
+    return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect)
+
+class OffloadService:
+    def __init__(self, handler: 'OffloadHandler'):
+        self.handler = handler
+
+    def submit(self, fn: Callable[..., T], /, *args, **kwargs) -> Future[T]:
+        executor = self.handler.executor
+
+        if executor:
+            return executor.submit(fn, *args, **kwargs)
+        else:
+            future = Future()
+
+            future.set_result(fn(*args, **kwargs))
+
+            return future
+
+class OffloadHandler(ParallelMapHandler):
+    def __init__(self, max_workers: int, msg: str = None):
+        super().__init__(None, max_workers, msg=msg, collect=False )
+
+    def __enter__(self):
+        self.calc_msgs()
+
+        if self.workers > 1:
+        # if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
+            self.start_time = time.time()
+
+            self.executor = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix='offload')
+            self.executor.__enter__()
+
+        return OffloadService(self)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.executor:
+            self.executor.__exit__(exc_type, exc_val, exc_tb)
+
+        if self.end:
+            log2(f'{" ".join(self.end)} in {elapsed_time(self.start_time)}.')
+
+        return False
+
+    # def size(self):
+    #     if not self.collection:
+    #         return 0
+
+    #     return len(self.collection)
+
+    def calc_msgs(self):
+        if not self.msg:
+            return
+
+        size = self.size()
+        # return
+
+        offloaded = False
+        serially = False
+        sampling = False
+        if size == 0:
+            offloaded = True
+            self.msg = self.msg.replace('{size}', '1')
+        elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
+            self.msg = self.msg.replace('{size}', f'{size}')
+        elif self.samples < sys.maxsize:
+            sampling = True
+            if self.samples > size:
+                self.samples = size
+            self.msg = self.msg.replace('{size}', f'{self.samples}/{size} sample')
+        else:
+            serially = True
+            self.msg = self.msg.replace('{size}', f'{size}')
+        # return
+
+        for token in self.msg.split(' '):
+            if '|' in token:
+                self.begin.append(token.split('|')[0])
+                if not sampling and not serially and not offloaded:
+                    self.end.append(token.split('|')[1])
+            else:
+                self.begin.append(token)
+                if not sampling and not serially and not offloaded:
+                    self.end.append(token)
+
+        if offloaded:
+            log2(f'{" ".join(self.begin)} offloaded...')
+        elif sampling or serially:
+            log2(f'{" ".join(self.begin)} serially...')
+        else:
+            log2(f'{" ".join(self.begin)} with {self.workers} workers...')
+
+def offload(max_workers: int = 3, msg: str = None):
+    return OffloadHandler(max_workers, msg = msg)
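The rewritten logging layer replaces the old `is_debug_holder`/`Ing.state` globals with the `LogConfig` switch class and a thread-local `log_state` counter. A minimal sketch of how these helpers compose, inferred only from the definitions in the diff above; the call sites are hypothetical, not package code:

```python
# Hypothetical usage of helpers defined in adam/utils.py (see diff above);
# only the imported names come from the package, the rest is illustrative.
from adam.utils import ing, log_exc, LogConfig

# LogConfig holds module-wide switches; is_debug is a plain callable attribute.
LogConfig.is_debug = lambda: False

# ing() wraps a body in "msg... OK" progress output on stderr; nesting is
# tracked via the thread-local log_state.ing_cnt, so only the outermost
# call prints.
total = ing('counting nodes', lambda: sum([1, 2, 3]))  # prints "counting nodes... OK"

# log_exc() returns a LogTrace context manager that swallows exceptions.
# err_msg may be a string, a callable taking the exception, or True to
# log str(exc); when is_debug() is on, the traceback is printed as well.
with log_exc('failed to read cluster state'):
    raise RuntimeError('boom')  # message logged, exception swallowed
```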
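`parallelize()` and `offload()` give the rest of the codebase one idiom for fanning work out to a thread pool, with `{size}` and `begin|end` tokens in `msg` driving the progress lines that `calc_msgs()` prints. A sketch under the same caveat; the worker function, hosts, and message are illustrative:

```python
# Hypothetical usage of parallelize()/offload() from adam/utils.py above.
from adam.utils import parallelize, offload

def probe(host: str) -> str:
    return f'{host}: ok'

hosts = ['node-1', 'node-2', 'node-3']

# workers > 1 enters a ThreadPoolExecutor; '{size}' is substituted by
# calc_msgs(), and a 'checking|checked' token logs "checking" before the
# run and "checked" in the elapsed-time summary afterwards.
with parallelize(hosts, workers=2, msg='checking|checked {size} hosts') as p:
    results = p.map(probe)  # collect=True by default, so a list comes back

# samples=N short-circuits to a serial pass over the first N elements,
# which a progress message would report as 'N/total sample'.
with parallelize(hosts, workers=2, samples=1) as p:
    sample = p.map(probe)

# offload() submits single tasks to a background pool whose threads are
# named 'offload...'; with max_workers <= 1 it runs the task inline and
# returns an already-completed Future.
with offload(max_workers=3) as o:
    future = o.submit(probe, 'node-1')
    print(future.result())
```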
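`GeneratorStream` adapts a chunk generator into a `read()`-able file-like object, pairing naturally with `bytes_generator_from_file()` for streaming large files. A sketch; the sample generator is illustrative:

```python
# Hypothetical usage of GeneratorStream from adam/utils.py above.
from adam.utils import GeneratorStream

def chunks():
    yield b'hello '
    yield 'world'  # str chunks are UTF-8 encoded by _read_from_generator()

stream = GeneratorStream(chunks())
print(stream.read(5))  # b'hello'  -- buffers the 6-byte first chunk, returns 5
print(stream.read())   # b' world' -- drains the leftover byte plus the rest

# The same adapter can wrap bytes_generator_from_file() to hand a large
# file to any API expecting a file-like object without loading it whole:
# stream = GeneratorStream(bytes_generator_from_file('/path/to/dump.bin'))
```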