kaqing 2.0.110__py3-none-any.whl → 2.0.214__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kaqing might be problematic. Click here for more details.
- adam/__init__.py +0 -2
- adam/app_session.py +9 -12
- adam/apps.py +18 -4
- adam/batch.py +19 -19
- adam/checks/check_utils.py +16 -46
- adam/checks/cpu.py +7 -1
- adam/checks/cpu_metrics.py +52 -0
- adam/checks/disk.py +2 -3
- adam/columns/columns.py +3 -1
- adam/columns/cpu.py +3 -1
- adam/columns/cpu_metrics.py +22 -0
- adam/columns/memory.py +3 -4
- adam/commands/__init__.py +24 -0
- adam/commands/app/app.py +38 -0
- adam/commands/{app_ping.py → app/app_ping.py} +7 -13
- adam/commands/{login.py → app/login.py} +22 -24
- adam/commands/app/show_app_actions.py +49 -0
- adam/commands/{show → app}/show_app_id.py +8 -11
- adam/commands/{show → app}/show_app_queues.py +7 -14
- adam/commands/app/show_login.py +56 -0
- adam/commands/app/utils_app.py +106 -0
- adam/commands/audit/audit.py +22 -40
- adam/commands/audit/audit_repair_tables.py +15 -19
- adam/commands/audit/audit_run.py +15 -22
- adam/commands/audit/completions_l.py +15 -0
- adam/commands/audit/show_last10.py +4 -18
- adam/commands/audit/show_slow10.py +4 -17
- adam/commands/audit/show_top10.py +4 -16
- adam/commands/audit/utils_show_top10.py +15 -3
- adam/commands/bash/__init__.py +5 -0
- adam/commands/bash/bash.py +36 -0
- adam/commands/bash/bash_completer.py +93 -0
- adam/commands/bash/utils_bash.py +16 -0
- adam/commands/cassandra/__init__.py +0 -0
- adam/commands/cassandra/download_cassandra_log.py +45 -0
- adam/commands/{restart.py → cassandra/restart_cluster.py} +12 -26
- adam/commands/cassandra/restart_node.py +51 -0
- adam/commands/cassandra/restart_nodes.py +47 -0
- adam/commands/{rollout.py → cassandra/rollout.py} +20 -25
- adam/commands/cassandra/show_cassandra_repairs.py +37 -0
- adam/commands/cassandra/show_cassandra_status.py +117 -0
- adam/commands/{show → cassandra}/show_cassandra_version.py +5 -18
- adam/commands/cassandra/show_processes.py +50 -0
- adam/commands/cassandra/show_storage.py +44 -0
- adam/commands/{watch.py → cassandra/watch.py} +26 -29
- adam/commands/cli/__init__.py +0 -0
- adam/commands/{cli_commands.py → cli/cli_commands.py} +8 -4
- adam/commands/cli/clipboard_copy.py +86 -0
- adam/commands/cli/show_cli_commands.py +56 -0
- adam/commands/code.py +57 -0
- adam/commands/command.py +211 -40
- adam/commands/commands_utils.py +20 -27
- adam/commands/config/__init__.py +0 -0
- adam/commands/{param_get.py → config/param_get.py} +11 -14
- adam/commands/{param_set.py → config/param_set.py} +8 -12
- adam/commands/{show → config}/show_params.py +2 -5
- adam/commands/cql/alter_tables.py +66 -0
- adam/commands/cql/completions_c.py +29 -0
- adam/commands/cql/cqlsh.py +10 -32
- adam/commands/cql/utils_cql.py +306 -0
- adam/commands/debug/__init__.py +0 -0
- adam/commands/debug/debug.py +22 -0
- adam/commands/debug/debug_completes.py +35 -0
- adam/commands/debug/debug_timings.py +35 -0
- adam/commands/debug/show_offloaded_completes.py +45 -0
- adam/commands/deploy/code_start.py +7 -10
- adam/commands/deploy/code_stop.py +4 -21
- adam/commands/deploy/code_utils.py +3 -3
- adam/commands/deploy/deploy.py +4 -27
- adam/commands/deploy/deploy_frontend.py +14 -17
- adam/commands/deploy/deploy_pg_agent.py +3 -6
- adam/commands/deploy/deploy_pod.py +65 -73
- adam/commands/deploy/deploy_utils.py +14 -24
- adam/commands/deploy/undeploy.py +4 -27
- adam/commands/deploy/undeploy_frontend.py +4 -7
- adam/commands/deploy/undeploy_pg_agent.py +6 -8
- adam/commands/deploy/undeploy_pod.py +11 -12
- adam/commands/devices/__init__.py +0 -0
- adam/commands/devices/device.py +149 -0
- adam/commands/devices/device_app.py +163 -0
- adam/commands/devices/device_auit_log.py +49 -0
- adam/commands/devices/device_cass.py +179 -0
- adam/commands/devices/device_export.py +87 -0
- adam/commands/devices/device_postgres.py +160 -0
- adam/commands/devices/devices.py +25 -0
- adam/commands/diag/__init__.py +0 -0
- adam/commands/{check.py → diag/check.py} +16 -25
- adam/commands/diag/generate_report.py +52 -0
- adam/commands/diag/issues.py +43 -0
- adam/commands/exit.py +1 -4
- adam/commands/export/__init__.py +0 -0
- adam/commands/export/clean_up_all_export_sessions.py +37 -0
- adam/commands/export/clean_up_export_sessions.py +39 -0
- adam/commands/export/completions_x.py +11 -0
- adam/commands/export/download_export_session.py +40 -0
- adam/commands/export/drop_export_database.py +39 -0
- adam/commands/export/drop_export_databases.py +37 -0
- adam/commands/export/export.py +37 -0
- adam/commands/export/export_databases.py +251 -0
- adam/commands/export/export_select.py +34 -0
- adam/commands/export/export_sessions.py +210 -0
- adam/commands/export/export_use.py +49 -0
- adam/commands/export/export_x_select.py +48 -0
- adam/commands/export/exporter.py +419 -0
- adam/commands/export/import_files.py +44 -0
- adam/commands/export/import_session.py +40 -0
- adam/commands/export/importer.py +81 -0
- adam/commands/export/importer_athena.py +157 -0
- adam/commands/export/importer_sqlite.py +78 -0
- adam/commands/export/show_column_counts.py +45 -0
- adam/commands/export/show_export_databases.py +39 -0
- adam/commands/export/show_export_session.py +39 -0
- adam/commands/export/show_export_sessions.py +37 -0
- adam/commands/export/utils_export.py +366 -0
- adam/commands/fs/__init__.py +0 -0
- adam/commands/fs/cat.py +36 -0
- adam/commands/fs/cat_local.py +42 -0
- adam/commands/fs/cd.py +41 -0
- adam/commands/fs/download_file.py +47 -0
- adam/commands/fs/find_files.py +51 -0
- adam/commands/fs/find_processes.py +76 -0
- adam/commands/fs/head.py +36 -0
- adam/commands/fs/ls.py +41 -0
- adam/commands/fs/ls_local.py +40 -0
- adam/commands/fs/pwd.py +45 -0
- adam/commands/fs/rm.py +18 -0
- adam/commands/fs/rm_downloads.py +39 -0
- adam/commands/fs/rm_logs.py +38 -0
- adam/commands/{shell.py → fs/shell.py} +12 -4
- adam/commands/{show → fs}/show_adam.py +3 -3
- adam/commands/{show → fs}/show_host.py +1 -1
- adam/commands/help.py +5 -3
- adam/commands/intermediate_command.py +52 -0
- adam/commands/kubectl.py +38 -0
- adam/commands/medusa/medusa.py +4 -22
- adam/commands/medusa/medusa_backup.py +20 -27
- adam/commands/medusa/medusa_restore.py +35 -48
- adam/commands/medusa/medusa_show_backupjobs.py +16 -18
- adam/commands/medusa/medusa_show_restorejobs.py +13 -18
- adam/commands/medusa/utils_medusa.py +15 -0
- adam/commands/nodetool/__init__.py +0 -0
- adam/commands/{nodetool.py → nodetool/nodetool.py} +9 -20
- adam/commands/postgres/completions_p.py +22 -0
- adam/commands/postgres/postgres.py +47 -55
- adam/commands/postgres/postgres_databases.py +269 -0
- adam/commands/postgres/postgres_ls.py +5 -9
- adam/commands/postgres/postgres_preview.py +5 -9
- adam/commands/postgres/utils_postgres.py +80 -0
- adam/commands/preview_table.py +8 -44
- adam/commands/reaper/reaper.py +4 -27
- adam/commands/reaper/reaper_forward.py +49 -56
- adam/commands/reaper/reaper_forward_session.py +6 -0
- adam/commands/reaper/reaper_forward_stop.py +10 -16
- adam/commands/reaper/reaper_restart.py +7 -14
- adam/commands/reaper/reaper_run_abort.py +8 -33
- adam/commands/reaper/reaper_runs.py +43 -58
- adam/commands/reaper/reaper_runs_abort.py +29 -49
- adam/commands/reaper/reaper_schedule_activate.py +14 -33
- adam/commands/reaper/reaper_schedule_start.py +9 -33
- adam/commands/reaper/reaper_schedule_stop.py +9 -33
- adam/commands/reaper/reaper_schedules.py +4 -14
- adam/commands/reaper/reaper_status.py +8 -16
- adam/commands/reaper/utils_reaper.py +203 -0
- adam/commands/repair/repair.py +4 -22
- adam/commands/repair/repair_log.py +5 -11
- adam/commands/repair/repair_run.py +27 -34
- adam/commands/repair/repair_scan.py +32 -40
- adam/commands/repair/repair_stop.py +5 -12
- adam/commands/show.py +40 -0
- adam/config.py +5 -15
- adam/embedded_params.py +1 -1
- adam/log.py +4 -4
- adam/repl.py +83 -116
- adam/repl_commands.py +86 -45
- adam/repl_session.py +9 -1
- adam/repl_state.py +176 -40
- adam/sql/async_executor.py +62 -0
- adam/sql/lark_completer.py +286 -0
- adam/sql/lark_parser.py +604 -0
- adam/sql/qingl.lark +1076 -0
- adam/sql/sql_completer.py +52 -27
- adam/sql/sql_state_machine.py +131 -19
- adam/sso/authn_ad.py +6 -8
- adam/sso/authn_okta.py +4 -6
- adam/sso/cred_cache.py +4 -9
- adam/sso/idp.py +9 -12
- adam/utils.py +670 -31
- adam/utils_athena.py +145 -0
- adam/utils_audits.py +12 -103
- adam/utils_issues.py +32 -0
- adam/utils_k8s/app_clusters.py +35 -0
- adam/utils_k8s/app_pods.py +41 -0
- adam/utils_k8s/cassandra_clusters.py +35 -20
- adam/utils_k8s/cassandra_nodes.py +15 -6
- adam/utils_k8s/custom_resources.py +16 -17
- adam/utils_k8s/ingresses.py +2 -2
- adam/utils_k8s/jobs.py +7 -11
- adam/utils_k8s/k8s.py +96 -0
- adam/utils_k8s/kube_context.py +3 -6
- adam/{pod_exec_result.py → utils_k8s/pod_exec_result.py} +13 -4
- adam/utils_k8s/pods.py +159 -89
- adam/utils_k8s/secrets.py +4 -4
- adam/utils_k8s/service_accounts.py +5 -4
- adam/utils_k8s/services.py +2 -2
- adam/utils_k8s/statefulsets.py +6 -14
- adam/utils_local.py +80 -0
- adam/utils_net.py +4 -4
- adam/utils_repl/__init__.py +0 -0
- adam/utils_repl/appendable_completer.py +6 -0
- adam/utils_repl/automata_completer.py +48 -0
- adam/utils_repl/repl_completer.py +93 -0
- adam/utils_repl/state_machine.py +173 -0
- adam/utils_sqlite.py +132 -0
- adam/version.py +1 -1
- {kaqing-2.0.110.dist-info → kaqing-2.0.214.dist-info}/METADATA +1 -1
- kaqing-2.0.214.dist-info/RECORD +272 -0
- kaqing-2.0.214.dist-info/top_level.txt +2 -0
- teddy/__init__.py +0 -0
- teddy/lark_parser.py +436 -0
- teddy/lark_parser2.py +618 -0
- adam/commands/alter_tables.py +0 -81
- adam/commands/app.py +0 -67
- adam/commands/bash.py +0 -150
- adam/commands/cd.py +0 -125
- adam/commands/cp.py +0 -95
- adam/commands/cql/cql_completions.py +0 -15
- adam/commands/cql/cql_utils.py +0 -112
- adam/commands/devices.py +0 -118
- adam/commands/issues.py +0 -75
- adam/commands/logs.py +0 -40
- adam/commands/ls.py +0 -146
- adam/commands/postgres/postgres_context.py +0 -239
- adam/commands/postgres/postgres_utils.py +0 -31
- adam/commands/postgres/psql_completions.py +0 -10
- adam/commands/pwd.py +0 -77
- adam/commands/reaper/reaper_session.py +0 -159
- adam/commands/report.py +0 -63
- adam/commands/show/show.py +0 -54
- adam/commands/show/show_app_actions.py +0 -56
- adam/commands/show/show_cassandra_status.py +0 -128
- adam/commands/show/show_commands.py +0 -61
- adam/commands/show/show_login.py +0 -63
- adam/commands/show/show_processes.py +0 -53
- adam/commands/show/show_repairs.py +0 -47
- adam/commands/show/show_storage.py +0 -52
- kaqing-2.0.110.dist-info/RECORD +0 -187
- kaqing-2.0.110.dist-info/top_level.txt +0 -1
- /adam/commands/{show → app}/__init__.py +0 -0
- /adam/commands/{nodetool_commands.py → nodetool/nodetool_commands.py} +0 -0
- {kaqing-2.0.110.dist-info → kaqing-2.0.214.dist-info}/WHEEL +0 -0
- {kaqing-2.0.110.dist-info → kaqing-2.0.214.dist-info}/entry_points.txt +0 -0
adam/utils.py
CHANGED
|
@@ -1,3 +1,5 @@
|
|
|
1
|
+
from abc import ABC
|
|
2
|
+
from concurrent.futures import Future, ThreadPoolExecutor
|
|
1
3
|
from contextlib import redirect_stdout
|
|
2
4
|
import copy
|
|
3
5
|
import csv
|
|
@@ -9,19 +11,59 @@ import os
|
|
|
9
11
|
from pathlib import Path
|
|
10
12
|
import random
|
|
11
13
|
import string
|
|
14
|
+
import threading
|
|
15
|
+
import traceback
|
|
16
|
+
from typing import Callable, Iterator, TextIO, TypeVar, Union
|
|
12
17
|
from dateutil import parser
|
|
13
18
|
import subprocess
|
|
14
19
|
import sys
|
|
15
20
|
import time
|
|
16
21
|
import click
|
|
17
22
|
import yaml
|
|
23
|
+
from prompt_toolkit.completion import Completer
|
|
18
24
|
|
|
19
25
|
from . import __version__
|
|
20
26
|
|
|
21
|
-
|
|
22
|
-
|
|
27
|
+
T = TypeVar('T')
|
|
28
|
+
|
|
29
|
+
log_state = threading.local()
|
|
30
|
+
|
|
31
|
+
class ConfigReadable:
|
|
32
|
+
def is_debug() -> bool:
|
|
33
|
+
pass
|
|
34
|
+
|
|
35
|
+
def get(self, key: str, default: T) -> T:
|
|
36
|
+
pass
|
|
37
|
+
|
|
38
|
+
class ConfigHolder:
|
|
39
|
+
# the singleton pattern
|
|
40
|
+
def __new__(cls, *args, **kwargs):
|
|
41
|
+
if not hasattr(cls, 'instance'): cls.instance = super(ConfigHolder, cls).__new__(cls)
|
|
42
|
+
|
|
43
|
+
return cls.instance
|
|
44
|
+
|
|
45
|
+
def __init__(self):
|
|
46
|
+
if not hasattr(self, 'config'):
|
|
47
|
+
# set by Config
|
|
48
|
+
self.config: 'ConfigReadable' = None
|
|
49
|
+
# only for testing
|
|
50
|
+
self.is_display_help = True
|
|
51
|
+
# set by ReplSession
|
|
52
|
+
self.append_command_history = lambda entry: None
|
|
53
|
+
|
|
54
|
+
NO_SORT = 0
|
|
55
|
+
SORT = 1
|
|
56
|
+
REVERSE_SORT = -1
|
|
57
|
+
|
|
58
|
+
def tabulize(lines: list[T], fn: Callable[..., T] = None, header: str = None, dashed_line = False, separator = ' ', to: int = 1, sorted: int = NO_SORT):
|
|
59
|
+
if fn:
|
|
60
|
+
lines = list(map(fn, lines))
|
|
61
|
+
|
|
62
|
+
if sorted == SORT:
|
|
63
|
+
lines.sort()
|
|
64
|
+
elif sorted == REVERSE_SORT:
|
|
65
|
+
lines.sort(reverse=True)
|
|
23
66
|
|
|
24
|
-
def lines_to_tabular(lines: list[str], header: str = None, dashed_line = False, separator = ' '):
|
|
25
67
|
maxes = []
|
|
26
68
|
nls = []
|
|
27
69
|
|
|
@@ -52,7 +94,14 @@ def lines_to_tabular(lines: list[str], header: str = None, dashed_line = False,
|
|
|
52
94
|
for line in lines:
|
|
53
95
|
format_line(line)
|
|
54
96
|
|
|
55
|
-
|
|
97
|
+
table = '\n'.join(nls)
|
|
98
|
+
|
|
99
|
+
if to == 1:
|
|
100
|
+
log(table)
|
|
101
|
+
elif to == 2:
|
|
102
|
+
log2(table)
|
|
103
|
+
|
|
104
|
+
return table
|
|
56
105
|
|
|
57
106
|
def convert_seconds(total_seconds_float):
|
|
58
107
|
total_seconds_int = int(total_seconds_float) # Convert float to integer seconds
|
|
@@ -69,17 +118,37 @@ def epoch(timestamp_string: str):
|
|
|
69
118
|
return parser.parse(timestamp_string).timestamp()
|
|
70
119
|
|
|
71
120
|
def log(s = None):
|
|
121
|
+
if not loggable():
|
|
122
|
+
return False
|
|
123
|
+
|
|
72
124
|
# want to print empty line for False or empty collection
|
|
73
125
|
if s == None:
|
|
74
126
|
print()
|
|
75
127
|
else:
|
|
76
128
|
click.echo(s)
|
|
77
129
|
|
|
78
|
-
|
|
130
|
+
return True
|
|
131
|
+
|
|
132
|
+
def log2(s = None, nl = True, file: str = None):
|
|
133
|
+
if not loggable():
|
|
134
|
+
return False
|
|
135
|
+
|
|
79
136
|
if s:
|
|
80
|
-
|
|
137
|
+
if file:
|
|
138
|
+
with open(file, 'at') as f:
|
|
139
|
+
f.write(s)
|
|
140
|
+
if nl:
|
|
141
|
+
f.write('\n')
|
|
142
|
+
else:
|
|
143
|
+
click.echo(s, err=True, nl=nl)
|
|
81
144
|
else:
|
|
82
|
-
|
|
145
|
+
if file:
|
|
146
|
+
with open(file, 'at') as f:
|
|
147
|
+
f.write('\n')
|
|
148
|
+
else:
|
|
149
|
+
print(file=sys.stderr)
|
|
150
|
+
|
|
151
|
+
return True
|
|
83
152
|
|
|
84
153
|
def elapsed_time(start_time: float):
|
|
85
154
|
end_time = time.time()
|
|
@@ -95,7 +164,7 @@ def duration(start_time: float, end_time: float = None):
|
|
|
95
164
|
end_time = time.time()
|
|
96
165
|
d = convert_seconds(end_time - start_time)
|
|
97
166
|
t = []
|
|
98
|
-
if d
|
|
167
|
+
if d:
|
|
99
168
|
t.append(f'{d[0]}h')
|
|
100
169
|
if t or d[1]:
|
|
101
170
|
t.append(f'{d[1]}m')
|
|
@@ -121,7 +190,11 @@ def deep_merge_dicts(dict1, dict2):
|
|
|
121
190
|
merged_dict[key] = deep_merge_dicts(merged_dict[key], value)
|
|
122
191
|
elif key not in merged_dict or value:
|
|
123
192
|
# Otherwise, overwrite or add the value from dict2
|
|
124
|
-
merged_dict[key]
|
|
193
|
+
if key in merged_dict and isinstance(merged_dict[key], Completer):
|
|
194
|
+
pass
|
|
195
|
+
# print('SEAN completer found, ignoring', key, value)
|
|
196
|
+
else:
|
|
197
|
+
merged_dict[key] = value
|
|
125
198
|
return merged_dict
|
|
126
199
|
|
|
127
200
|
def deep_sort_dict(d):
|
|
@@ -159,6 +232,9 @@ def get_deep_keys(d, current_path=""):
|
|
|
159
232
|
return keys
|
|
160
233
|
|
|
161
234
|
def display_help(replace_arg = False):
|
|
235
|
+
if not ConfigHolder().is_display_help:
|
|
236
|
+
return
|
|
237
|
+
|
|
162
238
|
args = copy.copy(sys.argv)
|
|
163
239
|
if replace_arg:
|
|
164
240
|
args[len(args) - 1] = '--help'
|
|
@@ -203,34 +279,15 @@ def json_to_csv(json_data: list[dict[any, any]], delimiter: str = ','):
|
|
|
203
279
|
with redirect_stdout(body) as f:
|
|
204
280
|
dict_writer = csv.DictWriter(f, keys, delimiter=delimiter)
|
|
205
281
|
dict_writer.writerows(flattened_data)
|
|
282
|
+
|
|
206
283
|
return header.getvalue().strip('\r\n'), [l.strip('\r') for l in body.getvalue().split('\n')]
|
|
207
284
|
else:
|
|
208
285
|
return None
|
|
209
286
|
|
|
210
|
-
def log_to_file(config: dict[any, any]):
|
|
211
|
-
try:
|
|
212
|
-
base = f"/kaqing/logs"
|
|
213
|
-
os.makedirs(base, exist_ok=True)
|
|
214
|
-
|
|
215
|
-
now = datetime.now()
|
|
216
|
-
timestamp_str = now.strftime("%Y%m%d-%H%M%S")
|
|
217
|
-
filename = f"{base}/login.{timestamp_str}.txt"
|
|
218
|
-
with open(filename, 'w') as f:
|
|
219
|
-
if isinstance(config, dict):
|
|
220
|
-
try:
|
|
221
|
-
json.dump(config, f, indent=4)
|
|
222
|
-
except:
|
|
223
|
-
f.write(config)
|
|
224
|
-
else:
|
|
225
|
-
f.write(config)
|
|
226
|
-
except:
|
|
227
|
-
pass
|
|
228
|
-
|
|
229
287
|
def copy_config_file(rel_path: str, module: str, suffix: str = '.yaml', show_out = True):
|
|
230
|
-
dir = f'{Path.home()}/.kaqing'
|
|
288
|
+
dir = creating_dir(f'{Path.home()}/.kaqing')
|
|
231
289
|
path = f'{dir}/{rel_path}'
|
|
232
290
|
if not os.path.exists(path):
|
|
233
|
-
os.makedirs(dir, exist_ok=True)
|
|
234
291
|
module = importlib.import_module(module)
|
|
235
292
|
with open(path, 'w') as f:
|
|
236
293
|
yaml.dump(module.config(), f, default_flow_style=False)
|
|
@@ -240,4 +297,586 @@ def copy_config_file(rel_path: str, module: str, suffix: str = '.yaml', show_out
|
|
|
240
297
|
return path
|
|
241
298
|
|
|
242
299
|
def idp_token_from_env():
|
|
243
|
-
return os.getenv('IDP_TOKEN')
|
|
300
|
+
return os.getenv('IDP_TOKEN')
|
|
301
|
+
|
|
302
|
+
def is_lambda(func):
|
|
303
|
+
return callable(func) and hasattr(func, '__name__') and func.__name__ == '<lambda>'
|
|
304
|
+
|
|
305
|
+
def debug(s = None):
|
|
306
|
+
if ConfigHolder().config.is_debug():
|
|
307
|
+
log2(f'DEBUG {s}')
|
|
308
|
+
|
|
309
|
+
def debug_complete(s = None):
|
|
310
|
+
CommandLog.log(f'DEBUG {s}', config=ConfigHolder().config.get('debugs.complete', 'off'))
|
|
311
|
+
|
|
312
|
+
def debug_trace():
|
|
313
|
+
if ConfigHolder().config.is_debug():
|
|
314
|
+
# if LogConfig.is_debug():
|
|
315
|
+
log2(traceback.format_exc())
|
|
316
|
+
|
|
317
|
+
def in_docker() -> bool:
|
|
318
|
+
if os.path.exists('/.dockerenv'):
|
|
319
|
+
return True
|
|
320
|
+
|
|
321
|
+
try:
|
|
322
|
+
with open('/proc/1/cgroup', 'rt') as f:
|
|
323
|
+
for line in f:
|
|
324
|
+
if 'docker' in line or 'lxc' in line:
|
|
325
|
+
return True
|
|
326
|
+
except FileNotFoundError:
|
|
327
|
+
pass
|
|
328
|
+
|
|
329
|
+
return False
|
|
330
|
+
|
|
331
|
+
class Ing:
|
|
332
|
+
def __init__(self, msg: str, suppress_log=False, job_log: str = None, condition = True):
|
|
333
|
+
self.msg = msg
|
|
334
|
+
self.suppress_log = suppress_log
|
|
335
|
+
self.job_log = job_log
|
|
336
|
+
self.condition = condition
|
|
337
|
+
|
|
338
|
+
def __enter__(self):
|
|
339
|
+
if not self.condition:
|
|
340
|
+
return None
|
|
341
|
+
|
|
342
|
+
if not hasattr(log_state, 'ing_cnt'):
|
|
343
|
+
log_state.ing_cnt = 0
|
|
344
|
+
|
|
345
|
+
try:
|
|
346
|
+
if not log_state.ing_cnt:
|
|
347
|
+
if not self.suppress_log and not ConfigHolder().config.is_debug():
|
|
348
|
+
log2(f'{self.msg}...', nl=False, file=self.job_log)
|
|
349
|
+
|
|
350
|
+
return None
|
|
351
|
+
finally:
|
|
352
|
+
log_state.ing_cnt += 1
|
|
353
|
+
|
|
354
|
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
|
355
|
+
if not self.condition:
|
|
356
|
+
return False
|
|
357
|
+
|
|
358
|
+
log_state.ing_cnt -= 1
|
|
359
|
+
if not log_state.ing_cnt:
|
|
360
|
+
if not self.suppress_log and not ConfigHolder().config.is_debug():
|
|
361
|
+
log2(' OK', file=self.job_log)
|
|
362
|
+
|
|
363
|
+
return False
|
|
364
|
+
|
|
365
|
+
def ing(msg: str, body: Callable[[], None]=None, suppress_log=False, job_log: str = None, condition = True):
|
|
366
|
+
if not body:
|
|
367
|
+
return Ing(msg, suppress_log=suppress_log, job_log=job_log, condition=condition)
|
|
368
|
+
|
|
369
|
+
r = None
|
|
370
|
+
|
|
371
|
+
t = Ing(msg, suppress_log=suppress_log)
|
|
372
|
+
t.__enter__()
|
|
373
|
+
try:
|
|
374
|
+
r = body()
|
|
375
|
+
finally:
|
|
376
|
+
t.__exit__(None, None, None)
|
|
377
|
+
|
|
378
|
+
return r
|
|
379
|
+
|
|
380
|
+
def loggable():
|
|
381
|
+
return ConfigHolder().config and ConfigHolder().config.is_debug() or not hasattr(log_state, 'ing_cnt') or not log_state.ing_cnt
|
|
382
|
+
|
|
383
|
+
class TimingNode:
|
|
384
|
+
def __init__(self, depth: int, s0: time.time = time.time(), line: str = None):
|
|
385
|
+
self.depth = depth
|
|
386
|
+
self.s0 = s0
|
|
387
|
+
self.line = line
|
|
388
|
+
self.children = []
|
|
389
|
+
|
|
390
|
+
def __str__(self):
|
|
391
|
+
return f'[{self.depth}: {self.line}, children={len(self.children)}]'
|
|
392
|
+
|
|
393
|
+
def tree(self):
|
|
394
|
+
lines = []
|
|
395
|
+
if self.line:
|
|
396
|
+
lines.append(self.line)
|
|
397
|
+
|
|
398
|
+
for child in self.children:
|
|
399
|
+
if child.line:
|
|
400
|
+
lines.append(child.tree())
|
|
401
|
+
return '\n'.join(lines)
|
|
402
|
+
|
|
403
|
+
class LogTiming:
|
|
404
|
+
def __init__(self, msg: str, s0: time.time = None):
|
|
405
|
+
self.msg = msg
|
|
406
|
+
self.s0 = s0
|
|
407
|
+
|
|
408
|
+
def __enter__(self):
|
|
409
|
+
if (config := ConfigHolder().config.get('debugs.timings', 'off')) not in ['on', 'file']:
|
|
410
|
+
return
|
|
411
|
+
|
|
412
|
+
if not hasattr(log_state, 'timings'):
|
|
413
|
+
log_state.timings = TimingNode(0)
|
|
414
|
+
|
|
415
|
+
self.me = log_state.timings
|
|
416
|
+
log_state.timings = TimingNode(self.me.depth+1)
|
|
417
|
+
if not self.s0:
|
|
418
|
+
self.s0 = time.time()
|
|
419
|
+
|
|
420
|
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
|
421
|
+
if (config := ConfigHolder().config.get('debugs.timings', 'off')) not in ['on', 'file']:
|
|
422
|
+
return False
|
|
423
|
+
|
|
424
|
+
child = log_state.timings
|
|
425
|
+
log_state.timings.line = timing_log_line(self.me.depth, self.msg, self.s0)
|
|
426
|
+
|
|
427
|
+
if child and child.line:
|
|
428
|
+
self.me.children.append(child)
|
|
429
|
+
log_state.timings = self.me
|
|
430
|
+
|
|
431
|
+
if not self.me.depth:
|
|
432
|
+
# log timings finally
|
|
433
|
+
CommandLog.log(self.me.tree(), config)
|
|
434
|
+
|
|
435
|
+
log_state.timings = TimingNode(0)
|
|
436
|
+
|
|
437
|
+
return False
|
|
438
|
+
|
|
439
|
+
def log_timing(msg: str, body: Callable[[], None]=None, s0: time.time = None):
|
|
440
|
+
if not s0 and not body:
|
|
441
|
+
return LogTiming(msg, s0=s0)
|
|
442
|
+
|
|
443
|
+
if not ConfigHolder().config.get('debugs.timings', False):
|
|
444
|
+
if body:
|
|
445
|
+
return body()
|
|
446
|
+
|
|
447
|
+
return
|
|
448
|
+
|
|
449
|
+
r = None
|
|
450
|
+
|
|
451
|
+
t = LogTiming(msg, s0=s0)
|
|
452
|
+
t.__enter__()
|
|
453
|
+
try:
|
|
454
|
+
if body:
|
|
455
|
+
r = body()
|
|
456
|
+
finally:
|
|
457
|
+
t.__exit__(None, None, None)
|
|
458
|
+
|
|
459
|
+
return r
|
|
460
|
+
|
|
461
|
+
def timing_log_line(depth: int, msg: str, s0: time.time):
|
|
462
|
+
elapsed = time.time() - s0
|
|
463
|
+
offloaded = '-' if threading.current_thread().name.startswith('offload') or threading.current_thread().name.startswith('async') else '+'
|
|
464
|
+
prefix = f'[{offloaded} timings] '
|
|
465
|
+
|
|
466
|
+
if depth:
|
|
467
|
+
if elapsed > 0.01:
|
|
468
|
+
prefix = (' ' * (depth-1)) + '* '
|
|
469
|
+
else:
|
|
470
|
+
prefix = ' ' * depth
|
|
471
|
+
|
|
472
|
+
return f'{prefix}{msg}: {elapsed:.2f} sec'
|
|
473
|
+
|
|
474
|
+
class WaitLog:
|
|
475
|
+
wait_log_flag = False
|
|
476
|
+
|
|
477
|
+
def wait_log(msg: str):
|
|
478
|
+
if not WaitLog.wait_log_flag:
|
|
479
|
+
log2(msg)
|
|
480
|
+
WaitLog.wait_log_flag = True
|
|
481
|
+
|
|
482
|
+
def clear_wait_log_flag():
|
|
483
|
+
WaitLog.wait_log_flag = False
|
|
484
|
+
|
|
485
|
+
def bytes_generator_from_file(file_path, chunk_size=4096):
|
|
486
|
+
with open(file_path, 'rb') as f:
|
|
487
|
+
while True:
|
|
488
|
+
chunk = f.read(chunk_size)
|
|
489
|
+
if not chunk:
|
|
490
|
+
break
|
|
491
|
+
yield chunk
|
|
492
|
+
|
|
493
|
+
class GeneratorStream(io.RawIOBase):
|
|
494
|
+
def __init__(self, generator):
|
|
495
|
+
self._generator = generator
|
|
496
|
+
self._buffer = b'' # Buffer to store leftover bytes from generator yields
|
|
497
|
+
|
|
498
|
+
def readable(self):
|
|
499
|
+
return True
|
|
500
|
+
|
|
501
|
+
def _read_from_generator(self):
|
|
502
|
+
try:
|
|
503
|
+
chunk = next(self._generator)
|
|
504
|
+
if isinstance(chunk, str):
|
|
505
|
+
chunk = chunk.encode('utf-8') # Encode if generator yields strings
|
|
506
|
+
self._buffer += chunk
|
|
507
|
+
except StopIteration:
|
|
508
|
+
pass # Generator exhausted
|
|
509
|
+
|
|
510
|
+
def readinto(self, b):
|
|
511
|
+
# Fill the buffer if necessary
|
|
512
|
+
while len(self._buffer) < len(b):
|
|
513
|
+
old_buffer_len = len(self._buffer)
|
|
514
|
+
self._read_from_generator()
|
|
515
|
+
if len(self._buffer) == old_buffer_len: # Generator exhausted and buffer empty
|
|
516
|
+
break
|
|
517
|
+
|
|
518
|
+
bytes_to_read = min(len(b), len(self._buffer))
|
|
519
|
+
b[:bytes_to_read] = self._buffer[:bytes_to_read]
|
|
520
|
+
self._buffer = self._buffer[bytes_to_read:]
|
|
521
|
+
return bytes_to_read
|
|
522
|
+
|
|
523
|
+
def read(self, size=-1):
|
|
524
|
+
if size == -1: # Read all remaining data
|
|
525
|
+
while True:
|
|
526
|
+
old_buffer_len = len(self._buffer)
|
|
527
|
+
self._read_from_generator()
|
|
528
|
+
if len(self._buffer) == old_buffer_len:
|
|
529
|
+
break
|
|
530
|
+
data = self._buffer
|
|
531
|
+
self._buffer = b''
|
|
532
|
+
return data
|
|
533
|
+
else:
|
|
534
|
+
# Ensure enough data in buffer
|
|
535
|
+
while len(self._buffer) < size:
|
|
536
|
+
old_buffer_len = len(self._buffer)
|
|
537
|
+
self._read_from_generator()
|
|
538
|
+
if len(self._buffer) == old_buffer_len:
|
|
539
|
+
break
|
|
540
|
+
|
|
541
|
+
data = self._buffer[:size]
|
|
542
|
+
self._buffer = self._buffer[size:]
|
|
543
|
+
return data
|
|
544
|
+
|
|
545
|
+
class LogTrace:
|
|
546
|
+
def __init__(self, err_msg: Union[str, callable, bool] = None):
|
|
547
|
+
self.err_msg = err_msg
|
|
548
|
+
|
|
549
|
+
def __enter__(self):
|
|
550
|
+
return None
|
|
551
|
+
|
|
552
|
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
|
553
|
+
if exc_type is not None:
|
|
554
|
+
if self.err_msg is True:
|
|
555
|
+
log2(str(exc_val))
|
|
556
|
+
elif callable(self.err_msg):
|
|
557
|
+
log2(self.err_msg(exc_val))
|
|
558
|
+
elif self.err_msg is not False and self.err_msg:
|
|
559
|
+
log2(self.err_msg)
|
|
560
|
+
|
|
561
|
+
if self.err_msg is not False and ConfigHolder().config.is_debug():
|
|
562
|
+
traceback.print_exception(exc_type, exc_val, exc_tb, file=sys.stderr)
|
|
563
|
+
|
|
564
|
+
# swallow exception
|
|
565
|
+
return True
|
|
566
|
+
|
|
567
|
+
def log_exc(err_msg: Union[str, callable, bool] = None):
|
|
568
|
+
return LogTrace(err_msg=err_msg)
|
|
569
|
+
|
|
570
|
+
class ParallelService:
|
|
571
|
+
def __init__(self, handler: 'ParallelMapHandler'):
|
|
572
|
+
self.handler = handler
|
|
573
|
+
|
|
574
|
+
def map(self, fn: Callable[..., T]) -> Iterator[T]:
|
|
575
|
+
executor = self.handler.executor
|
|
576
|
+
collection = self.handler.collection
|
|
577
|
+
collect = self.handler.collect
|
|
578
|
+
samples_cnt = self.handler.samples
|
|
579
|
+
|
|
580
|
+
iterator = None
|
|
581
|
+
if executor:
|
|
582
|
+
iterator = executor.map(fn, collection)
|
|
583
|
+
elif samples_cnt < sys.maxsize:
|
|
584
|
+
samples = []
|
|
585
|
+
|
|
586
|
+
for elem in collection:
|
|
587
|
+
if not samples_cnt:
|
|
588
|
+
break
|
|
589
|
+
|
|
590
|
+
samples.append(fn(elem))
|
|
591
|
+
samples_cnt -= 1
|
|
592
|
+
|
|
593
|
+
iterator = iter(samples)
|
|
594
|
+
else:
|
|
595
|
+
iterator = map(fn, collection)
|
|
596
|
+
|
|
597
|
+
if collect:
|
|
598
|
+
return list(iterator)
|
|
599
|
+
else:
|
|
600
|
+
return iterator
|
|
601
|
+
|
|
602
|
+
thread_pools: dict[str, ThreadPoolExecutor] = {}
|
|
603
|
+
thread_pool_lock = threading.Lock()
|
|
604
|
+
|
|
605
|
+
class ParallelMapHandler:
|
|
606
|
+
def __init__(self, collection: list, workers: int, samples: int = sys.maxsize, msg: str = None, collect = True, name = None):
|
|
607
|
+
self.collection = collection
|
|
608
|
+
self.workers = workers
|
|
609
|
+
self.executor = None
|
|
610
|
+
self.samples = samples
|
|
611
|
+
self.msg = msg
|
|
612
|
+
if msg and msg.startswith('d`'):
|
|
613
|
+
if ConfigHolder().config.is_debug():
|
|
614
|
+
self.msg = msg.replace('d`', '', 1)
|
|
615
|
+
else:
|
|
616
|
+
self.msg = None
|
|
617
|
+
self.collect = collect
|
|
618
|
+
self.name = name
|
|
619
|
+
|
|
620
|
+
self.begin = []
|
|
621
|
+
self.end = []
|
|
622
|
+
self.start_time = None
|
|
623
|
+
|
|
624
|
+
def __enter__(self):
|
|
625
|
+
self.start_time = None
|
|
626
|
+
|
|
627
|
+
self.calc_msgs()
|
|
628
|
+
|
|
629
|
+
if self.workers > 1 and (not self.size() or self.size()) and self.samples == sys.maxsize:
|
|
630
|
+
self.start_time = time.time()
|
|
631
|
+
|
|
632
|
+
self.executor = self.pool()
|
|
633
|
+
# self.executor = ThreadPoolExecutor(max_workers=self.workers)
|
|
634
|
+
self.executor.__enter__()
|
|
635
|
+
|
|
636
|
+
return ParallelService(self)
|
|
637
|
+
|
|
638
|
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
|
639
|
+
if not self.name and self.executor:
|
|
640
|
+
self.executor.__exit__(exc_type, exc_val, exc_tb)
|
|
641
|
+
|
|
642
|
+
if self.end:
|
|
643
|
+
log2(f'{" ".join(self.end)} in {elapsed_time(self.start_time)}.')
|
|
644
|
+
|
|
645
|
+
return False
|
|
646
|
+
|
|
647
|
+
def pool(self, thread_name_prefix: str = None):
|
|
648
|
+
if not self.name:
|
|
649
|
+
return ThreadPoolExecutor(max_workers=self.workers)
|
|
650
|
+
|
|
651
|
+
if self.name not in thread_pools:
|
|
652
|
+
thread_pools[self.name] = ThreadPoolExecutor(max_workers=self.workers, thread_name_prefix=thread_name_prefix)
|
|
653
|
+
|
|
654
|
+
return thread_pools[self.name]
|
|
655
|
+
|
|
656
|
+
def size(self):
|
|
657
|
+
if not self.collection:
|
|
658
|
+
return 0
|
|
659
|
+
|
|
660
|
+
return len(self.collection)
|
|
661
|
+
|
|
662
|
+
def calc_msgs(self):
|
|
663
|
+
if not self.msg:
|
|
664
|
+
return
|
|
665
|
+
|
|
666
|
+
self.begin = []
|
|
667
|
+
self.end = []
|
|
668
|
+
size = self.size()
|
|
669
|
+
offloaded = False
|
|
670
|
+
serially = False
|
|
671
|
+
sampling = False
|
|
672
|
+
if size == 0:
|
|
673
|
+
offloaded = True
|
|
674
|
+
msg = self.msg.replace('{size}', '1')
|
|
675
|
+
elif self.workers > 1 and size > 1 and self.samples == sys.maxsize:
|
|
676
|
+
msg = self.msg.replace('{size}', f'{size}')
|
|
677
|
+
elif self.samples < sys.maxsize:
|
|
678
|
+
sampling = True
|
|
679
|
+
samples = self.samples
|
|
680
|
+
if self.samples > size:
|
|
681
|
+
samples = size
|
|
682
|
+
msg = self.msg.replace('{size}', f'{samples}/{size} sample')
|
|
683
|
+
else:
|
|
684
|
+
serially = True
|
|
685
|
+
msg = self.msg.replace('{size}', f'{size}')
|
|
686
|
+
|
|
687
|
+
for token in msg.split(' '):
|
|
688
|
+
if '|' in token:
|
|
689
|
+
self.begin.append(token.split('|')[0])
|
|
690
|
+
if not sampling and not serially and not offloaded:
|
|
691
|
+
self.end.append(token.split('|')[1])
|
|
692
|
+
else:
|
|
693
|
+
self.begin.append(token)
|
|
694
|
+
if not sampling and not serially and not offloaded:
|
|
695
|
+
self.end.append(token)
|
|
696
|
+
|
|
697
|
+
if offloaded:
|
|
698
|
+
log2(f'{" ".join(self.begin)} offloaded...')
|
|
699
|
+
elif sampling or serially:
|
|
700
|
+
log2(f'{" ".join(self.begin)} serially...')
|
|
701
|
+
else:
|
|
702
|
+
log2(f'{" ".join(self.begin)} with {self.workers} workers...')
|
|
703
|
+
|
|
704
|
+
# parallelizers: dict[str, ParallelMapHandler] = {}
|
|
705
|
+
# parallelizer_lock = threading.Lock()
|
|
706
|
+
|
|
707
|
+
def parallelize(collection: list, workers: int = 0, samples = sys.maxsize, msg: str = None, collect = True, name = None):
    """Create a ParallelMapHandler context manager over *collection*.

    Arguments mirror ParallelMapHandler: *workers* caps the thread-pool size,
    *samples* limits how many items are processed (sys.maxsize means all),
    *msg* is a '{size}'-templated progress message, *collect* controls whether
    results are gathered, and *name* keys a shared, cached pool.

    The commented-out name-keyed handler cache that previously followed the
    return statement was dead code and has been removed.
    """
    return ParallelMapHandler(collection, workers, samples = samples, msg = msg, collect = collect, name = name)
717
|
+
|
|
718
|
+
class OffloadService:
    """Thin facade over an OffloadHandler that submits callables to its pool,
    falling back to synchronous execution when no executor is active."""

    def __init__(self, handler: 'OffloadHandler'):
        self.handler = handler  # owning handler; its .executor may be None/absent-of-pool

    def submit(self, fn: 'Callable[..., T]', /, *args, **kwargs) -> 'Future[T]':
        """Run fn(*args, **kwargs) on the handler's executor when one exists,
        otherwise synchronously; always return a Future.

        Fix: in the synchronous fallback an exception raised by *fn* is now
        captured on the returned Future (matching executor semantics) instead
        of propagating out of submit() itself.
        """
        executor = self.handler.executor

        if executor:
            return executor.submit(fn, *args, **kwargs)

        future = Future()
        try:
            future.set_result(fn(*args, **kwargs))
        except BaseException as e:
            # Mirror ThreadPoolExecutor: failures surface via future.exception().
            future.set_exception(e)
        return future
|
|
734
|
+
class OffloadHandler(ParallelMapHandler):
    """Context manager exposing an OffloadService backed by a shared, named
    thread pool ('offload-<name>').

    Fix: the near-verbatim copy of ParallelMapHandler.calc_msgs that used to
    live here differed only in a local-variable spelling (`samples > size` vs
    `self.samples > size`, equal by prior assignment), so the override has
    been removed in favor of the inherited implementation.
    """

    def __init__(self, max_workers: int, msg: str = None, name: str = None):
        # collect=False: offloaded work is fire-and-forget from the handler's
        # point of view; callers hold the returned Futures themselves.
        super().__init__(None, max_workers, msg=msg, collect=False, name=f'offload-{name}')

    def __enter__(self):
        self.start_time = None
        self.calc_msgs()

        if self.workers > 1:
            self.start_time = time.time()

            # pool() caches executors by self.name, so instances sharing a
            # name share one pool across uses.
            self.executor = self.pool(thread_name_prefix='offload')
            self.executor.__enter__()

        return OffloadService(self)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # NOTE(review): self.name is always truthy (f'offload-{name}'), so this
        # branch never runs and the shared cached pool is left alive for reuse
        # — confirm that is the intent. The short-circuit also avoids touching
        # self.executor when workers <= 1 left it unset here.
        if not self.name and self.executor:
            self.executor.__exit__(exc_type, exc_val, exc_tb)

        # presumably self.end is initialized by the parent when msg is unset —
        # TODO confirm ParallelMapHandler.__init__ defines it.
        if self.end:
            log2(f'{" ".join(self.end)} in {elapsed_time(self.start_time)}.')

        # Never suppress exceptions from the with-body.
        return False
|
803
|
+
def offload(max_workers: int = 3, msg: str = None, name: str = None):
    """Factory for OffloadHandler; use as `with offload(...) as svc:`."""
    return OffloadHandler(max_workers, msg=msg, name=name)
|
806
|
+
def kaqing_log_file_name(suffix = 'log'):
    """Build a timestamped log-file path under log_dir(), e.g. '<dir>/23235959.log'."""
    stamp = datetime.now().strftime('%d%H%M%S')
    return f"{log_dir()}/{stamp}.{suffix}"
|
809
|
+
def log_dir():
    """Return the configured log directory (config key 'log-dir'), creating it on first use."""
    configured = ConfigHolder().config.get('log-dir', '/tmp/qing-db/q/logs')
    return creating_dir(configured)
|
812
|
+
class LogFileHandler:
    """Context manager yielding a freshly-opened kaqing log file, or None when
    *condition* is falsy — callers must handle a None file object."""

    def __init__(self, suffix = 'log', condition=True):
        self.suffix = suffix        # file-name suffix passed to kaqing_log_file_name
        self.condition = condition  # when falsy, no file is opened and None is yielded

    def __enter__(self):
        self.f = None
        if self.condition:
            self.f = open(kaqing_log_file_name(suffix=self.suffix), 'w')
            self.f.__enter__()
        return self.f

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.f:
            self.f.__exit__(exc_type, exc_val, exc_tb)

            # Fix: the history append is kept INSIDE the `if self.f` guard so
            # self.f.name is never dereferenced when no file was opened
            # (condition=False). Records a ':cat <file>' replay command.
            if ConfigHolder().append_command_history:
                ConfigHolder().append_command_history(f':cat {self.f.name}')

        # Never suppress exceptions from the with-body.
        return False
|
|
834
|
+
def kaqing_log_file(suffix = 'log', condition=True):
    """Factory for LogFileHandler; use as `with kaqing_log_file(...) as f:`."""
    return LogFileHandler(suffix=suffix, condition=condition)
|
837
|
+
class CommandLog:
    """Process-wide sink for command logging; behavior is chosen per call via
    *config*: 'off' (drop), 'on' (log2 to console), 'file' (shared cmd.log)."""

    # Lazily-opened shared file handle; stays None until the first 'file' log
    # (or forever, if opening fails).
    log_file = None

    @staticmethod
    def log(line: str, config: str = 'off'):
        """Record *line* according to *config*; logging is best-effort and
        never raises from the 'file' path."""
        if config == 'file':
            if not CommandLog.log_file:
                try:
                    CommandLog.log_file = open(kaqing_log_file_name(suffix='cmd.log'), 'w')
                except Exception:
                    # Fix: narrowed from a bare `except:` so Ctrl-C /
                    # SystemExit are no longer swallowed.
                    pass

            try:
                # If the open above failed, log_file is still None and the
                # AttributeError is deliberately absorbed (drop the line).
                CommandLog.log_file.write(line + '\n')
            except Exception:
                pass
        elif config == 'on':
            log2(line)

    @staticmethod
    def close_log_file():
        """Close the shared log file (if any), record a ':cat' replay entry in
        the command history, and reset the handle."""
        if CommandLog.log_file:
            try:
                CommandLog.log_file.close()
            except Exception:
                pass

            # .name remains readable on a closed file object.
            if ConfigHolder().append_command_history:
                ConfigHolder().append_command_history(f':cat {CommandLog.log_file.name}')

            CommandLog.log_file = None
|
|
867
|
+
class ExecResult(ABC):
    """Interface for the outcome of an executed command."""

    def exit_code(self) -> int:
        """Process exit status; meant to be overridden by concrete results."""

    def cat_log_file_cmd(self) -> str:
        """Shell command that prints this execution's log file; meant to be overridden."""
|
|
874
|
+
# Directories already ensured to exist during this process; avoids repeated
# makedirs syscalls on hot paths (e.g. every log-file open).
_dirs_created = set()

def creating_dir(dir):
    """Ensure *dir* exists on disk and return it.

    The result is memoized in _dirs_created, so a directory deleted after the
    first call will NOT be recreated within this process.

    Fix: os.makedirs(..., exist_ok=True) already tolerates a pre-existing
    directory, so the former os.path.exists() pre-check (a TOCTOU race) has
    been dropped.
    """
    if dir not in _dirs_created:
        _dirs_created.add(dir)
        os.makedirs(dir, exist_ok=True)

    return dir
|