kaqing 2.0.200__py3-none-any.whl → 2.0.211__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kaqing might be problematic. Click here for more details.

Files changed (61)
  1. adam/batch.py +1 -1
  2. adam/commands/app/utils_app.py +1 -1
  3. adam/commands/cql/completions_c.py +1 -1
  4. adam/commands/cql/utils_cql.py +14 -13
  5. adam/commands/devices/device.py +1 -1
  6. adam/commands/download_cassandra_log.py +2 -2
  7. adam/commands/export/export_databases.py +13 -8
  8. adam/commands/export/export_sessions.py +12 -11
  9. adam/commands/export/exporter.py +140 -53
  10. adam/commands/export/import_session.py +0 -4
  11. adam/commands/export/importer.py +11 -11
  12. adam/commands/export/importer_athena.py +15 -6
  13. adam/commands/export/importer_sqlite.py +19 -8
  14. adam/commands/export/utils_export.py +37 -15
  15. adam/commands/postgres/postgres_databases.py +1 -1
  16. adam/commands/postgres/postgres_ls.py +1 -1
  17. adam/commands/postgres/utils_postgres.py +2 -1
  18. adam/commands/show/show_cassandra_status.py +3 -10
  19. adam/commands/show/show_processes.py +1 -1
  20. adam/commands/show/show_storage.py +2 -1
  21. adam/embedded_params.py +1 -1
  22. adam/repl_commands.py +13 -12
  23. adam/sso/cred_cache.py +2 -5
  24. adam/utils.py +122 -71
  25. adam/utils_k8s/app_clusters.py +10 -3
  26. adam/utils_k8s/app_pods.py +9 -3
  27. adam/utils_k8s/cassandra_clusters.py +4 -4
  28. adam/utils_k8s/cassandra_nodes.py +13 -7
  29. adam/{pod_exec_result.py → utils_k8s/pod_exec_result.py} +8 -2
  30. adam/utils_k8s/pods.py +34 -29
  31. adam/utils_local.py +78 -2
  32. adam/utils_repl/repl_completer.py +6 -2
  33. adam/utils_sqlite.py +3 -8
  34. adam/version.py +1 -1
  35. {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/METADATA +1 -1
  36. {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/RECORD +39 -61
  37. adam/commands/alter_tables.py +0 -66
  38. adam/commands/cassandra/download_cassandra_log.py +0 -45
  39. adam/commands/cassandra/nodetool.py +0 -64
  40. adam/commands/cassandra/nodetool_commands.py +0 -120
  41. adam/commands/cassandra/restart_cluster.py +0 -47
  42. adam/commands/cassandra/restart_node.py +0 -51
  43. adam/commands/cassandra/restart_nodes.py +0 -47
  44. adam/commands/cassandra/rollout.py +0 -88
  45. adam/commands/cat.py +0 -36
  46. adam/commands/cd.py +0 -41
  47. adam/commands/download_file.py +0 -47
  48. adam/commands/find_files.py +0 -51
  49. adam/commands/find_processes.py +0 -76
  50. adam/commands/head.py +0 -36
  51. adam/commands/ls.py +0 -41
  52. adam/commands/os/cat.py +0 -36
  53. adam/commands/os/download_file.py +0 -47
  54. adam/commands/os/find_files.py +0 -51
  55. adam/commands/os/find_processes.py +0 -76
  56. adam/commands/os/head.py +0 -36
  57. adam/commands/os/shell.py +0 -41
  58. adam/commands/shell.py +0 -41
  59. {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/WHEEL +0 -0
  60. {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/entry_points.txt +0 -0
  61. {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/top_level.txt +0 -0
adam/batch.py CHANGED
@@ -12,7 +12,7 @@ from adam.commands.deploy.undeploy import Undeploy, UndeployCommandHelper
12
12
  from adam.commands.issues import Issues
13
13
  from adam.commands.login import Login
14
14
  from adam.commands.download_cassandra_log import DownloadCassandraLog
15
- from adam.commands.ls import Ls
15
+ from adam.commands.fs.ls import Ls
16
16
  from adam.commands.medusa.medusa import Medusa
17
17
  from adam.commands.nodetool import NodeTool, NodeToolCommandHelper
18
18
  from adam.commands.postgres.postgres import Postgres, PostgresCommandHelper
@@ -3,7 +3,7 @@ from typing import Union
3
3
 
4
4
  from adam.app_session import AppSession
5
5
  from adam.apps import Apps
6
- from adam.pod_exec_result import PodExecResult
6
+ from adam.utils_k8s.pod_exec_result import PodExecResult
7
7
  from adam.repl_state import ReplState
8
8
  from adam.utils import log2
9
9
  from adam.utils_k8s.app_clusters import AppClusters
@@ -18,7 +18,7 @@ def completions_c(state: ReplState) -> dict[str, any]:
18
18
  'table-props': lambda x: {
19
19
  'GC_GRACE_SECONDS': ps
20
20
  },
21
- 'table-props-value': lambda x: {'GC_GRACE_SECONDS': ps}[x],
21
+ 'table-props-value': lambda x: {None: None, 'GC_GRACE_SECONDS': ps}[x],
22
22
  'export-database-types': lambda x: ['athena', 'sqlite', 'csv'],
23
23
  'export-databases': lambda x: ExportDatabases.database_names(),
24
24
  'export-sessions': lambda x: ExportSessions.export_session_names(state.sts, state.pod, state.namespace),
@@ -7,9 +7,9 @@ from adam.commands.commands_utils import show_table
7
7
  from adam.utils_k8s.cassandra_clusters import CassandraClusters
8
8
  from adam.utils_k8s.cassandra_nodes import CassandraNodes
9
9
  from adam.utils_k8s.secrets import Secrets
10
- from adam.pod_exec_result import PodExecResult
10
+ from adam.utils_k8s.pod_exec_result import PodExecResult
11
11
  from adam.repl_state import ReplState
12
- from adam.utils import log2, log_timing, wait_log
12
+ from adam.utils import ing, log2, log_timing, wait_log
13
13
  from adam.utils_k8s.statefulsets import StatefulSets
14
14
 
15
15
  def cd_dirs(state: ReplState) -> list[str]:
@@ -55,7 +55,7 @@ def table_spec(state: ReplState, table: str, on_any=False) -> 'TableSpec':
55
55
 
56
56
  return parse_cql_desc_table(r.stdout if state.pod else r[0].stdout)
57
57
 
58
- def run_cql(state: ReplState, cql: str, opts: list = [], show_out = False, show_query = False, use_single_quotes = False, on_any = False, backgrounded=False, log_file=None, via_sh=True) -> list[PodExecResult]:
58
+ def run_cql(state: ReplState, cql: str, opts: list = [], show_out = False, show_query = False, use_single_quotes = False, on_any = False, backgrounded=False, log_file=None, history=True) -> list[PodExecResult]:
59
59
  if show_query:
60
60
  log2(cql)
61
61
 
@@ -69,7 +69,7 @@ def run_cql(state: ReplState, cql: str, opts: list = [], show_out = False, show_
69
69
 
70
70
  with log_timing(cql):
71
71
  with cassandra(state) as pods:
72
- return pods.exec(command, action='cql', show_out=show_out, on_any=on_any, backgrounded=backgrounded, log_file=log_file, via_sh=via_sh)
72
+ return pods.exec(command, action='cql', show_out=show_out, on_any=on_any, backgrounded=backgrounded, log_file=log_file, history=history)
73
73
 
74
74
  def parse_cql_desc_tables(out: str):
75
75
  # Keyspace data_endpoint_auth
@@ -227,16 +227,16 @@ class CassandraPodService:
227
227
  def __init__(self, handler: 'CassandraExecHandler'):
228
228
  self.handler = handler
229
229
 
230
- def exec(self, command: str, action='bash', show_out = True, on_any = False, throw_err = False, shell = '/bin/sh', backgrounded = False, log_file = None, via_sh = True) -> Union[PodExecResult, list[PodExecResult]]:
230
+ def exec(self, command: str, action='bash', show_out = True, on_any = False, throw_err = False, shell = '/bin/sh', backgrounded = False, log_file = None, history=True) -> Union[PodExecResult, list[PodExecResult]]:
231
231
  state = self.handler.state
232
232
  pod = self.handler.pod
233
233
 
234
234
  if pod:
235
235
  return CassandraNodes.exec(pod, state.namespace, command,
236
- show_out=show_out, throw_err=throw_err, shell=shell, backgrounded=backgrounded, log_file=log_file, via_sh=via_sh)
236
+ show_out=show_out, throw_err=throw_err, shell=shell, backgrounded=backgrounded, log_file=log_file, history=history)
237
237
  elif state.sts:
238
238
  return CassandraClusters.exec(state.sts, state.namespace, command, action=action,
239
- show_out=show_out, on_any=on_any, shell=shell, backgrounded=backgrounded, log_file=log_file, via_sh=via_sh)
239
+ show_out=show_out, on_any=on_any, shell=shell, backgrounded=backgrounded, log_file=log_file, history=history)
240
240
 
241
241
  return []
242
242
 
@@ -267,14 +267,15 @@ class CassandraPodService:
267
267
 
268
268
  return run_cql(state, query, opts=opts, show_out=show_out, show_query=show_query, use_single_quotes=use_single_quotes, on_any=on_any, backgrounded=backgrounded, log_file=log_file)
269
269
 
270
- def display_table(self, cols: str, header: str, show_out = True, backgrounded = False):
270
+ def display_table(self, cols: str, header: str, show_out = True, backgrounded = False, msg: str = None):
271
271
  state = self.handler.state
272
272
 
273
- if state.pod:
274
- return show_table(state, [state.pod], cols, header, show_out=show_out, backgrounded = backgrounded)
275
- elif state.sts:
276
- pod_names = [pod.metadata.name for pod in StatefulSets.pods(state.sts, state.namespace)]
277
- return show_table(state, pod_names, cols, header, show_out=show_out, backgrounded = backgrounded)
273
+ with ing(msg=msg, condition=backgrounded and msg):
274
+ if state.pod:
275
+ return show_table(state, [state.pod], cols, header, show_out=show_out, backgrounded = backgrounded)
276
+ elif state.sts:
277
+ pod_names = [pod.metadata.name for pod in StatefulSets.pods(state.sts, state.namespace)]
278
+ return show_table(state, pod_names, cols, header, show_out=show_out, backgrounded = backgrounded)
278
279
 
279
280
  def nodetool(self, args: str, status = False, show_out = True, backgrounded = False) -> Union[PodExecResult, list[PodExecResult]]:
280
281
  state = self.handler.state
@@ -2,7 +2,7 @@ from abc import abstractmethod
2
2
 
3
3
  from adam.commands.command import Command
4
4
  from adam.config import Config
5
- from adam.pod_exec_result import PodExecResult
5
+ from adam.utils_k8s.pod_exec_result import PodExecResult
6
6
  from adam.repl_state import BashSession, ReplState
7
7
  from adam.utils import log2
8
8
  from adam.utils_k8s.pods import Pods
@@ -1,7 +1,7 @@
1
1
  from adam.commands.command import Command
2
2
  from adam.commands.devices.devices import Devices
3
3
  from adam.config import Config
4
- from adam.pod_exec_result import PodExecResult
4
+ from adam.utils_k8s.pod_exec_result import PodExecResult
5
5
  from adam.utils import log2
6
6
  from adam.utils_k8s.cassandra_nodes import CassandraNodes
7
7
  from adam.repl_state import ReplState, RequiredState
@@ -31,7 +31,7 @@ class DownloadCassandraLog(Command):
31
31
 
32
32
  with self.validate(args, state) as (args, state):
33
33
  path = Config().get('logs.path', '/c3/cassandra/logs/system.log')
34
- r: PodExecResult = CassandraNodes.exec(state.pod, state.namespace, f'cat {path}', backgrounded=True, no_history=True)
34
+ r: PodExecResult = CassandraNodes.exec(state.pod, state.namespace, f'cat {path}', backgrounded=True, history=False)
35
35
 
36
36
  to_file = Pods.download_file(state.pod, 'cassandra', state.namespace, path)
37
37
  log2(f'Downloaded to {to_file}.')
@@ -1,10 +1,12 @@
1
1
  from collections.abc import Callable
2
2
  from datetime import datetime
3
+ from functools import partial
3
4
  import os
4
5
  import boto3
5
6
 
6
7
  from adam.commands.export.export_sessions import ExportSessions
7
8
  from adam.commands.export.importer import Importer
9
+ from adam.commands.export.utils_export import export_log_dir
8
10
  from adam.config import Config
9
11
  from adam.repl_session import ReplSession
10
12
  from adam.repl_state import ReplState
@@ -31,7 +33,7 @@ class ExportDatabases:
31
33
 
32
34
  if Config().get('repl.history.push-cat-log-file', True):
33
35
  if log_file and ReplSession().prompt_session:
34
- ReplSession().prompt_session.history.append_string(f':sh cat {log_file}')
36
+ ReplSession().prompt_session.history.append_string(f':cat {log_file}')
35
37
 
36
38
  return cnt
37
39
 
@@ -192,20 +194,23 @@ class ExportDatabaseService:
192
194
  def __init__(self, handler: 'ExportDatabaseHandler'):
193
195
  self.handler = handler
194
196
 
195
- def sql(self, query: str, database: str = None, backgrounded = False):
197
+ def sql(self, query: str, database: str = None, backgrounded = False, export_log: str = None):
196
198
  if not database:
197
199
  database = self.handler.state.export_session
198
200
 
199
- def output(out: str):
200
- log_prefix = Config().get('export.log-prefix', '/tmp/qing')
201
- log_file = f'{log_prefix}-{datetime.now().strftime("%d%H%M%S")}-export.log'
201
+ def output(export_log: str, out: str):
202
+ # serving for 1. export log or 2. job log for the sampling select 10 rows afterward
203
+ flag = 'at'
204
+ if not export_log:
205
+ export_log = f'{export_log_dir()}/{datetime.now().strftime("%d%H%M%S")}-export.log'
206
+ flag = 'w'
202
207
 
203
- with open(log_file, 'w') as f:
208
+ with open(export_log, flag) as f:
204
209
  f.write(out)
205
210
 
206
- return log_file
211
+ return export_log
207
212
 
208
- ExportDatabases.run_query(query, database, output = output if backgrounded else None, show_query = not backgrounded)
213
+ ExportDatabases.run_query(query, database, output = partial(output, export_log) if backgrounded else None, show_query = not backgrounded)
209
214
 
210
215
  def drop(self, database: str):
211
216
  state = self.handler.state
@@ -3,14 +3,14 @@ import os
3
3
  import re
4
4
 
5
5
  from adam.commands.export.importer import Importer
6
- from adam.commands.export.utils_export import ExportTableStatus, csv_dir, find_files
6
+ from adam.commands.export.utils_export import ExportTableStatus, csv_dir, export_log_dir, find_files, os_system_exec
7
7
  from adam.config import Config
8
8
  from adam.repl_state import ReplState
9
9
  from adam.utils import log2, tabulize, log, parallelize
10
10
  from adam.utils_k8s.cassandra_nodes import CassandraNodes
11
- from adam.utils_k8s.pods import Pods, log_prefix
11
+ from adam.utils_k8s.pods import Pods
12
12
  from adam.utils_k8s.statefulsets import StatefulSets
13
- from adam.utils_local import local_tmp_dir
13
+ from adam.utils_local import local_downloads_dir
14
14
 
15
15
  class ExportSessions:
16
16
  def clear_export_session_cache():
@@ -36,13 +36,13 @@ class ExportSessions:
36
36
 
37
37
  prefix = Importer.prefix_from_importer(importer)
38
38
 
39
- log_files: list[str] = find_files(pod, namespace, f'{log_prefix()}-{prefix}*_*.log*')
39
+ log_files: list[str] = find_files(pod, namespace, f'{export_log_dir()}/{prefix}*_*.log*')
40
40
 
41
41
  if not log_files:
42
42
  return {}
43
43
 
44
44
  for log_file in log_files[:limit]:
45
- m = re.match(f'{log_prefix()}-([ces].*?)_.*\.log?(.*)', log_file)
45
+ m = re.match(f'{export_log_dir()}/([ces].*?)_.*\.log?(.*)', log_file)
46
46
  if m:
47
47
  s = m.group(1)
48
48
  state = m.group(2) # '', '.pending_import', '.done'
@@ -66,7 +66,8 @@ class ExportSessions:
66
66
  pod = StatefulSets.pod_names(sts, namespace)[0]
67
67
 
68
68
  CassandraNodes.exec(pod, namespace, f'rm -rf {csv_dir()}/*', show_out=Config().is_debug(), shell='bash')
69
- CassandraNodes.exec(pod, namespace, f'rm -rf {log_prefix()}-*.log*', show_out=Config().is_debug(), shell='bash')
69
+ cmd = f'rm -rf {export_log_dir()}/*.log*'
70
+ os_system_exec(cmd, show_out=Config().is_debug())
70
71
 
71
72
  return True
72
73
 
@@ -102,17 +103,18 @@ class ExportSessions:
102
103
  csv_cnt = 0
103
104
  log_cnt = 0
104
105
 
105
- log_files: list[str] = find_files(pod, namespace, f'{log_prefix()}-{session}_*.log*')
106
+ log_files: list[str] = find_files(pod, namespace, f'{export_log_dir()}/{session}_*.log*')
106
107
 
107
108
  for log_file in log_files:
108
- m = re.match(f'{log_prefix()}-{session}_(.*?)\.(.*?)\.log.*', log_file)
109
+ m = re.match(f'{export_log_dir()}/{session}_(.*?)\.(.*?)\.log.*', log_file)
109
110
  if m:
110
111
  table = m.group(2)
111
112
 
112
113
  CassandraNodes.exec(pod, namespace, f'rm -rf {csv_dir()}/{session}_{table}', show_out=not multi_tables, shell='bash')
113
114
  csv_cnt += 1
114
115
 
115
- CassandraNodes.exec(pod, namespace, f'rm -rf {log_file}', show_out=not multi_tables, shell='bash')
116
+ cmd = f'rm -rf {log_file}'
117
+ os_system_exec(cmd, show_out=not multi_tables)
116
118
  log_cnt += 1
117
119
 
118
120
  return csv_cnt, log_cnt
@@ -142,8 +144,7 @@ class ExportSessions:
142
144
  def download_csv(table):
143
145
  from_path: str = table.csv_file
144
146
 
145
- to_path = from_path.replace(csv_dir(), local_tmp_dir())
146
- os.makedirs(os.path.dirname(to_path), exist_ok=True)
147
+ to_path = from_path.replace(csv_dir(), local_downloads_dir())
147
148
  Pods.download_file(pod, 'cassandra', namespace, from_path, to_path)
148
149
 
149
150
  log2(f'[{session}] Downloaded to {to_path}.')
@@ -1,5 +1,7 @@
1
1
  from datetime import datetime
2
+ import os
2
3
  import time
4
+ import traceback
3
5
 
4
6
  from adam.commands.command import InvalidArgumentsException
5
7
  from adam.commands.cql.utils_cql import cassandra_table_names, run_cql, table_spec
@@ -8,13 +10,12 @@ from adam.commands.export.export_sessions import ExportSessions
8
10
  from adam.commands.export.importer import Importer
9
11
  from adam.commands.export.importer_athena import AthenaImporter
10
12
  from adam.commands.export.importer_sqlite import SqliteImporter
11
- from adam.commands.export.utils_export import ExportSpec, ExportTableStatus, ExportTableSpec, ImportSpec, csv_dir, find_files, state_with_pod
13
+ from adam.commands.export.utils_export import ExportSpec, ExportTableStatus, ExportTableSpec, ImportSpec, csv_dir, export_log_dir, find_files, os_system_exec, state_with_pod
12
14
  from adam.config import Config
13
- from adam.pod_exec_result import PodExecResult
15
+ from adam.repl_session import ReplSession
14
16
  from adam.repl_state import ReplState
15
- from adam.utils import debug, log, parallelize, log2, ing, log_exc
17
+ from adam.utils import debug, kaqing_log_file_name, log, offload, parallelize, log2, ing, log_exc
16
18
  from adam.utils_k8s.cassandra_nodes import CassandraNodes
17
- from adam.utils_k8s.pods import log_prefix
18
19
 
19
20
  class Exporter:
20
21
  def export_tables(args: list[str], state: ReplState, export_only: bool = False, max_workers = 0) -> tuple[list[str], ExportSpec]:
@@ -133,11 +134,9 @@ class Exporter:
133
134
 
134
135
  prefix = Importer.prefix_from_importer(spec.importer)
135
136
  if spec.session:
136
- spec.session = f'{prefix}{spec.session[1:]}'
137
+ state.export_session = f'{prefix}{spec.session[1:]}'
137
138
  else:
138
- spec.session = f'{prefix}{datetime.now().strftime("%Y%m%d%H%M%S")[3:]}'
139
-
140
- state.export_session = spec.session
139
+ state.export_session = f'{prefix}{datetime.now().strftime("%Y%m%d%H%M%S")[3:]}'
141
140
 
142
141
  return spec
143
142
 
@@ -154,89 +153,160 @@ class Exporter:
154
153
  if export_state == 'init':
155
154
  CassandraNodes.exec(state.pod, state.namespace, f'rm -rf {csv_dir()}/{spec.session}_*', show_out=Config().is_debug(), shell='bash')
156
155
 
157
- action = f'[{spec.session}] Exporting|Exported'
156
+ job_log = kaqing_log_file_name()
157
+
158
+ action = f'[{spec.session}] Triggering export of'
158
159
  if export_state == 'init':
159
160
  action = f'[{spec.session}] Preparing|Prepared'
160
161
  elif export_state == 'import':
161
162
  action = f'[{spec.session}] Importing|Imported'
162
163
 
163
- with parallelize(spec.tables, max_workers, msg=action + ' {size} Cassandra tables') as exec:
164
- return exec.map(lambda table: Exporter.export_table(table, state, spec.session, spec.importer, export_only, len(spec.tables) > 1, consistency=spec.consistency, export_state=export_state)), spec
165
-
166
- def export_table(spec: ExportTableSpec, state: ReplState, session: str, importer: str, export_only = False, multi_tables = True, consistency: str = None, export_state=None):
164
+ msg = action + ' {size} Cassandra tables'
165
+
166
+ if export_state != 'init':
167
+ log2(f'[{spec.session}] Logging to {job_log}...')
168
+ ReplSession().append_history(f':cat {job_log}')
169
+
170
+ pod = state.pod
171
+ with parallelize(spec.tables, max_workers, msg=msg, collect=export_state == 'init', name='exporter') as exec:
172
+ return exec.map(lambda table: Exporter.export_table(table,
173
+ state.with_pod(pod),
174
+ spec.session,
175
+ spec.importer,
176
+ export_only,
177
+ len(spec.tables) > 1,
178
+ consistency=spec.consistency,
179
+ export_state=export_state,
180
+ job_log=None if export_state == 'init' else job_log)), spec
181
+
182
+ def export_table(spec: ExportTableSpec,
183
+ state: ReplState,
184
+ session: str,
185
+ importer: str,
186
+ export_only = False,
187
+ multi_tables = True,
188
+ consistency: str = None,
189
+ export_state=None,
190
+ job_log: str = None):
167
191
  s: str = None
168
192
 
169
193
  table, target_table, columns = Exporter.resove_table_n_columns(spec, state, include_ks_in_target=False, importer=importer)
170
194
 
171
- log_file = f'{log_prefix()}-{session}_{spec.keyspace}.{target_table}.log'
195
+ log_file = f'{export_log_dir()}/{session}_{spec.keyspace}.{target_table}.log'
172
196
  create_db = not state.export_session
173
197
 
174
198
  if export_state == 'init':
175
199
  Exporter.create_table_log(spec, state, session, table, target_table)
176
200
  return 'table_log_created'
177
201
  else:
178
- if export_state == 'pending_export':
179
- Exporter.export_to_csv(spec, state, session, table, target_table, columns, multi_tables=multi_tables, consistency=consistency)
202
+ try:
203
+ if export_state == 'pending_export':
204
+ Exporter.export_to_csv(spec, state, session, table, target_table, columns, multi_tables=multi_tables, consistency=consistency, job_log=job_log)
180
205
 
181
- log_files: list[str] = find_files(state.pod, state.namespace, f'{log_file}*')
182
- if not log_files:
183
- return s
206
+ log_files: list[str] = find_files(state.pod, state.namespace, f'{log_file}*')
207
+ if not log_files:
208
+ return s
184
209
 
185
- log_file = log_files[0]
210
+ log_file = log_files[0]
186
211
 
187
- status: ExportTableStatus = ExportTableStatus.from_log_file(state.pod, state.namespace, session, log_file)
188
- while status.status != 'done':
189
- if status.status == 'export_in_pregress':
190
- debug('Exporting to CSV is still in progess, sleeping for 1 sec...')
191
- time.sleep(1)
192
- elif status.status == 'exported':
193
- log_file = Exporter.rename_to_pending_import(spec, state, session, target_table)
194
- if importer == 'csv' or export_only:
195
- return 'pending_import'
196
- elif status.status == 'pending_import':
197
- log_file, session = Exporter.import_from_csv(spec, state, session, importer, table, target_table, columns, multi_tables=multi_tables, create_db=create_db)
212
+ status: ExportTableStatus = ExportTableStatus.from_log_file(state.pod, state.namespace, session, log_file)
198
213
 
199
- status = ExportTableStatus.from_log_file(state.pod, state.namespace, session, log_file)
214
+ with offload(name='exporter') as exec:
215
+ ctx = ExportTableContext(spec, state, session, importer, export_only, multi_tables, table, target_table, columns, create_db, log_file, status, job_log)
216
+ exec.submit(lambda: Exporter.export_loop(ctx))
217
+ # Exporter.export_loop(ExportTableContext(spec, state, session, importer, export_only, multi_tables, table, target_table, columns, create_db, log_file, status))
218
+ except:
219
+ traceback.print_exc()
200
220
 
201
221
  return status.status
202
222
 
223
+ def export_loop(ctx: 'ExportTableContext'):
224
+ try:
225
+ while ctx.status.status != 'done':
226
+ if ctx.status.status == 'export_in_pregress':
227
+ debug('Exporting to CSV is still in progess, sleeping for 1 sec...')
228
+ time.sleep(1)
229
+ elif ctx.status.status == 'exported':
230
+ ctx.log_file = Exporter.rename_to_pending_import(ctx.spec, ctx.state, ctx.session, ctx.target_table)
231
+ ExportSessions.clear_export_session_cache()
232
+ if ctx.importer == 'csv' or ctx.export_only:
233
+ return 'pending_import'
234
+ elif ctx.status.status == 'pending_import':
235
+ ctx.log_file, ctx.session = Exporter.import_from_csv(ctx.spec,
236
+ ctx.state,
237
+ ctx.session,
238
+ ctx.importer,
239
+ ctx.table,
240
+ ctx.target_table,
241
+ ctx.columns,
242
+ multi_tables=ctx.multi_tables,
243
+ create_db=ctx.create_db,
244
+ job_log=ctx.f)
245
+
246
+ ctx.status = ExportTableStatus.from_log_file(ctx.state.pod, ctx.state.namespace, ctx.session, ctx.log_file)
247
+
248
+ return ctx.status.status
249
+ except:
250
+ traceback.print_exc()
251
+
203
252
  def create_table_log(spec: ExportTableSpec, state: ReplState, session: str, table: str, target_table: str):
204
- log_file = f'{log_prefix()}-{session}_{spec.keyspace}.{target_table}.log'
253
+ log_file = f'{export_log_dir()}/{session}_{spec.keyspace}.{target_table}.log'
254
+ dir = os.path.dirname(log_file)
205
255
 
206
- CassandraNodes.exec(state.pod, state.namespace, f'rm -f {log_file}* && touch {log_file}', show_out=Config().is_debug(), shell='bash')
256
+ cmd = f'rm -f {log_file}* && mkdir -p {dir} && touch {log_file}'
257
+ os_system_exec(cmd, show_out=Config().is_debug())
207
258
 
208
259
  return table
209
260
 
210
- def export_to_csv(spec: ExportTableSpec, state: ReplState, session: str, table: str, target_table: str, columns: str, multi_tables = True, consistency: str = None):
261
+ def export_to_csv(spec: ExportTableSpec,
262
+ state: ReplState,
263
+ session: str,
264
+ table: str,
265
+ target_table: str,
266
+ columns: str,
267
+ multi_tables = True,
268
+ consistency: str = None,
269
+ job_log: str = None):
211
270
  db = f'{session}_{target_table}'
212
271
 
213
272
  CassandraNodes.exec(state.pod, state.namespace, f'mkdir -p {csv_dir()}/{db}', show_out=Config().is_debug(), shell='bash')
214
273
  csv_file = f'{csv_dir()}/{db}/{table}.csv'
215
- log_file = f'{log_prefix()}-{session}_{spec.keyspace}.{target_table}.log'
274
+ table_log_file = f'{export_log_dir()}/{session}_{spec.keyspace}.{target_table}.log'
216
275
 
217
276
  suppress_ing_log = Config().is_debug() or multi_tables
218
277
  queries = []
219
278
  if consistency:
220
279
  queries.append(f'CONSISTENCY {consistency}')
221
280
  queries.append(f"COPY {spec.keyspace}.{table}({columns}) TO '{csv_file}' WITH HEADER = TRUE")
222
- r: PodExecResult = ing(
223
- f'[{session}] Dumping table {spec.keyspace}.{table}{f" with consistency {consistency}" if consistency else ""}',
224
- lambda: run_cql(state, ';'.join(queries), show_out=Config().is_debug(), backgrounded=True, log_file=log_file, via_sh=False),
225
- suppress_log=suppress_ing_log)
226
281
 
227
- return log_file
282
+ with ing(f'[{session}] Triggering dump of table {spec.keyspace}.{table}{f" with consistency {consistency}" if consistency else ""}',
283
+ suppress_log=suppress_ing_log,
284
+ job_log = job_log):
285
+ run_cql(state, ';'.join(queries), show_out=Config().is_debug(), backgrounded=True, log_file=table_log_file, history=False)
286
+
287
+ return table_log_file
228
288
 
229
289
  def rename_to_pending_import(spec: ExportTableSpec, state: ReplState, session: str, target_table: str):
230
- log_file = f'{log_prefix()}-{session}_{spec.keyspace}.{target_table}.log'
290
+ log_file = f'{export_log_dir()}/{session}_{spec.keyspace}.{target_table}.log'
231
291
  to = f'{log_file}.pending_import'
232
292
 
233
- CassandraNodes.exec(state.pod, state.namespace, f'mv {log_file} {to}', show_out=Config().is_debug(), shell='bash')
293
+ cmd =f'mv {log_file} {to}'
294
+ os_system_exec(cmd, show_out=Config().is_debug())
234
295
 
235
296
  return to
236
297
 
237
- def import_from_csv(spec: ExportTableSpec, state: ReplState, session: str, importer: str, table: str, target_table: str, columns: str, multi_tables = True, create_db = False):
298
+ def import_from_csv(spec: ExportTableSpec,
299
+ state: ReplState,
300
+ session: str,
301
+ importer: str,
302
+ table: str,
303
+ target_table: str,
304
+ columns: str,
305
+ multi_tables = True,
306
+ create_db = False,
307
+ job_log: str = None):
238
308
  im = AthenaImporter() if importer == 'athena' else SqliteImporter()
239
- return im.import_from_csv(state, session if session else state.export_session, spec.keyspace, table, target_table, columns, multi_tables, create_db)
309
+ return im.import_from_csv(state, session if session else state.export_session, spec.keyspace, table, target_table, columns, multi_tables, create_db, job_log=job_log)
240
310
 
241
311
  def resove_table_n_columns(spec: ExportTableSpec, state: ReplState, include_ks_in_target = False, importer = 'sqlite'):
242
312
  table = spec.table
@@ -262,6 +332,22 @@ class Exporter:
262
332
 
263
333
  return table, target_table, columns
264
334
 
335
+ class ExportTableContext:
336
+ def __init__(self, spec: ExportTableSpec, state: ReplState, session: str, importer: str, export_only = False, multi_tables = True, table: str = None, target_table: str = None, columns: str = None, create_db = False, log_file: str = None, status: ExportTableStatus = None, f: str = None):
337
+ self.spec = spec
338
+ self.state = state
339
+ self.session = session
340
+ self.importer = importer
341
+ self.export_only = export_only
342
+ self.multi_tables = multi_tables
343
+ self.table = table
344
+ self.target_table = target_table
345
+ self.columns = columns
346
+ self.create_db = create_db
347
+ self.log_file = log_file
348
+ self.status = status
349
+ self.f = f
350
+
265
351
  class ExportService:
266
352
  def __init__(self, handler: 'ExporterHandler'):
267
353
  self.handler = handler
@@ -279,16 +365,17 @@ class ExportService:
279
365
 
280
366
  ExportSessions.clear_export_session_cache()
281
367
 
282
- if spec.importer == 'csv' or export_only:
283
- ExportSessions.show_session(state.sts, state.pod, state.namespace, spec.session)
284
- else:
285
- log()
286
- with export_db(state) as dbs:
287
- dbs.show_database()
368
+ # if spec.importer == 'csv' or export_only:
369
+ # ExportSessions.show_session(state.sts, state.pod, state.namespace, spec.session)
370
+ # else:
371
+ # log()
372
+ # with export_db(state) as dbs:
373
+ # dbs.show_database()
288
374
  finally:
375
+ pass
289
376
  # if exporting to csv, do not bind the new session id to repl state
290
- if spec and spec.importer == 'csv':
291
- state.export_session = export_session
377
+ # if spec and spec.importer == 'csv':
378
+ # state.export_session = export_session
292
379
 
293
380
  return state
294
381
 
@@ -34,10 +34,6 @@ class ImportSession(Command):
34
34
  return exporter.import_session(spec)
35
35
 
36
36
  def completion(self, state: ReplState):
37
- # warm up cache
38
- # ExportSessions.export_session_names(state.sts, state.pod, state.namespace)
39
- # ExportSessions.export_session_names(state.sts, state.pod, state.namespace, export_state='pending_import')
40
-
41
37
  return {}
42
38
 
43
39
  def help(self, _: ReplState):
@@ -1,11 +1,9 @@
1
1
  from abc import abstractmethod
2
2
 
3
- from adam.commands.export.utils_export import csv_dir
3
+ from adam.commands.export.utils_export import csv_dir, export_log_dir, os_system_exec
4
4
  from adam.config import Config
5
5
  from adam.repl_state import ReplState
6
6
  from adam.utils import ing
7
- from adam.utils_k8s.cassandra_nodes import CassandraNodes
8
- from adam.utils_k8s.pods import log_prefix
9
7
 
10
8
  class Importer:
11
9
  @abstractmethod
@@ -13,7 +11,7 @@ class Importer:
13
11
  pass
14
12
 
15
13
  @abstractmethod
16
- def import_from_csv(self, state: ReplState, from_session: str, keyspace: str, table: str, target_table: str, columns: str, multi_tables = True, create_db = False):
14
+ def import_from_csv(self, state: ReplState, from_session: str, keyspace: str, table: str, target_table: str, columns: str, multi_tables = True, create_db = False, log_file: str = None):
17
15
  pass
18
16
 
19
17
  @abstractmethod
@@ -27,11 +25,12 @@ class Importer:
27
25
  pod = state.pod
28
26
  namespace = state.namespace
29
27
  to_session = state.export_session
30
- log_file = f'{log_prefix()}-{from_session}_{keyspace}.{target_table}.log.pending_import'
28
+ log_file = f'{export_log_dir()}/{from_session}_{keyspace}.{target_table}.log.pending_import'
31
29
 
32
- to = f'{log_prefix()}-{to_session}_{keyspace}.{target_table}.log.done'
30
+ to = f'{export_log_dir()}/{to_session}_{keyspace}.{target_table}.log.done'
33
31
 
34
- CassandraNodes.exec(pod, namespace, f'mv {log_file} {to}', show_out=Config().is_debug(), shell='bash')
32
+ cmd = f'mv {log_file} {to}'
33
+ os_system_exec(cmd, show_out=Config().is_debug())
35
34
 
36
35
  return to, to_session
37
36
 
@@ -41,12 +40,13 @@ class Importer:
41
40
 
42
41
  return session
43
42
 
44
- def remove_csv(self, state: ReplState, from_session: str, table: str, target_table: str, multi_tables = True):
43
+ def remove_csv(self, state: ReplState, from_session: str, table: str, target_table: str, multi_tables = True, job_log: str = None):
45
44
  pod = state.pod
46
45
  namespace = state.namespace
47
46
 
48
- with ing(f'[{from_session}] Cleaning up temporary files', suppress_log=multi_tables):
49
- CassandraNodes.exec(pod, namespace, f'rm -rf {self.csv_file(from_session, table, target_table)}', show_out=Config().is_debug(), shell='bash')
47
+ with ing(f'[{from_session}] Cleaning up temporary files', suppress_log=multi_tables, job_log=job_log):
48
+ cmd = f'rm -rf {self.csv_file(from_session, table, target_table)}'
49
+ os_system_exec(cmd, show_out=Config().is_debug())
50
50
 
51
51
  def db(self, session: str, keyspace: str):
52
52
  return f'{session}_{keyspace}'
@@ -78,4 +78,4 @@ class Importer:
78
78
  elif session.startswith('e'):
79
79
  importer = 'athena'
80
80
 
81
- return importer
81
+ return importer