kaqing 2.0.200__py3-none-any.whl → 2.0.211__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kaqing might be problematic.
- adam/batch.py +1 -1
- adam/commands/app/utils_app.py +1 -1
- adam/commands/cql/completions_c.py +1 -1
- adam/commands/cql/utils_cql.py +14 -13
- adam/commands/devices/device.py +1 -1
- adam/commands/download_cassandra_log.py +2 -2
- adam/commands/export/export_databases.py +13 -8
- adam/commands/export/export_sessions.py +12 -11
- adam/commands/export/exporter.py +140 -53
- adam/commands/export/import_session.py +0 -4
- adam/commands/export/importer.py +11 -11
- adam/commands/export/importer_athena.py +15 -6
- adam/commands/export/importer_sqlite.py +19 -8
- adam/commands/export/utils_export.py +37 -15
- adam/commands/postgres/postgres_databases.py +1 -1
- adam/commands/postgres/postgres_ls.py +1 -1
- adam/commands/postgres/utils_postgres.py +2 -1
- adam/commands/show/show_cassandra_status.py +3 -10
- adam/commands/show/show_processes.py +1 -1
- adam/commands/show/show_storage.py +2 -1
- adam/embedded_params.py +1 -1
- adam/repl_commands.py +13 -12
- adam/sso/cred_cache.py +2 -5
- adam/utils.py +122 -71
- adam/utils_k8s/app_clusters.py +10 -3
- adam/utils_k8s/app_pods.py +9 -3
- adam/utils_k8s/cassandra_clusters.py +4 -4
- adam/utils_k8s/cassandra_nodes.py +13 -7
- adam/{pod_exec_result.py → utils_k8s/pod_exec_result.py} +8 -2
- adam/utils_k8s/pods.py +34 -29
- adam/utils_local.py +78 -2
- adam/utils_repl/repl_completer.py +6 -2
- adam/utils_sqlite.py +3 -8
- adam/version.py +1 -1
- {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/METADATA +1 -1
- {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/RECORD +39 -61
- adam/commands/alter_tables.py +0 -66
- adam/commands/cassandra/download_cassandra_log.py +0 -45
- adam/commands/cassandra/nodetool.py +0 -64
- adam/commands/cassandra/nodetool_commands.py +0 -120
- adam/commands/cassandra/restart_cluster.py +0 -47
- adam/commands/cassandra/restart_node.py +0 -51
- adam/commands/cassandra/restart_nodes.py +0 -47
- adam/commands/cassandra/rollout.py +0 -88
- adam/commands/cat.py +0 -36
- adam/commands/cd.py +0 -41
- adam/commands/download_file.py +0 -47
- adam/commands/find_files.py +0 -51
- adam/commands/find_processes.py +0 -76
- adam/commands/head.py +0 -36
- adam/commands/ls.py +0 -41
- adam/commands/os/cat.py +0 -36
- adam/commands/os/download_file.py +0 -47
- adam/commands/os/find_files.py +0 -51
- adam/commands/os/find_processes.py +0 -76
- adam/commands/os/head.py +0 -36
- adam/commands/os/shell.py +0 -41
- adam/commands/shell.py +0 -41
- {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/WHEEL +0 -0
- {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/entry_points.txt +0 -0
- {kaqing-2.0.200.dist-info → kaqing-2.0.211.dist-info}/top_level.txt +0 -0
adam/commands/export/importer_athena.py
CHANGED
@@ -19,9 +19,16 @@ class AthenaImporter(Importer):
     def prefix(self):
         return 'e'

-    def import_from_csv(self,
-
-
+    def import_from_csv(self,
+                        state: ReplState,
+                        from_session: str,
+                        keyspace: str,
+                        table: str,
+                        target_table: str,
+                        columns: str,
+                        multi_tables = True,
+                        create_db = False,
+                        job_log: str = None):
         csv_file = self.csv_file(from_session, table, target_table)
         pod = state.pod
         namespace = state.namespace
@@ -47,12 +54,14 @@ class AthenaImporter(Importer):
             return to, to_session
         finally:
             if succeeded:
-                self.remove_csv(state, from_session, table, target_table, multi_tables)
+                self.remove_csv(state, from_session, table, target_table, multi_tables, job_log=job_log)
                 Athena.clear_cache()

-                if
+                if multi_tables:
+                    log2(f'[{to_session}] {keyspace}.{target_table} OK', file=job_log)
+                else:
                     with export_db(state) as dbs:
-                        dbs.sql(f'select * from {
+                        dbs.sql(f'select * from {keyspace}.{target_table} limit 10', backgrounded=True, export_log=job_log)

     def import_from_local_csv(self, state: ReplState,
             keyspace: str, table: str, csv_file: str, multi_tables = True, create_db = False):
adam/commands/export/importer_sqlite.py
CHANGED
@@ -1,9 +1,10 @@
+from typing import TextIO
 import pandas

 from adam.commands.export.export_databases import export_db
 from adam.commands.export.importer import Importer
 from adam.repl_state import ReplState
-from adam.utils import GeneratorStream, bytes_generator_from_file, ing
+from adam.utils import GeneratorStream, bytes_generator_from_file, ing, log2
 from adam.utils_k8s.pods import Pods
 from adam.utils_sqlite import SQLite, sqlite

@@ -11,9 +12,17 @@ class SqliteImporter(Importer):
     def prefix(self):
         return 's'

-    def import_from_csv(self,
-
-
+    def import_from_csv(self,
+                        state: ReplState,
+                        from_session: str,
+                        keyspace: str,
+                        table: str,
+                        target_table: str,
+                        columns: str,
+                        multi_tables = True,
+                        create_db = False,
+                        job_log: str = None):
+
         csv_file = self.csv_file(from_session, table, target_table)
         pod = state.pod
         namespace = state.namespace
@@ -21,7 +30,7 @@ class SqliteImporter(Importer):

         succeeded = False
         try:
-            with ing(f'[{to_session}] Uploading to Sqlite', suppress_log=multi_tables):
+            with ing(f'[{to_session}] Uploading to Sqlite', suppress_log=multi_tables, job_log=job_log):
                 # create a connection to single keyspace
                 with sqlite(to_session, keyspace) as conn:
                     bytes = Pods.read_file(pod, 'cassandra', namespace, csv_file)
@@ -35,12 +44,14 @@ class SqliteImporter(Importer):
             return to, to_session
         finally:
             if succeeded:
-                self.remove_csv(state, from_session, table, target_table, multi_tables)
+                self.remove_csv(state, from_session, table, target_table, multi_tables, job_log=job_log)
                 SQLite.clear_cache()

-                if
+                if multi_tables:
+                    log2(f'[{to_session}] {keyspace}.{target_table} OK', file=job_log)
+                else:
                     with export_db(state) as dbs:
-                        dbs.sql(f'select * from {keyspace}.{target_table} limit 10')
+                        dbs.sql(f'select * from {keyspace}.{target_table} limit 10', backgrounded=True, export_log=job_log)

     def import_from_local_csv(self, state: ReplState,
             keyspace: str, table: str, csv_file: str, multi_tables = True, create_db = False):
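For orientation, a minimal sketch of how a caller might use the widened `import_from_csv` signature shown in both importer diffs above. The importer construction and all session, keyspace, and table names here are hypothetical; `job_log` is assumed to be a log-file path, based on how the new code passes it to `log2(..., file=job_log)` and `ing(..., job_log=job_log)`.

```python
# Hypothetical caller; construction details of SqliteImporter are not shown in this diff.
importer = SqliteImporter()

importer.import_from_csv(
    state,                          # current ReplState
    from_session='sess_240101',     # export session that produced the CSV (made-up name)
    keyspace='ks1',
    table='t1',
    target_table='t1_copy',
    columns='<row-key>',
    multi_tables=True,              # per-table chatter suppressed; progress goes to job_log
    create_db=False,
    job_log='/tmp/qing-db/q/export/logs/sess_240101_ks1.t1.log',  # assumed path shape
)
```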
adam/commands/export/utils_export.py
CHANGED
@@ -1,12 +1,13 @@
 import io
+import os
 import re

 from adam.config import Config
-from adam.
+from adam.utils import ExecResult, creating_dir, log2
 from adam.repl_state import ReplState
 from adam.utils_k8s.cassandra_nodes import CassandraNodes
-from adam.utils_k8s.pods import log_prefix
 from adam.utils_k8s.statefulsets import StatefulSets
+from adam.utils_local import local_exec

 class ImportSpec:
     def __init__(self, table_name: str, session: str = None, files: list[str] = None, importer: str = None):
@@ -196,7 +197,7 @@ class ExportTableStatus:
         statuses: list[ExportTableStatus] = []

         status_in_whole = 'done'
-        log_files: list[str] = find_files(pod, namespace, f'{
+        log_files: list[str] = find_files(pod, namespace, f'{export_log_dir()}/{export_session}_*.log*')

         for log_file in log_files:
             status: ExportTableStatus = ExportTableStatus.from_log_file(pod, namespace, export_session, log_file)
@@ -211,7 +212,7 @@ class ExportTableStatus:
         def get_csv_files_n_table(target_table: str):
             db = f'{copy_session}_{target_table}'
             csv_file = f'{csv_dir()}/{db}/*.csv'
-            csv_files: list[str] = find_files(pod, namespace, csv_file)
+            csv_files: list[str] = find_files(pod, namespace, csv_file, remote=True)
             if csv_files:
                 table = target_table
                 m = re.match(f'{csv_dir()}/{db}/(.*).csv', csv_files[0])
@@ -221,7 +222,7 @@ class ExportTableStatus:

             return csv_files, target_table

-        m = re.match(f'{
+        m = re.match(f'{export_log_dir()}/{copy_session}_(.*?)\.(.*?)\.log(.*)', log_file)
         if m:
             keyspace = m.group(1)
             target_table = m.group(2)
@@ -234,7 +235,8 @@ class ExportTableStatus:

         # 4 rows exported to 1 files in 0 day, 0 hour, 0 minute, and 1.335 seconds.
         pattern = 'rows exported to'
-        r:
+        r: ExecResult = local_exec(['grep', pattern, log_file], show_out=Config().is_debug())
+
         if r.exit_code() == 0:
             csv_files, table = get_csv_files_n_table(target_table)
             if csv_files:
@@ -246,17 +248,26 @@

         return ExportTableStatus(None, None, 'unknown')

-def
-
-
-
-
-
+def find_files(pod: str, namespace: str, pattern: str, mmin: int = 0, remote = False):
+    stdout = ''
+    if not remote:
+        # find . -maxdepth 1 -type f -name '*'
+        dir = os.path.dirname(pattern)
+        base = os.path.basename(pattern)
+        cmd = ['find', dir, '-name', base]
+        if mmin:
+            cmd += ['-mmin', f'-{mmin}']
+
+        stdout = local_exec(cmd, show_out=Config().is_debug()).stdout
     else:
-
+        cmd = f'find {pattern}'
+        if mmin:
+            cmd = f'{cmd} -mmin -{mmin}'
+
+        stdout = CassandraNodes.exec(pod, namespace, cmd, show_out=Config().is_debug(), shell='bash').stdout

     log_files = []
-    for line in
+    for line in stdout.split('\n'):
         line = line.strip(' \r')
         if line:
             log_files.append(line)
@@ -341,4 +352,15 @@ class PodPushHandler:
         return False

 def state_with_pod(state: ReplState, pod: str = None):
-    return PodPushHandler(state, pod=pod)
+    return PodPushHandler(state, pod=pod)
+
+def os_system_exec(cmd: str, show_out = False):
+    if show_out: log2(cmd)
+
+    os.system(cmd)
+
+def csv_dir():
+    return Config().get('export.csv_dir', '/c3/cassandra/tmp')
+
+def export_log_dir():
+    return creating_dir(Config().get('export.log-dir', '/tmp/qing-db/q/export/logs'))
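A rough usage sketch of the reworked helpers above, using only what the new code shows: `find_files` with `remote=False` runs `find <dir> -name <base>` locally through `local_exec`, while `remote=True` runs `find <pattern>` inside the Cassandra pod via `CassandraNodes.exec`; `mmin` adds a `-mmin -N` age filter. The import path assumes these are module-level functions in `utils_export.py`, which the diff suggests; the pod, namespace, and session names are placeholders.

```python
from adam.commands.export.utils_export import csv_dir, export_log_dir, find_files

pod, namespace = 'cassandra-dc1-sts-0', 'my-namespace'   # placeholders

# Local: lists export job logs on the workstation (pod/namespace are ignored in this branch).
job_logs = find_files(pod, namespace, f'{export_log_dir()}/mysession_*.log*')

# Remote: lists exported CSVs inside the pod, matching the get_csv_files_n_table() call above.
csv_files = find_files(pod, namespace, f'{csv_dir()}/mysession_t1/*.csv', remote=True)

# Only files modified within the last 30 minutes.
recent = find_files(pod, namespace, f'{export_log_dir()}/*.log', mmin=30)
```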
adam/commands/postgres/postgres_databases.py
CHANGED
@@ -145,7 +145,7 @@ class PostgresDatabases:

         r = Pods.exec(pod_name, container_name, state.namespace, cmd, show_out=show_out, backgrounded=backgrounded, env_prefix=env_prefix)
         if r and r.log_file:
-            ReplSession().append_history(f'
+            ReplSession().append_history(f':cat {r.log_file}')

         return r

adam/commands/postgres/utils_postgres.py
CHANGED
@@ -1,6 +1,7 @@
 import functools

 from adam.commands.postgres.postgres_databases import PostgresDatabases, pg_path
+from adam.config import Config
 from adam.repl_state import ReplState
 from adam.utils import log2, wait_log
 from adam.utils_k8s.pods import Pods
@@ -62,7 +63,7 @@ class PostgresPodService:
         if isinstance(args, list):
             query = ' '.join(args)

-        PostgresDatabases.run_sql(state, query, backgrounded=backgrounded)
+        PostgresDatabases.run_sql(state, query, show_out=Config().is_debug(), backgrounded=backgrounded)

 class PostgresExecHandler:
     def __init__(self, state: ReplState, backgrounded=False):
adam/commands/show/show_cassandra_status.py
CHANGED
@@ -8,13 +8,14 @@ from adam.checks.gossip import Gossip
 from adam.columns.columns import Columns
 from adam.commands import extract_options, extract_trailing_options
 from adam.commands.command import Command
+from adam.commands.commands_utils import write_to_kaqing_log_file
 from adam.commands.cql.utils_cql import cassandra
 from adam.config import Config
 from adam.repl_session import ReplSession
 from adam.utils_issues import IssuesUtils
 from adam.utils_k8s.statefulsets import StatefulSets
 from adam.repl_state import ReplState, RequiredState
-from adam.utils import SORT, tabulize, log2, log_exc
+from adam.utils import SORT, log_dir, tabulize, log2, log_exc
 from adam.checks.status import parse_nodetool_status

 class ShowCassandraStatus(Command):
@@ -105,15 +106,7 @@ class ShowCassandraStatus(Command):
         r = tabulize(status, lambda s: ','.join([c.host_value(check_results, s) for c in columns]), header=header, separator=',', sorted=SORT, to = 0 if backgrounded else 1)

         if backgrounded:
-
-            log_file = f'{log_prefix}-{datetime.now().strftime("%d%H%M%S")}.log'
-
-            with open(log_file, 'w') as f:
-                f.write(r)
-
-            ReplSession().append_history(f':sh cat {log_file}')
-
-            r = log_file
+            r = write_to_kaqing_log_file(r)

         IssuesUtils.show(check_results)

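The removed inline block hints at what the new `write_to_kaqing_log_file` helper consolidates: write the rendered table to a timestamped log file, push a cat command onto the REPL history, and return the file path. The helper itself (in `adam/commands/commands_utils.py`) is not shown in this diff, so the following is only an inferred sketch; the prefix default and the exact history command are assumptions (the removed code pushed `:sh cat`, the postgres change above pushes `:cat`, and the new `log_dir` import suggests the prefix now comes from the configured log directory).

```python
# Inferred sketch only -- the real helper is not part of this diff.
from datetime import datetime

from adam.repl_session import ReplSession


def write_to_kaqing_log_file(text: str, log_prefix: str = '/tmp/qing-db/q/logs/kaqing') -> str:
    # Mirror the removed inline code: timestamped file name, write, push to history.
    log_file = f'{log_prefix}-{datetime.now().strftime("%d%H%M%S")}.log'
    with open(log_file, 'w') as f:
        f.write(text)

    ReplSession().append_history(f':cat {log_file}')
    return log_file
```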
adam/commands/show/show_processes.py
CHANGED
@@ -37,7 +37,7 @@ class ShowProcesses(Command):
         header = Config().get('processes-qing.header', 'POD_NAME,Q_CPU/TOTAL,MEM/LIMIT')

         with cassandra(state) as pods:
-            pods.display_table(cols, header, show_out=show_out, backgrounded=backgrounded)
+            pods.display_table(cols, header, show_out=show_out, backgrounded=backgrounded, msg='Checking processes')

         return state

adam/commands/show/show_storage.py
CHANGED
@@ -3,6 +3,7 @@ from adam.commands.command import Command
 from adam.commands.cql.utils_cql import cassandra
 from adam.config import Config
 from adam.repl_state import ReplState, RequiredState
+from adam.utils import ing

 class ShowStorage(Command):
     COMMAND = 'show storage'
@@ -32,7 +33,7 @@ class ShowStorage(Command):
         cols = Config().get('storage.columns', 'pod,volume_root,volume_cassandra,snapshots,data,compactions')
         header = Config().get('storage.header', 'POD_NAME,VOLUME /,VOLUME CASS,SNAPSHOTS,DATA,COMPACTIONS')
         with cassandra(state) as pods:
-            pods.display_table(cols, header, show_out=show_out, backgrounded=backgrounded)
+            pods.display_table(cols, header, show_out=show_out, backgrounded=backgrounded, msg='Checking storage')

         return state

adam/embedded_params.py
CHANGED
@@ -1,2 +1,2 @@
 def config():
-    return {'app': {'console-endpoint': 'https://{host}/{env}/{app}/static/console/index.html', 'container-name': 'c3-server', 'cr': {'cluster-regex': '(.*?-.*?)-.*', 'group': 'ops.c3.ai', 'v': 'v2', 'plural': 'c3cassandras'}, 'label': 'c3__app_id-0', 'login': {'admin-group': '{host}/C3.ClusterAdmin', 'ingress': '{app_id}-k8singr-appleader-001', 'timeout': 5, 'session-check-url': 'https://{host}/{env}/{app}/api/8/C3/userSessionToken', 'cache-creds': True, 'cache-username': True, 'url': 'https://{host}/{env}/{app}', 'another': "You're logged in to {has}. However, for this app, you need to log in to {need}.", 'token-server-url': 'http://localhost:{port}', 'password-max-length': 128}, 'strip': '0'}, 'audit': {'endpoint': 'https://4psvtaxlcb.execute-api.us-west-2.amazonaws.com/prod/', 'workers': 3, 'timeout': 10, 'log-audit-queries': False, 'athena': {'auto-repair': {'elapsed_hours': 12}, 'region': 'us-west-2', 'catalog': 'AwsDataCatalog', 'database': 'audit', 'repair-partition-tables': 'audit', 'output': 's3://s3.ops--audit/ddl/results', 'repair-cluster-tables': 'cluster'}, 'queries': {'last10': "SELECT * FROM audit\nWHERE drive <> 'z' and ({date_condition})\nORDER BY ts DESC LIMIT {limit}", 'slow10': "SELECT * FROM audit\nWHERE drive <> 'z' and ({date_condition})\nORDER BY CAST(duration AS REAL) DESC LIMIT {limit}", 'top10': "SELECT min(c) AS cluster, line, COUNT(*) AS cnt, avg(CAST(duration AS REAL)) AS duration\nFROM audit WHERE drive <> 'z' and ({date_condition})\nGROUP BY line ORDER BY cnt DESC LIMIT {limit}"}}, 'bash': {'workers': 32}, 'cassandra': {'service-name': 'all-pods-service'}, 'cql': {'workers': 32, 'samples': 3, 'secret': {'cluster-regex': '(.*?-.*?)-.*', 'name': '{cluster}-superuser', 'password-item': 'password'}, 'alter-tables': {'excludes': 'system_auth,system_traces,reaper_db,system_distributed,system_views,system,system_schema,system_virtual_schema', 'gc-grace-periods': '3600,86400,864000,7776000', 'batching': True}}, 'checks': {'compactions-threshold': 250, 'cpu-busy-threshold': 98.0, 'cpu-threshold': 0.0, 'cassandra-data-path': '/c3/cassandra', 'root-disk-threshold': 50, 'cassandra-disk-threshold': 50, 'snapshot-size-cmd': "ls /c3/cassandra/data/data/*/*/snapshots | grep snapshots | sed 's/:$//g' | xargs -I {} du -sk {} | awk '{print $1}' | awk '{s+=$1} END {print s}'", 'snapshot-size-threshold': '40G', 'table-sizes-cmd': "ls -Al /c3/cassandra/data/data/ | awk '{print $9}' | sed 's/\\^r//g' | xargs -I {} du -sk /c3/cassandra/data/data/{}"}, 'download': {'workers': 8}, 'export': {'workers': 8, 'csv_dir': '/c3/cassandra/tmp', 'column_counts_query': 'select id, count(id) as columns from {table} group by id order by columns desc limit 10', 'default-importer': 'sqlite', 'sqlite': {'workers': 8, 'columns': '<row-key>', 'local-db-dir': '/tmp/qing-db'}, 'athena': {'workers': 8, 'columns': '<keys>', 'bucket': 'c3.ops--qing'}, 'csv': {'workers': 8, 'columns': '<row-key>'}, 'log-
+    return {'app': {'console-endpoint': 'https://{host}/{env}/{app}/static/console/index.html', 'container-name': 'c3-server', 'cr': {'cluster-regex': '(.*?-.*?)-.*', 'group': 'ops.c3.ai', 'v': 'v2', 'plural': 'c3cassandras'}, 'label': 'c3__app_id-0', 'login': {'admin-group': '{host}/C3.ClusterAdmin', 'ingress': '{app_id}-k8singr-appleader-001', 'timeout': 5, 'session-check-url': 'https://{host}/{env}/{app}/api/8/C3/userSessionToken', 'cache-creds': True, 'cache-username': True, 'url': 'https://{host}/{env}/{app}', 'another': "You're logged in to {has}. However, for this app, you need to log in to {need}.", 'token-server-url': 'http://localhost:{port}', 'password-max-length': 128}, 'strip': '0'}, 'audit': {'endpoint': 'https://4psvtaxlcb.execute-api.us-west-2.amazonaws.com/prod/', 'workers': 3, 'timeout': 10, 'log-audit-queries': False, 'athena': {'auto-repair': {'elapsed_hours': 12}, 'region': 'us-west-2', 'catalog': 'AwsDataCatalog', 'database': 'audit', 'repair-partition-tables': 'audit', 'output': 's3://s3.ops--audit/ddl/results', 'repair-cluster-tables': 'cluster'}, 'queries': {'last10': "SELECT * FROM audit\nWHERE drive <> 'z' and ({date_condition})\nORDER BY ts DESC LIMIT {limit}", 'slow10': "SELECT * FROM audit\nWHERE drive <> 'z' and ({date_condition})\nORDER BY CAST(duration AS REAL) DESC LIMIT {limit}", 'top10': "SELECT min(c) AS cluster, line, COUNT(*) AS cnt, avg(CAST(duration AS REAL)) AS duration\nFROM audit WHERE drive <> 'z' and ({date_condition})\nGROUP BY line ORDER BY cnt DESC LIMIT {limit}"}}, 'bash': {'workers': 32}, 'cassandra': {'service-name': 'all-pods-service'}, 'cql': {'workers': 32, 'samples': 3, 'secret': {'cluster-regex': '(.*?-.*?)-.*', 'name': '{cluster}-superuser', 'password-item': 'password'}, 'alter-tables': {'excludes': 'system_auth,system_traces,reaper_db,system_distributed,system_views,system,system_schema,system_virtual_schema', 'gc-grace-periods': '3600,86400,864000,7776000', 'batching': True}}, 'checks': {'compactions-threshold': 250, 'cpu-busy-threshold': 98.0, 'cpu-threshold': 0.0, 'cassandra-data-path': '/c3/cassandra', 'root-disk-threshold': 50, 'cassandra-disk-threshold': 50, 'snapshot-size-cmd': "ls /c3/cassandra/data/data/*/*/snapshots | grep snapshots | sed 's/:$//g' | xargs -I {} du -sk {} | awk '{print $1}' | awk '{s+=$1} END {print s}'", 'snapshot-size-threshold': '40G', 'table-sizes-cmd': "ls -Al /c3/cassandra/data/data/ | awk '{print $9}' | sed 's/\\^r//g' | xargs -I {} du -sk /c3/cassandra/data/data/{}"}, 'download': {'workers': 8}, 'export': {'workers': 8, 'csv_dir': '/c3/cassandra/tmp', 'column_counts_query': 'select id, count(id) as columns from {table} group by id order by columns desc limit 10', 'default-importer': 'sqlite', 'sqlite': {'workers': 8, 'columns': '<row-key>', 'local-db-dir': '/tmp/qing-db/q/export/db'}, 'athena': {'workers': 8, 'columns': '<keys>', 'bucket': 'c3.ops--qing'}, 'csv': {'workers': 8, 'columns': '<row-key>'}, 'log-dir': '/tmp/qing-db/q/export/logs'}, 'get-host-id': {'workers': 32}, 'idps': {'ad': {'email-pattern': '.*@c3.ai', 'uri': 'https://login.microsoftonline.com/53ad779a-93e7-485c-ba20-ac8290d7252b/oauth2/v2.0/authorize?response_type=id_token&response_mode=form_post&client_id=00ff94a8-6b0a-4715-98e0-95490012d818&scope=openid+email+profile&redirect_uri=https%3A%2F%2Fplat.c3ci.cloud%2Fc3%2Fc3%2Foidc%2Flogin&nonce={nonce}&state=EMPTY', 'jwks-uri': 'https://login.microsoftonline.com/common/discovery/keys', 'contact': 'Please contact ted.tran@c3.ai.', 'whitelist-file': '/kaqing/members'}, 'okta': {'default': True, 'email-pattern': '.*@c3iot.com', 'uri': 'https://c3energy.okta.com/oauth2/v1/authorize?response_type=id_token&response_mode=form_post&client_id={client_id}&scope=openid+email+profile+groups&redirect_uri=https%3A%2F%2F{host}%2Fc3%2Fc3%2Foidc%2Flogin&nonce={nonce}&state=EMPTY', 'jwks-uri': 'https://c3energy.okta.com/oauth2/v1/keys'}}, 'issues': {'workers': 32}, 'local-qing-dir': '/tmp/qing-db/q', 'local-downloads-dir': '/tmp/qing-db/q/downloads', 'logs': {'path': '/c3/cassandra/logs/system.log'}, 'log-dir': '/tmp/qing-db/q/logs', 'nodetool': {'workers': 96, 'commands_in_line': 40, 'status': {'workers': 32, 'samples': 3, 'commands_in_line': 40}}, 'pg': {'name-pattern': '^{namespace}.*-k8spg-.*', 'excludes': '.helm., -admin-secret', 'agent': {'name': 'ops-pg-agent', 'just-in-time': False, 'timeout': 86400, 'image': 'seanahnsf/kaqing'}, 'default-db': 'postgres', 'default-schema': 'postgres', 'secret': {'endpoint-key': 'postgres-db-endpoint', 'port-key': 'postgres-db-port', 'username-key': 'postgres-admin-username', 'password-key': 'postgres-admin-password'}}, 'pod': {'name': 'ops', 'image': 'seanahnsf/kaqing-cloud', 'sa': {'name': 'ops', 'proto': 'c3', 'additional-cluster-roles': 'c3aiops-k8ssandra-operator'}, 'label-selector': 'run=ops'}, 'preview': {'rows': 10}, 'processes': {'columns': 'pod,cpu-metrics,mem', 'header': 'POD_NAME,M_CPU(USAGE/LIMIT),MEM/LIMIT'}, 'processes-qing': {'columns': 'pod,cpu,mem', 'header': 'POD_NAME,Q_CPU/TOTAL,MEM/LIMIT'}, 'reaper': {'service-name': 'reaper-service', 'port-forward': {'timeout': 86400, 'local-port': 9001}, 'abort-runs-batch': 10, 'show-runs-batch': 100, 'pod': {'cluster-regex': '(.*?-.*?-.*?-.*?)-.*', 'label-selector': 'k8ssandra.io/reaper={cluster}-reaper'}, 'secret': {'cluster-regex': '(.*?-.*?)-.*', 'name': '{cluster}-reaper-ui', 'password-item': 'password'}}, 'repair': {'log-path': '/home/cassrepair/logs/', 'image': 'ci-registry.c3iot.io/cloudops/cassrepair:2.0.14', 'secret': 'ciregistryc3iotio', 'env': {'interval': 24, 'timeout': 60, 'pr': False, 'runs': 1}}, 'repl': {'start-drive': 'c', 'a': {'auto-enter': 'c3/c3'}, 'c': {'auto-enter': 'cluster'}, 'x': {'auto-enter': 'latest'}, 'history': {'push-cat-log-file': True, 'push-cat-remote-log-file': True}, 'background-process': {'auto-nohup': True}}, 'status': {'columns': 'status,address,load,tokens,owns,host_id,gossip,compactions', 'header': '--,Address,Load,Tokens,Owns,Host ID,GOSSIP,COMPACTIONS'}, 'storage': {'columns': 'pod,volume_root,volume_cassandra,snapshots,data,compactions', 'header': 'POD_NAME,VOLUME /,VOLUME CASS,SNAPSHOTS,DATA,COMPACTIONS'}, 'watch': {'auto': 'rollout', 'timeout': 3600, 'interval': 10}, 'auto-complete': {'c': {'tables': 'lazy'}, 'x': {'tables': 'lazy'}, 'cli': {'cp': 'jit'}, 'export': {'databases': 'jit'}, 'medusa': {'backups': 'jit'}, 'reaper': {'schedules': 'jit'}}, 'debug': False, 'debugs': {'complete': False, 'timings': False, 'exit-on-error': False}}
adam/repl_commands.py
CHANGED
@@ -1,16 +1,16 @@
-from adam.commands.alter_tables import AlterTables
 from adam.commands.app.app import App
 from adam.commands.app.app_ping import AppPing
 from adam.commands.app.show_app_actions import ShowAppActions
 from adam.commands.app.show_app_id import ShowAppId
 from adam.commands.app.show_app_queues import ShowAppQueues
 from adam.commands.audit.audit import Audit
-from adam.commands.cat import Cat
+from adam.commands.fs.cat import Cat
 from adam.commands.code import Code
+from adam.commands.cql.alter_tables import AlterTables
 from adam.commands.debug.debug import Debug
-from adam.commands.debug.debug_timings import DebugTimings
 from adam.commands.download_cassandra_log import DownloadCassandraLog
-from adam.commands.
+from adam.commands.fs.cat_local import CatLocal
+from adam.commands.fs.download_file import DownloadFile
 from adam.commands.deploy.code_start import CodeStart
 from adam.commands.deploy.code_stop import CodeStop
 from adam.commands.deploy.deploy import Deploy
@@ -41,16 +41,17 @@ from adam.commands.export.show_column_counts import ShowColumnCounts
 from adam.commands.export.show_export_databases import ShowExportDatabases
 from adam.commands.export.show_export_session import ShowExportSession
 from adam.commands.export.show_export_sessions import ShowExportSessions
-from adam.commands.find_files import FindLocalFiles
-from adam.commands.find_processes import FindProcesses
-from adam.commands.head import Head
+from adam.commands.fs.find_files import FindLocalFiles
+from adam.commands.fs.find_processes import FindProcesses
+from adam.commands.fs.head import Head
+from adam.commands.fs.ls_local import LsLocal
 from adam.commands.kubectl import Kubectl
 from adam.commands.restart_cluster import RestartCluster
 from adam.commands.restart_node import RestartNode
-from adam.commands.shell import Shell
+from adam.commands.fs.shell import Shell
 from adam.commands.clipboard_copy import ClipboardCopy
 from adam.commands.bash.bash import Bash
-from adam.commands.cd import Cd
+from adam.commands.fs.cd import Cd
 from adam.commands.check import Check
 from adam.commands.command import Command
 from adam.commands.cql.cqlsh import Cqlsh
@@ -58,7 +59,7 @@ from adam.commands.exit import Exit
 from adam.commands.medusa.medusa import Medusa
 from adam.commands.param_get import GetParam
 from adam.commands.issues import Issues
-from adam.commands.ls import Ls
+from adam.commands.fs.ls import Ls
 from adam.commands.nodetool import NodeTool
 from adam.commands.postgres.postgres import Postgres, PostgresPg
 from adam.commands.preview_table import PreviewTable
@@ -104,8 +105,8 @@ class ReplCommands:
         return deduped

     def navigation() -> list[Command]:
-        return [Ls(), PreviewTable(), DeviceApp(), DevicePostgres(), DeviceCass(), DeviceAuditLog(), DeviceExport(),
-            Cd(), Cat(), Head(), DownloadFile(), FindLocalFiles(), FindProcesses(), Pwd(), ClipboardCopy(),
+        return [Ls(), LsLocal(), PreviewTable(), DeviceApp(), DevicePostgres(), DeviceCass(), DeviceAuditLog(), DeviceExport(),
+            Cd(), Cat(), CatLocal(), Head(), DownloadFile(), FindLocalFiles(), FindProcesses(), Pwd(), ClipboardCopy(),
             GetParam(), SetParam(), ShowParams(), ShowKubectlCommands(), ShowLogin(), ShowAdam(), ShowHost()]

     def cassandra_ops() -> list[Command]:
adam/sso/cred_cache.py
CHANGED
@@ -2,8 +2,7 @@ import os
 from pathlib import Path
 from dotenv import load_dotenv

-from adam.
-from adam.utils import debug, log_exc
+from adam.utils import creating_dir, debug, log_exc
 from adam.utils_k8s.kube_context import KubeContext

 class CredCache:
@@ -15,7 +14,7 @@ class CredCache:

     def __init__(self):
         if not hasattr(self, 'env_f'):
-            self.dir = f'{Path.home()}/.kaqing'
+            self.dir = creating_dir(f'{Path.home()}/.kaqing')
             self.env_f = f'{self.dir}/.credentials'
             # immutable - cannot reload with different file content
             load_dotenv(dotenv_path=self.env_f)
@@ -44,8 +43,6 @@ class CredCache:
         updated.append(f'IDP_PASSWORD={password}')

         if updated:
-            if not os.path.exists(self.env_f):
-                os.makedirs(self.dir, exist_ok=True)
             with open(self.env_f, 'w') as file:
                 file.write('\n'.join(updated))

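Both this change and the new `export_log_dir()` helper above call a `creating_dir` utility from `adam.utils`. Its implementation is not part of this diff; judging from the `os.makedirs` branch it replaces, it presumably ensures the directory exists and returns the path unchanged. A minimal sketch under that assumption:

```python
import os


def creating_dir(path: str) -> str:
    """Create `path` (and any parents) if missing, then return it unchanged.

    Assumed behavior; the real adam.utils.creating_dir may differ.
    """
    os.makedirs(path, exist_ok=True)
    return path


# Usage matching the change above:
#   self.dir = creating_dir(f'{Path.home()}/.kaqing')
```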