kaqing 1.98.87-py3-none-any.whl → 1.98.89-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kaqing might be problematic.

adam/batch.py CHANGED
@@ -6,8 +6,8 @@ from adam.commands.cp import ClipboardCopy, CopyCommandHelper
6
6
  from adam.commands.command import Command
7
7
  from adam.commands.command_helpers import ClusterCommandHelper, ClusterOrPodCommandHelper, PodCommandHelper
8
8
  from adam.commands.cqlsh import CqlCommandHelper, Cqlsh
9
- from adam.commands.frontend.setup import Setup, SetupCommandHelper
10
- from adam.commands.frontend.teardown import TearDown, TearDownCommandHelper
9
+ from adam.commands.deploy.deploy import Deploy, DeployCommandHelper
10
+ from adam.commands.deploy.undeploy import Undeploy, UndeployCommandHelper
11
11
  from adam.commands.issues import Issues
12
12
  from adam.commands.login import Login
13
13
  from adam.commands.logs import Logs
@@ -77,6 +77,16 @@ def cql(kubeconfig: str, config: str, param: list[str], cluster: str, namespace:
77
77
  run_command(Cqlsh(), kubeconfig, config, param, cluster, namespace, pod, extra_args)
78
78
 
79
79
 
80
+ @cli.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True), cls=DeployCommandHelper, help='Setup.')
81
+ @click.option('--kubeconfig', '-k', required=False, metavar='path', help='path to kubeconfig file')
82
+ @click.option('--config', default='params.yaml', metavar='path', help='path to kaqing parameters file')
83
+ @click.option('--param', '-v', multiple=True, metavar='<key>=<value>', help='parameter override')
84
+ @click.option('--namespace', '-n', required=False, metavar='namespace', help='Kubernetes namespace')
85
+ @click.argument('extra_args', nargs=-1, metavar='<pod>', type=click.UNPROCESSED)
86
+ def deploy(kubeconfig: str, config: str, param: list[str], namespace: str, extra_args):
87
+ run_command(Deploy(), kubeconfig, config, param, None, namespace, None, extra_args)
88
+
89
+
80
90
  @cli.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True), cls=ClusterOrPodCommandHelper, help="Print Qing's issues.")
81
91
  @click.option('--kubeconfig', '-k', required=False, metavar='path', help='path to kubeconfig file')
82
92
  @click.option('--config', default='params.yaml', metavar='path', help='path to kaqing parameters file')
@@ -229,16 +239,6 @@ def rollout(kubeconfig: str, config: str, param: list[str], cluster: str, namesp
229
239
  run_command(RollOut(), kubeconfig, config, param, cluster, namespace, None, extra_args)
230
240
 
231
241
 
232
- @cli.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True), cls=SetupCommandHelper, help='Setup.')
233
- @click.option('--kubeconfig', '-k', required=False, metavar='path', help='path to kubeconfig file')
234
- @click.option('--config', default='params.yaml', metavar='path', help='path to kaqing parameters file')
235
- @click.option('--param', '-v', multiple=True, metavar='<key>=<value>', help='parameter override')
236
- @click.option('--namespace', '-n', required=False, metavar='namespace', help='Kubernetes namespace')
237
- @click.argument('extra_args', nargs=-1, metavar='<pod>', type=click.UNPROCESSED)
238
- def setup(kubeconfig: str, config: str, param: list[str], namespace: str, extra_args):
239
- run_command(Setup(), kubeconfig, config, param, None, namespace, None, extra_args)
240
-
241
-
242
242
  @cli.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True), cls=ShowCommandHelper, help='Show configuration or kubectl commands.')
243
243
  @click.option('--kubeconfig', '-k', required=False, metavar='path', help='path to kubeconfig file')
244
244
  @click.option('--config', default='params.yaml', metavar='path', help='path to kaqing parameters file')
@@ -251,14 +251,14 @@ def show(kubeconfig: str, config: str, param: list[str], cluster: str, namespace
251
251
  run_command(Show(), kubeconfig, config, param, cluster, namespace, pod, extra_args)
252
252
 
253
253
 
254
- @cli.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True), cls=TearDownCommandHelper, help='Teardown.')
254
+ @cli.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True), cls=UndeployCommandHelper, help='Undeploy.')
255
255
  @click.option('--kubeconfig', '-k', required=False, metavar='path', help='path to kubeconfig file')
256
256
  @click.option('--config', default='params.yaml', metavar='path', help='path to kaqing parameters file')
257
257
  @click.option('--param', '-v', multiple=True, metavar='<key>=<value>', help='parameter override')
258
258
  @click.option('--namespace', '-n', required=False, metavar='namespace', help='Kubernetes namespace')
259
259
  @click.argument('extra_args', nargs=-1, metavar='<pod>', type=click.UNPROCESSED)
260
- def teardown(kubeconfig: str, config: str, param: list[str], namespace: str, extra_args):
261
- run_command(TearDown(), kubeconfig, config, param, None, namespace, None, extra_args)
260
+ def undeploy(kubeconfig: str, config: str, param: list[str], namespace: str, extra_args):
261
+ run_command(Undeploy(), kubeconfig, config, param, None, namespace, None, extra_args)
262
262
 
263
263
 
264
264
  @cli.command(context_settings=dict(ignore_unknown_options=True, allow_extra_args=True), cls=PodCommandHelper, help='Get cassandra log.')
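
For orientation, the following is a minimal sketch of how the renamed CLI entry points could be exercised with click's test runner. The option names and the `cli` group are taken from the hunks above; the module path `adam.batch`, the sample namespace, and the extra `frontend` argument are assumptions for illustration only.

    # Hedged sketch: invoking the renamed `deploy`/`undeploy` commands via click's CliRunner.
    # Assumes the click group decorated above is importable as `adam.batch.cli`.
    from click.testing import CliRunner

    from adam.batch import cli  # assumption: the @cli.command group lives in this module

    runner = CliRunner()

    # `deploy` replaces the old `setup` command; options mirror the decorators above.
    result = runner.invoke(cli, ["deploy", "--config", "params.yaml", "-n", "demo-ns", "frontend"])
    print(result.output)

    # `undeploy` replaces the old `teardown` command.
    result = runner.invoke(cli, ["undeploy", "-n", "demo-ns", "frontend"])
    print(result.output)
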
@@ -1,5 +1,5 @@
1
1
  from adam.commands.command import Command
2
- from adam.commands.frontend.code_utils import start_user_code, stop_user_codes
2
+ from adam.commands.deploy.code_utils import start_user_code, stop_user_codes
3
3
  from adam.repl_state import ReplState, RequiredState
4
4
  from adam.utils import log2
5
5
 
@@ -1,5 +1,5 @@
1
1
  from adam.commands.command import Command
2
- from adam.commands.frontend.code_utils import stop_user_codes
2
+ from adam.commands.deploy.code_utils import stop_user_codes
3
3
  from adam.repl_state import ReplState, RequiredState
4
4
 
5
5
  class CodeStop(Command):
@@ -2,17 +2,18 @@ import click
2
2
 
3
3
  from adam.commands.command import Command
4
4
  from adam.commands.command_helpers import ClusterCommandHelper
5
- from .setup_frontend import SetupFrontend
5
+ from adam.commands.deploy.deploy_pod import DeployPod
6
+ from .deploy_frontend import DeployFrontend
6
7
  from adam.repl_state import ReplState
7
8
  from adam.utils import lines_to_tabular, log, log2
8
9
 
9
- class Setup(Command):
10
- COMMAND = 'setup'
10
+ class Deploy(Command):
11
+ COMMAND = 'deploy'
11
12
  reaper_login = None
12
13
 
13
14
  # the singleton pattern
14
15
  def __new__(cls, *args, **kwargs):
15
- if not hasattr(cls, 'instance'): cls.instance = super(Setup, cls).__new__(cls)
16
+ if not hasattr(cls, 'instance'): cls.instance = super(Deploy, cls).__new__(cls)
16
17
 
17
18
  return cls.instance
18
19
 
@@ -20,7 +21,7 @@ class Setup(Command):
20
21
  super().__init__(successor)
21
22
 
22
23
  def command(self):
23
- return Setup.COMMAND
24
+ return Deploy.COMMAND
24
25
 
25
26
  def run(self, cmd: str, state: ReplState):
26
27
  if not(args := self.args(cmd)):
@@ -29,18 +30,18 @@ class Setup(Command):
29
30
  state, args = self.apply_state(args, state)
30
31
 
31
32
  if state.in_repl:
32
- log(lines_to_tabular([c.help(ReplState()) for c in Setup.cmd_list()], separator=':'))
33
+ log(lines_to_tabular([c.help(ReplState()) for c in Deploy.cmd_list()], separator=':'))
33
34
 
34
35
  return 'command-missing'
35
36
  else:
36
37
  # head with the Chain of Responsibility pattern
37
- cmds = Command.chain(Setup.cmd_list())
38
+ cmds = Command.chain(Deploy.cmd_list())
38
39
  if not cmds.run(cmd, state):
39
40
  log2('* Command is missing.')
40
41
  Command.display_help()
41
42
 
42
43
  def cmd_list():
43
- return [SetupFrontend()]
44
+ return [DeployFrontend(), DeployPod()]
44
45
 
45
46
  def completion(self, state: ReplState):
46
47
  if state.sts:
@@ -51,10 +52,10 @@ class Setup(Command):
51
52
  def help(self, _: ReplState):
52
53
  return None
53
54
 
54
- class SetupCommandHelper(click.Command):
55
+ class DeployCommandHelper(click.Command):
55
56
  def get_help(self, ctx: click.Context):
56
57
  log(super().get_help(ctx))
57
58
  log()
58
59
  log('Sub-Commands:')
59
60
 
60
- log(lines_to_tabular([c.help(ReplState()).replace(f'{Setup.COMMAND} ', ' ', 1) for c in Setup.cmd_list()], separator=':'))
61
+ log(lines_to_tabular([c.help(ReplState()).replace(f'{Deploy.COMMAND} ', ' ', 1) for c in Deploy.cmd_list()], separator=':'))
@@ -1,16 +1,18 @@
1
1
  from adam.app_session import AppSession
2
2
  from adam.commands.command import Command
3
+ from adam.commands.deploy.deploy_utils import deploy_frontend
4
+ from adam.config import Config
3
5
  from adam.k8s_utils.ingresses import Ingresses
4
6
  from adam.k8s_utils.services import Services
5
7
  from adam.repl_state import ReplState, RequiredState
6
8
  from adam.utils import log2
7
9
 
8
- class SetupFrontend(Command):
9
- COMMAND = 'setup frontend'
10
+ class DeployFrontend(Command):
11
+ COMMAND = 'deploy frontend'
10
12
 
11
13
  # the singleton pattern
12
14
  def __new__(cls, *args, **kwargs):
13
- if not hasattr(cls, 'instance'): cls.instance = super(SetupFrontend, cls).__new__(cls)
15
+ if not hasattr(cls, 'instance'): cls.instance = super(DeployFrontend, cls).__new__(cls)
14
16
 
15
17
  return cls.instance
16
18
 
@@ -18,7 +20,7 @@ class SetupFrontend(Command):
18
20
  super().__init__(successor)
19
21
 
20
22
  def command(self):
21
- return SetupFrontend.COMMAND
23
+ return DeployFrontend.COMMAND
22
24
 
23
25
  def required(self):
24
26
  return RequiredState.NAMESPACE
@@ -33,19 +35,13 @@ class SetupFrontend(Command):
33
35
 
34
36
  log2('This will support c3/c3 only for demo.')
35
37
 
36
- app_session: AppSession = AppSession.create('c3', 'c3', state.namespace)
38
+ pod_name = Config().get('pod.name', 'ops')
39
+ label_selector = Config().get('pod.label-selector', 'run=ops')
37
40
  try:
38
- name = 'ops'
39
- port = 7678
40
- Services.create_service(name, state.namespace, port, {"run": "ops"})
41
- Ingresses.create_ingress(name, state.namespace, app_session.host, '/c3/c3/ops($|/)', port, annotations={
42
- 'kubernetes.io/ingress.class': 'nginx',
43
- 'nginx.ingress.kubernetes.io/use-regex': 'true',
44
- 'nginx.ingress.kubernetes.io/rewrite-target': '/'
45
- })
41
+ deploy_frontend(pod_name, state.namespace, label_selector)
46
42
  except Exception as e:
47
43
  if e.status == 409:
48
- log2(f"Error: '{name}' already exists in namespace '{state.namespace}'.")
44
+ log2(f"Error: '{pod_name}' already exists in namespace '{state.namespace}'.")
49
45
  else:
50
46
  log2(f"Error creating ingress or service: {e}")
51
47
 
@@ -55,4 +51,4 @@ class SetupFrontend(Command):
55
51
  return super().completion(state)
56
52
 
57
53
  def help(self, _: ReplState):
58
- return f'{SetupFrontend.COMMAND}\t sets up frontend'
54
+ return f'{DeployFrontend.COMMAND}\t deploy frontend'
@@ -0,0 +1,68 @@
1
+ from kubernetes import client
2
+
3
+ from adam.commands.command import Command
4
+ from adam.commands.deploy.deploy_utils import deploy_frontend, gen_labels
5
+ from adam.config import Config
6
+ from adam.k8s_utils.ingresses import Ingresses
7
+ from adam.k8s_utils.kube_context import KubeContext
8
+ from adam.k8s_utils.pods import Pods
9
+ from adam.k8s_utils.service_accounts import ServiceAccounts
10
+ from adam.k8s_utils.services import Services
11
+ from adam.repl_state import ReplState, RequiredState
12
+ from adam.utils import log2
13
+
14
+ class DeployPod(Command):
15
+ COMMAND = 'deploy pod'
16
+
17
+ # the singleton pattern
18
+ def __new__(cls, *args, **kwargs):
19
+ if not hasattr(cls, 'instance'): cls.instance = super(DeployPod, cls).__new__(cls)
20
+
21
+ return cls.instance
22
+
23
+ def __init__(self, successor: Command=None):
24
+ super().__init__(successor)
25
+
26
+ def command(self):
27
+ return DeployPod.COMMAND
28
+
29
+ def required(self):
30
+ return RequiredState.NAMESPACE
31
+
32
+ def run(self, cmd: str, state: ReplState):
33
+ if not(args := self.args(cmd)):
34
+ return super().run(cmd, state)
35
+
36
+ state, args = self.apply_state(args, state)
37
+ if not self.validate_state(state):
38
+ return state
39
+
40
+ if KubeContext.in_cluster():
41
+ log2('This is doable only from outside of the Kubernetes cluster.')
42
+ return state
43
+
44
+ sa_name = Config().get('pod.sa.name', 'ops')
45
+ sa_proto = Config().get('pod.sa.proto', 'c3')
46
+ additional_cluster_roles = Config().get('pod.sa.additional-cluster-roles', 'c3aiops-k8ssandra-operator').split(',')
47
+ label_selector = Config().get('pod.label-selector', 'run=ops')
48
+ labels = gen_labels(label_selector)
49
+ ServiceAccounts.replicate(sa_name, state.namespace, sa_proto, labels=labels, add_cluster_roles=additional_cluster_roles)
50
+
51
+ pod_name = Config().get('pod.name', 'ops')
52
+ image = Config().get('pod.image', 'seanahnsf/kaqing')
53
+ security_context = client.V1SecurityContext(
54
+ capabilities=client.V1Capabilities(
55
+ add=["SYS_PTRACE"]
56
+ )
57
+ )
58
+ Pods.create(state.namespace, pod_name, image, env={'NAMESPACE': state.namespace}, container_security_context=security_context, labels=labels, sa_name=sa_name)
59
+
60
+ deploy_frontend(pod_name, state.namespace, label_selector)
61
+
62
+ return state
63
+
64
+ def completion(self, state: ReplState):
65
+ return super().completion(state)
66
+
67
+ def help(self, _: ReplState):
68
+ return f'{DeployPod.COMMAND}\t deploy pod'
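
The new `deploy pod` command resolves all of its settings through `Config().get`. The defaults below are a plain restatement of the values visible in the hunk above, and they match the `pod` section added to embedded_params.py later in this diff; each can be overridden via the params file or `-v key=value`.

    # Default configuration consumed by DeployPod (mirrors the new 'pod' section
    # in embedded_params.py); shown here only to summarize the keys involved.
    pod_defaults = {
        'pod': {
            'name': 'ops',
            'image': 'seanahnsf/kaqing',
            'sa': {
                'name': 'ops',
                'proto': 'c3',
                'additional-cluster-roles': 'c3aiops-k8ssandra-operator',
            },
            'label-selector': 'run=ops',
        }
    }
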
@@ -0,0 +1,26 @@
1
+ from adam.app_session import AppSession
2
+ from adam.k8s_utils.ingresses import Ingresses
3
+ from adam.k8s_utils.services import Services
4
+ from adam.utils import log2
5
+
6
+ def deploy_frontend(name: str, namespace: str, label_selector: str):
7
+ app_session: AppSession = AppSession.create('c3', 'c3', namespace)
8
+ port = 7678
9
+ labels = gen_labels(label_selector)
10
+ Services.create_service(name, namespace, port, labels, labels=labels)
11
+ # Services.create_service(name, namespace, port, {"run": "ops"})
12
+ Ingresses.create_ingress(name, namespace, app_session.host, '/c3/c3/ops($|/)', port, annotations={
13
+ 'kubernetes.io/ingress.class': 'nginx',
14
+ 'nginx.ingress.kubernetes.io/use-regex': 'true',
15
+ 'nginx.ingress.kubernetes.io/rewrite-target': '/'
16
+ }, labels=labels)
17
+
18
+ log2(f'Shortly, ops pod will become available at https://{app_session.host}/c3/c3/ops')
19
+
20
+ def undeploy_frontend(namespace: str, label_selector: str):
21
+ Ingresses.delete_ingresses(namespace, label_selector=label_selector)
22
+ Services.delete_services(namespace, label_selector=label_selector)
23
+
24
+ def gen_labels(label_selector: str):
25
+ kv = label_selector.split('=')
26
+ return {kv[0]: kv[1]}
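
A brief, hedged usage sketch of the new deploy_utils helpers; the names and namespace below are placeholders, and deploy_frontend additionally requires a live AppSession as shown in its body above.

    # Hedged usage sketch of the new deploy_utils helpers; values are placeholders.
    from adam.commands.deploy.deploy_utils import deploy_frontend, undeploy_frontend, gen_labels

    print(gen_labels("run=ops"))                 # -> {'run': 'ops'}
    deploy_frontend("ops", "demo-ns", "run=ops")   # creates the Service and Ingress shown above
    undeploy_frontend("demo-ns", "run=ops")        # deletes them by label selector
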
@@ -2,18 +2,19 @@ import click
2
2
 
3
3
  from adam.commands.command import Command
4
4
  from adam.commands.command_helpers import ClusterCommandHelper
5
- from adam.commands.frontend.teardown_frontend import TearDownFrontend
6
- from .setup_frontend import SetupFrontend
5
+ from adam.commands.deploy.undeploy_frontend import UndeployFrontend
6
+ from adam.commands.deploy.undeploy_pod import UndeployPod
7
+ from .deploy_frontend import DeployFrontend
7
8
  from adam.repl_state import ReplState
8
9
  from adam.utils import lines_to_tabular, log, log2
9
10
 
10
- class TearDown(Command):
11
- COMMAND = 'teardown'
11
+ class Undeploy(Command):
12
+ COMMAND = 'undeploy'
12
13
  reaper_login = None
13
14
 
14
15
  # the singleton pattern
15
16
  def __new__(cls, *args, **kwargs):
16
- if not hasattr(cls, 'instance'): cls.instance = super(TearDown, cls).__new__(cls)
17
+ if not hasattr(cls, 'instance'): cls.instance = super(Undeploy, cls).__new__(cls)
17
18
 
18
19
  return cls.instance
19
20
 
@@ -21,7 +22,7 @@ class TearDown(Command):
21
22
  super().__init__(successor)
22
23
 
23
24
  def command(self):
24
- return TearDown.COMMAND
25
+ return Undeploy.COMMAND
25
26
 
26
27
  def run(self, cmd: str, state: ReplState):
27
28
  if not(args := self.args(cmd)):
@@ -30,18 +31,18 @@ class TearDown(Command):
30
31
  state, args = self.apply_state(args, state)
31
32
 
32
33
  if state.in_repl:
33
- log(lines_to_tabular([c.help(ReplState()) for c in TearDown.cmd_list()], separator=':'))
34
+ log(lines_to_tabular([c.help(ReplState()) for c in Undeploy.cmd_list()], separator=':'))
34
35
 
35
36
  return 'command-missing'
36
37
  else:
37
38
  # head with the Chain of Responsibility pattern
38
- cmds = Command.chain(TearDown.cmd_list())
39
+ cmds = Command.chain(Undeploy.cmd_list())
39
40
  if not cmds.run(cmd, state):
40
41
  log2('* Command is missing.')
41
42
  Command.display_help()
42
43
 
43
44
  def cmd_list():
44
- return [TearDownFrontend()]
45
+ return [UndeployFrontend(), UndeployPod()]
45
46
 
46
47
  def completion(self, state: ReplState):
47
48
  if state.sts:
@@ -52,10 +53,10 @@ class TearDown(Command):
52
53
  def help(self, _: ReplState):
53
54
  return None
54
55
 
55
- class TearDownCommandHelper(click.Command):
56
+ class UndeployCommandHelper(click.Command):
56
57
  def get_help(self, ctx: click.Context):
57
58
  log(super().get_help(ctx))
58
59
  log()
59
60
  log('Sub-Commands:')
60
61
 
61
- log(lines_to_tabular([c.help(ReplState()).replace(f'{TearDown.COMMAND} ', ' ', 1) for c in TearDown.cmd_list()], separator=':'))
62
+ log(lines_to_tabular([c.help(ReplState()).replace(f'{Undeploy.COMMAND} ', ' ', 1) for c in Undeploy.cmd_list()], separator=':'))
@@ -1,14 +1,16 @@
1
1
  from adam.commands.command import Command
2
+ from adam.commands.deploy.deploy_utils import undeploy_frontend
3
+ from adam.config import Config
2
4
  from adam.k8s_utils.ingresses import Ingresses
3
5
  from adam.k8s_utils.services import Services
4
6
  from adam.repl_state import ReplState, RequiredState
5
7
 
6
- class TearDownFrontend(Command):
7
- COMMAND = 'teardown frontend'
8
+ class UndeployFrontend(Command):
9
+ COMMAND = 'undeploy frontend'
8
10
 
9
11
  # the singleton pattern
10
12
  def __new__(cls, *args, **kwargs):
11
- if not hasattr(cls, 'instance'): cls.instance = super(TearDownFrontend, cls).__new__(cls)
13
+ if not hasattr(cls, 'instance'): cls.instance = super(UndeployFrontend, cls).__new__(cls)
12
14
 
13
15
  return cls.instance
14
16
 
@@ -16,7 +18,7 @@ class TearDownFrontend(Command):
16
18
  super().__init__(successor)
17
19
 
18
20
  def command(self):
19
- return TearDownFrontend.COMMAND
21
+ return UndeployFrontend.COMMAND
20
22
 
21
23
  def required(self):
22
24
  return RequiredState.NAMESPACE
@@ -29,9 +31,8 @@ class TearDownFrontend(Command):
29
31
  if not self.validate_state(state):
30
32
  return state
31
33
 
32
- name = 'ops'
33
- Ingresses.delete_ingress(name, state.namespace)
34
- Services.delete_service(name, state.namespace)
34
+ label_selector = Config().get('pod.label-selector', 'run=ops')
35
+ undeploy_frontend(state.namespace, label_selector)
35
36
 
36
37
  return state
37
38
 
@@ -39,4 +40,4 @@ class TearDownFrontend(Command):
39
40
  return super().completion(state)
40
41
 
41
42
  def help(self, _: ReplState):
42
- return f'{TearDownFrontend.COMMAND}\t tear down frontend'
43
+ return f'{UndeployFrontend.COMMAND}\t undeploy frontend'
@@ -0,0 +1,54 @@
1
+ from adam.app_session import AppSession
2
+ from adam.commands.command import Command
3
+ from adam.commands.deploy.deploy_utils import deploy_frontend, undeploy_frontend
4
+ from adam.config import Config
5
+ from adam.k8s_utils.ingresses import Ingresses
6
+ from adam.k8s_utils.kube_context import KubeContext
7
+ from adam.k8s_utils.pods import Pods
8
+ from adam.k8s_utils.service_accounts import ServiceAccounts
9
+ from adam.k8s_utils.services import Services
10
+ from adam.repl_state import ReplState, RequiredState
11
+ from adam.utils import log2
12
+
13
+ class UndeployPod(Command):
14
+ COMMAND = 'undeploy pod'
15
+
16
+ # the singleton pattern
17
+ def __new__(cls, *args, **kwargs):
18
+ if not hasattr(cls, 'instance'): cls.instance = super(UndeployPod, cls).__new__(cls)
19
+
20
+ return cls.instance
21
+
22
+ def __init__(self, successor: Command=None):
23
+ super().__init__(successor)
24
+
25
+ def command(self):
26
+ return UndeployPod.COMMAND
27
+
28
+ def required(self):
29
+ return RequiredState.NAMESPACE
30
+
31
+ def run(self, cmd: str, state: ReplState):
32
+ if not(args := self.args(cmd)):
33
+ return super().run(cmd, state)
34
+
35
+ state, args = self.apply_state(args, state)
36
+ if not self.validate_state(state):
37
+ return state
38
+
39
+ if KubeContext.in_cluster():
40
+ log2('This is doable only from outside of the Kubernetes cluster.')
41
+ return state
42
+
43
+ label_selector = Config().get('pod.label-selector', 'run=ops')
44
+ undeploy_frontend(state.namespace, label_selector)
45
+ Pods.delete_with_selector(state.namespace, label_selector)
46
+ ServiceAccounts.delete(state.namespace, label_selector=label_selector)
47
+
48
+ return state
49
+
50
+ def completion(self, state: ReplState):
51
+ return super().completion(state)
52
+
53
+ def help(self, _: ReplState):
54
+ return f'{UndeployPod.COMMAND}\t undeploy pod'
@@ -8,7 +8,7 @@ import threading
8
8
  import traceback
9
9
 
10
10
  from adam import log
11
- from adam.commands.frontend.code_utils import get_available_port
11
+ from adam.commands.deploy.code_utils import get_available_port
12
12
  from adam.config import Config
13
13
  from adam.sso.idp import Idp
14
14
  from adam.app_session import AppSession, IdpLogin
adam/embedded_params.py CHANGED
@@ -1,2 +1,2 @@
1
1
  def config():
2
- return {'app': {'console-endpoint': 'https://{host}/{env}/{app}/static/console/index.html', 'cr': {'cluster-regex': '(.*?-.*?)-.*', 'group': 'ops.c3.ai', 'v': 'v2', 'plural': 'c3cassandras'}, 'label': 'c3__app_id-0', 'login': {'admin-group': '{host}/C3.ClusterAdmin', 'ingress': '{app_id}-k8singr-appleader-001', 'timeout': 5, 'session-check-url': 'https://{host}/{env}/{app}/api/8/C3/userSessionToken', 'cache-creds': True, 'cache-username': True, 'url': 'https://{host}/{env}/{app}', 'another': "You're logged in to {has}. However, for this app, you need to log in to {need}.", 'token-server-url': 'http://localhost:{port}', 'password-max-length': 128}, 'strip': '0'}, 'bash': {'workers': 32}, 'cassandra': {'service-name': 'all-pods-service'}, 'cql': {'workers': 32, 'samples': 3, 'secret': {'cluster-regex': '(.*?-.*?)-.*', 'name': '{cluster}-superuser', 'password-item': 'password'}}, 'checks': {'compactions-threshold': 250, 'cpu-busy-threshold': 98.0, 'cpu-threshold': 0.0, 'cassandra-data-path': '/c3/cassandra', 'root-disk-threshold': 50, 'cassandra-disk-threshold': 50, 'snapshot-size-cmd': "ls /c3/cassandra/data/data/*/*/snapshots | grep snapshots | sed 's/:$//g' | xargs -I {} du -sk {} | awk '{print $1}' | awk '{s+=$1} END {print s}'", 'snapshot-size-threshold': '40G', 'table-sizes-cmd': "ls -Al /c3/cassandra/data/data/ | awk '{print $9}' | sed 's/\\^r//g' | xargs -I {} du -sk /c3/cassandra/data/data/{}"}, 'get-host-id': {'workers': 32}, 'idps': {'ad': {'email-pattern': '.*@c3.ai', 'uri': 'https://login.microsoftonline.com/53ad779a-93e7-485c-ba20-ac8290d7252b/oauth2/v2.0/authorize?response_type=id_token&response_mode=form_post&client_id=00ff94a8-6b0a-4715-98e0-95490012d818&scope=openid+email+profile&redirect_uri=https%3A%2F%2Fplat.c3ci.cloud%2Fc3%2Fc3%2Foidc%2Flogin&nonce={nonce}&state=EMPTY', 'jwks-uri': 'https://login.microsoftonline.com/common/discovery/keys', 'contact': 'Please contact ted.tran@c3.ai.', 'whitelist-file': '/kaqing/members'}, 'okta': {'default': True, 'email-pattern': '.*@c3iot.com', 'uri': 'https://c3energy.okta.com/oauth2/v1/authorize?response_type=id_token&response_mode=form_post&client_id={client_id}&scope=openid+email+profile+groups&redirect_uri=https%3A%2F%2F{host}%2Fc3%2Fc3%2Foidc%2Flogin&nonce={nonce}&state=EMPTY', 'jwks-uri': 'https://c3energy.okta.com/oauth2/v1/keys'}}, 'issues': {'workers': 32}, 'logs': {'path': '/c3/cassandra/logs/system.log'}, 'medusa': {'restore-auto-complete': False}, 'nodetool': {'workers': 32, 'samples': 3, 'commands_in_line': 40}, 'pg': {'name-pattern': '^{namespace}.*-k8spg-.*', 'excludes': '.helm., -admin-secret', 'agent': {'name': 'ops', 'just-in-time': False, 'timeout': 86400, 'image': 'seanahnsf/kaqing'}, 'default-db': 'postgres', 'default-schema': 'postgres', 'secret': {'endpoint-key': 'postgres-db-endpoint', 'port-key': 'postgres-db-port', 'username-key': 'postgres-admin-username', 'password-key': 'postgres-admin-password'}}, 'preview': {'rows': 10}, 'processes': {'columns': 'pod,cpu,mem', 'header': 'POD_NAME,CPU,MEM/LIMIT'}, 'reaper': {'service-name': 'reaper-service', 'port-forward': {'timeout': 86400, 'local-port': 9001}, 'abort-runs-batch': 10, 'show-runs-batch': 100, 'pod': {'cluster-regex': '(.*?-.*?-.*?-.*?)-.*', 'label-selector': 'k8ssandra.io/reaper={cluster}-reaper'}, 'secret': {'cluster-regex': '(.*?-.*?)-.*', 'name': '{cluster}-reaper-ui', 'password-item': 'password'}}, 'repair': {'log-path': '/home/cassrepair/logs/', 'image': 'ci-registry.c3iot.io/cloudops/cassrepair:2.0.13', 'secret': 'ciregistryc3iotio', 'env': 
{'interval': 24, 'timeout': 60, 'pr': False, 'runs': 1}}, 'repl': {'start-drive': 'a', 'auto-enter-app': 'c3/c3', 'auto-enter-only-cluster': True}, 'status': {'columns': 'status,address,load,tokens,owns,host_id,gossip,compactions', 'header': '--,Address,Load,Tokens,Owns,Host ID,GOSSIP,COMPACTIONS'}, 'storage': {'columns': 'pod,volume_root,volume_cassandra,snapshots,data,compactions', 'header': 'POD_NAME,VOLUME /,VOLUME CASS,SNAPSHOTS,DATA,COMPACTIONS'}, 'watch': {'auto': 'rollout', 'timeout': 3600, 'interval': 10}, 'debug': {'timings': False, 'exit-on-error': False, 'show-parallelism': False, 'show-out': False}}
2
+ return {'app': {'console-endpoint': 'https://{host}/{env}/{app}/static/console/index.html', 'cr': {'cluster-regex': '(.*?-.*?)-.*', 'group': 'ops.c3.ai', 'v': 'v2', 'plural': 'c3cassandras'}, 'label': 'c3__app_id-0', 'login': {'admin-group': '{host}/C3.ClusterAdmin', 'ingress': '{app_id}-k8singr-appleader-001', 'timeout': 5, 'session-check-url': 'https://{host}/{env}/{app}/api/8/C3/userSessionToken', 'cache-creds': True, 'cache-username': True, 'url': 'https://{host}/{env}/{app}', 'another': "You're logged in to {has}. However, for this app, you need to log in to {need}.", 'token-server-url': 'http://localhost:{port}', 'password-max-length': 128}, 'strip': '0'}, 'bash': {'workers': 32}, 'cassandra': {'service-name': 'all-pods-service'}, 'cql': {'workers': 32, 'samples': 3, 'secret': {'cluster-regex': '(.*?-.*?)-.*', 'name': '{cluster}-superuser', 'password-item': 'password'}}, 'checks': {'compactions-threshold': 250, 'cpu-busy-threshold': 98.0, 'cpu-threshold': 0.0, 'cassandra-data-path': '/c3/cassandra', 'root-disk-threshold': 50, 'cassandra-disk-threshold': 50, 'snapshot-size-cmd': "ls /c3/cassandra/data/data/*/*/snapshots | grep snapshots | sed 's/:$//g' | xargs -I {} du -sk {} | awk '{print $1}' | awk '{s+=$1} END {print s}'", 'snapshot-size-threshold': '40G', 'table-sizes-cmd': "ls -Al /c3/cassandra/data/data/ | awk '{print $9}' | sed 's/\\^r//g' | xargs -I {} du -sk /c3/cassandra/data/data/{}"}, 'get-host-id': {'workers': 32}, 'idps': {'ad': {'email-pattern': '.*@c3.ai', 'uri': 'https://login.microsoftonline.com/53ad779a-93e7-485c-ba20-ac8290d7252b/oauth2/v2.0/authorize?response_type=id_token&response_mode=form_post&client_id=00ff94a8-6b0a-4715-98e0-95490012d818&scope=openid+email+profile&redirect_uri=https%3A%2F%2Fplat.c3ci.cloud%2Fc3%2Fc3%2Foidc%2Flogin&nonce={nonce}&state=EMPTY', 'jwks-uri': 'https://login.microsoftonline.com/common/discovery/keys', 'contact': 'Please contact ted.tran@c3.ai.', 'whitelist-file': '/kaqing/members'}, 'okta': {'default': True, 'email-pattern': '.*@c3iot.com', 'uri': 'https://c3energy.okta.com/oauth2/v1/authorize?response_type=id_token&response_mode=form_post&client_id={client_id}&scope=openid+email+profile+groups&redirect_uri=https%3A%2F%2F{host}%2Fc3%2Fc3%2Foidc%2Flogin&nonce={nonce}&state=EMPTY', 'jwks-uri': 'https://c3energy.okta.com/oauth2/v1/keys'}}, 'issues': {'workers': 32}, 'logs': {'path': '/c3/cassandra/logs/system.log'}, 'medusa': {'restore-auto-complete': False}, 'nodetool': {'workers': 32, 'samples': 3, 'commands_in_line': 40}, 'pg': {'name-pattern': '^{namespace}.*-k8spg-.*', 'excludes': '.helm., -admin-secret', 'agent': {'name': 'ops', 'just-in-time': False, 'timeout': 86400, 'image': 'seanahnsf/kaqing'}, 'default-db': 'postgres', 'default-schema': 'postgres', 'secret': {'endpoint-key': 'postgres-db-endpoint', 'port-key': 'postgres-db-port', 'username-key': 'postgres-admin-username', 'password-key': 'postgres-admin-password'}}, 'pod': {'name': 'ops', 'image': 'seanahnsf/kaqing', 'sa': {'name': 'ops', 'proto': 'c3', 'additional-cluster-roles': 'c3aiops-k8ssandra-operator'}, 'label-selector': 'run=ops'}, 'preview': {'rows': 10}, 'processes': {'columns': 'pod,cpu,mem', 'header': 'POD_NAME,CPU,MEM/LIMIT'}, 'reaper': {'service-name': 'reaper-service', 'port-forward': {'timeout': 86400, 'local-port': 9001}, 'abort-runs-batch': 10, 'show-runs-batch': 100, 'pod': {'cluster-regex': '(.*?-.*?-.*?-.*?)-.*', 'label-selector': 'k8ssandra.io/reaper={cluster}-reaper'}, 'secret': {'cluster-regex': '(.*?-.*?)-.*', 'name': '{cluster}-reaper-ui', 
'password-item': 'password'}}, 'repair': {'log-path': '/home/cassrepair/logs/', 'image': 'ci-registry.c3iot.io/cloudops/cassrepair:2.0.13', 'secret': 'ciregistryc3iotio', 'env': {'interval': 24, 'timeout': 60, 'pr': False, 'runs': 1}}, 'repl': {'start-drive': 'a', 'auto-enter-app': 'c3/c3', 'auto-enter-only-cluster': True}, 'status': {'columns': 'status,address,load,tokens,owns,host_id,gossip,compactions', 'header': '--,Address,Load,Tokens,Owns,Host ID,GOSSIP,COMPACTIONS'}, 'storage': {'columns': 'pod,volume_root,volume_cassandra,snapshots,data,compactions', 'header': 'POD_NAME,VOLUME /,VOLUME CASS,SNAPSHOTS,DATA,COMPACTIONS'}, 'watch': {'auto': 'rollout', 'timeout': 3600, 'interval': 10}, 'debug': {'timings': False, 'exit-on-error': False, 'show-parallelism': False, 'show-out': False}}
adam/k8s_utils/pods.py CHANGED
@@ -30,6 +30,13 @@ class Pods:
30
30
  except Exception as e:
31
31
  log2("Exception when calling CoreV1Api->delete_namespaced_pod: %s\n" % e)
32
32
 
33
+ def delete_with_selector(namespace: str, label_selector: str):
34
+ v1 = client.CoreV1Api()
35
+
36
+ ret = v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector)
37
+ for i in ret.items:
38
+ v1.delete_namespaced_pod(name=i.metadata.name, namespace=namespace)
39
+
33
40
  def on_pods(pods: list[str],
34
41
  namespace: str,
35
42
  body: Callable[[ThreadPoolExecutor, str, str, bool], T],
@@ -158,12 +165,12 @@ class Pods:
158
165
  v1 = client.CoreV1Api()
159
166
  return v1.read_namespaced_pod(name=pod_name, namespace=namespace)
160
167
 
161
- def create_pod_spec(name: str, image: str, image_pull_secret: str, envs: list, volume_name: str, pvc_name:str, mount_path:str, command: list[str]=None, sa_name=None):
168
+ def create_pod_spec(name: str, image: str, image_pull_secret: str, envs: list, container_security_context: client.V1SecurityContext, volume_name: str, pvc_name:str, mount_path:str, command: list[str]=None, sa_name=None):
162
169
  volume_mounts = []
163
170
  if volume_name and pvc_name and mount_path:
164
171
  volume_mounts=[client.V1VolumeMount(mount_path=mount_path, name=volume_name)]
165
172
 
166
- container = client.V1Container(name=name, image=image, env=envs, command=command,
173
+ container = client.V1Container(name=name, image=image, env=envs, security_context=container_security_context, command=command,
167
174
  volume_mounts=volume_mounts)
168
175
 
169
176
  volumes = []
@@ -183,9 +190,12 @@ class Pods:
183
190
  volumes=volumes
184
191
  )
185
192
 
186
- def create(namespace: str, pod_name: str, image: str, command: list[str],
193
+ def create(namespace: str, pod_name: str, image: str,
194
+ command: list[str] = None,
187
195
  secret: str = None,
188
196
  env: dict[str, any] = {},
197
+ container_security_context: client.V1SecurityContext = None,
198
+ labels: dict[str, str] = {},
189
199
  volume_name: str = None,
190
200
  pvc_name: str = None,
191
201
  mount_path: str = None,
@@ -194,9 +204,14 @@ class Pods:
194
204
  envs = []
195
205
  for k, v in env.items():
196
206
  envs.append(client.V1EnvVar(name=str(k), value=str(v)))
197
- pod = Pods.create_pod_spec(pod_name, image, secret, envs, volume_name, pvc_name, mount_path, command=command, sa_name=sa_name)
198
- return v1.create_namespaced_pod(namespace=namespace,
199
- body=client.V1Pod(spec=pod, metadata=client.V1ObjectMeta(name=pod_name)))
207
+ pod = Pods.create_pod_spec(pod_name, image, secret, envs, container_security_context, volume_name, pvc_name, mount_path, command=command, sa_name=sa_name)
208
+ return v1.create_namespaced_pod(
209
+ namespace=namespace,
210
+ body=client.V1Pod(spec=pod, metadata=client.V1ObjectMeta(
211
+ name=pod_name,
212
+ labels=labels
213
+ ))
214
+ )
200
215
 
201
216
  def wait_for_running(namespace: str, pod_name: str):
202
217
  msged = False
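
The Pods.create signature now takes `command` as an optional keyword and accepts a container security context and pod labels. The sketch below mirrors the call made by DeployPod earlier in this diff; the namespace and label values are placeholders.

    # Hedged sketch of the extended Pods.create call, as used by DeployPod above.
    from kubernetes import client
    from adam.k8s_utils.pods import Pods

    security_context = client.V1SecurityContext(
        capabilities=client.V1Capabilities(add=["SYS_PTRACE"])
    )
    Pods.create(
        "demo-ns", "ops", "seanahnsf/kaqing",          # command is now optional
        env={"NAMESPACE": "demo-ns"},
        container_security_context=security_context,   # new parameter
        labels={"run": "ops"},                          # new parameter, applied to pod metadata
        sa_name="ops",
    )
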
@@ -0,0 +1,169 @@
1
+ from kubernetes import client, config
2
+
3
+ from adam.config import Config
4
+
5
+ # utility collection on service accounts; methods are all static
6
+ class ServiceAccounts:
7
+ def delete(namespace: str, label_selector: str):
8
+ ServiceAccounts.delete_cluster_role_bindings(label_selector)
9
+ ServiceAccounts.delete_role_bindings(namespace, label_selector)
10
+ ServiceAccounts.delete_service_account(namespace, label_selector)
11
+
12
+ def replicate(to_sa: str, namespace: str, from_sa: str, labels: dict[str, str] = {}, add_cluster_roles: list[str] = []):
13
+ ServiceAccounts.create_service_account(to_sa, namespace, labels=labels)
14
+ for b in ServiceAccounts.get_role_bindings(from_sa, namespace):
15
+ n = f'{to_sa}-{b.role_ref.name}'
16
+ ServiceAccounts.create_role_binding(n, namespace, to_sa, b.role_ref.name, labels=labels)
17
+
18
+ for b in ServiceAccounts.get_cluster_role_bindings(from_sa):
19
+ n = f'{to_sa}-{b.role_ref.name}'
20
+ ServiceAccounts.create_cluster_role_binding(n, namespace, to_sa, b.role_ref.name, labels=labels)
21
+
22
+ for cr in add_cluster_roles:
23
+ n = f'{to_sa}-{cr}'
24
+ ServiceAccounts.create_cluster_role_binding(n, namespace, to_sa, cr, labels=labels)
25
+
26
+ def create_service_account(name: str, namespace: str, labels: dict[str, str] = {}):
27
+ config.load_kube_config()
28
+
29
+ v1 = client.CoreV1Api()
30
+
31
+ service_account = client.V1ServiceAccount(
32
+ metadata=client.V1ObjectMeta(
33
+ name=name,
34
+ labels=labels)
35
+ )
36
+ api_response = v1.create_namespaced_service_account(
37
+ namespace=namespace,
38
+ body=service_account
39
+ )
40
+ Config().debug(f"Service Account '{api_response.metadata.name}' created in namespace '{namespace}'.")
41
+
42
+ def delete_service_account(namespace: str, label_selector: str) -> list:
43
+ refs = []
44
+
45
+ v1 = client.CoreV1Api()
46
+ sas = v1.list_namespaced_service_account(namespace=namespace, label_selector=label_selector).items
47
+ for sa in sas:
48
+ Config().debug(f'delete {sa.metadata.name}')
49
+ v1.delete_namespaced_service_account(name=sa.metadata.name, namespace=namespace)
50
+ refs.append(sa)
51
+
52
+ return refs
53
+
54
+ def create_role_binding(name: str, namespace: str, sa_name: str, role_name: str, labels: dict[str, str] = {}):
55
+ api = client.RbacAuthorizationV1Api()
56
+
57
+ metadata = client.V1ObjectMeta(
58
+ name=name,
59
+ namespace=namespace,
60
+ labels=labels
61
+ )
62
+ role_ref = client.V1RoleRef(
63
+ api_group="rbac.authorization.k8s.io",
64
+ kind="Role",
65
+ name=role_name
66
+ )
67
+
68
+ subjects = [
69
+ client.RbacV1Subject(
70
+ kind="ServiceAccount",
71
+ name=sa_name, # Name of the service account
72
+ namespace=namespace # Namespace of the service account
73
+ )
74
+ ]
75
+
76
+ role_binding = client.V1RoleBinding(
77
+ api_version="rbac.authorization.k8s.io/v1",
78
+ kind="RoleBinding",
79
+ metadata=metadata,
80
+ role_ref=role_ref,
81
+ subjects=subjects
82
+ )
83
+
84
+ api.create_namespaced_role_binding(namespace=namespace, body=role_binding)
85
+
86
+ def get_role_bindings(service_account_name: str, namespace: str) -> list:
87
+ refs = []
88
+
89
+ rbac_api = client.RbacAuthorizationV1Api()
90
+ role_bindings = rbac_api.list_namespaced_role_binding(namespace=namespace)
91
+ for binding in role_bindings.items:
92
+ if binding.subjects:
93
+ for subject in binding.subjects:
94
+ if subject.kind == "ServiceAccount" and subject.name == service_account_name:
95
+ refs.append(binding)
96
+
97
+ return refs
98
+
99
+ def delete_role_bindings(namespace: str, label_selector: str) -> list:
100
+ refs = []
101
+
102
+ v1_rbac = client.RbacAuthorizationV1Api()
103
+ cluster_role_bindings = v1_rbac.list_namespaced_role_binding(namespace=namespace, label_selector=label_selector).items
104
+ for binding in cluster_role_bindings:
105
+ Config().debug(f'delete {binding.metadata.name}')
106
+ v1_rbac.delete_namespaced_role_binding(name=binding.metadata.name, namespace=namespace)
107
+ refs.append(binding)
108
+
109
+ return refs
110
+
111
+ def create_cluster_role_binding(name: str, namespace: str, sa_name: str, role_name: str, labels: dict[str, str] = {}):
112
+ api = client.RbacAuthorizationV1Api()
113
+
114
+ metadata = client.V1ObjectMeta(
115
+ name=name,
116
+ namespace=namespace,
117
+ labels=labels
118
+ )
119
+ role_ref = client.V1RoleRef(
120
+ api_group="rbac.authorization.k8s.io",
121
+ kind="ClusterRole",
122
+ name=role_name
123
+ )
124
+
125
+ subjects = [
126
+ client.RbacV1Subject(
127
+ kind="ServiceAccount",
128
+ name=sa_name,
129
+ namespace=namespace
130
+ )
131
+ ]
132
+
133
+ role_binding = client.V1ClusterRoleBinding(
134
+ api_version="rbac.authorization.k8s.io/v1",
135
+ metadata=metadata,
136
+ role_ref=role_ref,
137
+ subjects=subjects
138
+ )
139
+
140
+ try:
141
+ api.create_cluster_role_binding(body=role_binding)
142
+ except client.ApiException as e:
143
+ print(f"Error creating ClusterRoleBinding: {e}")
144
+
145
+ def get_cluster_role_bindings(service_account_name: str) -> list:
146
+ refs = []
147
+
148
+ v1_rbac = client.RbacAuthorizationV1Api()
149
+ cluster_role_bindings = v1_rbac.list_cluster_role_binding().items
150
+ for binding in cluster_role_bindings:
151
+ if binding.subjects:
152
+ for subject in binding.subjects:
153
+ if subject.kind == "ServiceAccount" and subject.name == service_account_name:
154
+ refs.append(binding)
155
+
156
+ return refs
157
+
158
+
159
+ def delete_cluster_role_bindings(label_selector: str) -> list:
160
+ refs = []
161
+
162
+ v1_rbac = client.RbacAuthorizationV1Api()
163
+ cluster_role_bindings = v1_rbac.list_cluster_role_binding(label_selector=label_selector).items
164
+ for binding in cluster_role_bindings:
165
+ Config().debug(f'delete {binding.metadata.name}')
166
+ v1_rbac.delete_cluster_role_binding(binding.metadata.name)
167
+ refs.append(binding)
168
+
169
+ return refs
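
A hedged usage sketch for the new ServiceAccounts helper, mirroring how DeployPod and UndeployPod call it in the hunks above; the account names, namespace, and labels are placeholders.

    # Hedged usage sketch of the new ServiceAccounts helper; values are placeholders.
    from adam.k8s_utils.service_accounts import ServiceAccounts

    labels = {"run": "ops"}
    # Copy the role and cluster-role bindings of the 'c3' service account onto a new 'ops' account.
    ServiceAccounts.replicate("ops", "demo-ns", "c3",
                              labels=labels,
                              add_cluster_roles=["c3aiops-k8ssandra-operator"])

    # Later, remove everything created under that label.
    ServiceAccounts.delete("demo-ns", label_selector="run=ops")
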
adam/repl_commands.py CHANGED
@@ -1,11 +1,13 @@
1
1
  from adam.commands.app import App
2
2
  from adam.commands.app_ping import AppPing
3
- from adam.commands.frontend.code_start import CodeStart
4
- from adam.commands.frontend.code_stop import CodeStop
5
- from adam.commands.frontend.setup import Setup
6
- from adam.commands.frontend.setup_frontend import SetupFrontend
7
- from adam.commands.frontend.teardown import TearDown
8
- from adam.commands.frontend.teardown_frontend import TearDownFrontend
3
+ from adam.commands.deploy.code_start import CodeStart
4
+ from adam.commands.deploy.code_stop import CodeStop
5
+ from adam.commands.deploy.deploy import Deploy
6
+ from adam.commands.deploy.deploy_frontend import DeployFrontend
7
+ from adam.commands.deploy.deploy_pod import DeployPod
8
+ from adam.commands.deploy.undeploy import Undeploy
9
+ from adam.commands.deploy.undeploy_frontend import UndeployFrontend
10
+ from adam.commands.deploy.undeploy_pod import UndeployPod
9
11
  from adam.commands.show.show_app_queues import ShowAppQueues
10
12
  from adam.commands.cp import ClipboardCopy
11
13
  from adam.commands.bash import Bash
@@ -48,7 +50,7 @@ class ReplCommands:
48
50
  cmds: list[Command] = ReplCommands.navigation() + ReplCommands.cassandra_check() + ReplCommands.cassandra_ops() + \
49
51
  ReplCommands.tools() + ReplCommands.app() + ReplCommands.exit()
50
52
 
51
- intermediate_cmds: list[Command] = [App(), Reaper(), Repair(), Setup(), Show(), TearDown()]
53
+ intermediate_cmds: list[Command] = [App(), Reaper(), Repair(), Deploy(), Show(), Undeploy()]
52
54
  ic = [c.command() for c in intermediate_cmds]
53
55
  # 1. dedup commands
54
56
  deduped = []
@@ -75,7 +77,7 @@ class ReplCommands:
75
77
  return Medusa.cmd_list() + [Restart(), RollOut(), Watch()] + Reaper.cmd_list() + Repair.cmd_list()
76
78
 
77
79
  def tools() -> list[Command]:
78
- return [Cqlsh(), Postgres(), Bash(), CodeStart(), CodeStop(), SetupFrontend(), TearDownFrontend()]
80
+ return [Cqlsh(), Postgres(), Bash(), CodeStart(), CodeStop(), DeployFrontend(), UndeployFrontend(), DeployPod(), UndeployPod()]
79
81
 
80
82
  def app() -> list[Command]:
81
83
  return [ShowAppActions(), ShowAppId(), ShowAppQueues(), AppPing(), App()]
adam/version.py CHANGED
@@ -1,5 +1,5 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
3
 
4
- __version__ = "1.98.87" #: the working version
4
+ __version__ = "1.98.89" #: the working version
5
5
  __release__ = "1.0.0" #: the release version
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: kaqing
3
- Version: 1.98.87
3
+ Version: 1.98.89
4
4
  Summary: UNKNOWN
5
5
  Home-page: UNKNOWN
6
6
  License: UNKNOWN
@@ -1,20 +1,20 @@
1
1
  adam/__init__.py,sha256=oVw1FNd9HZPJ7wm6BNn5ybyNGJLjJ8kopMeBiwgMaOI,59
2
2
  adam/app_session.py,sha256=Klypm4JYHOlovaRCHAZ2P_Mj_nheMlcQgX403R0TJGk,6969
3
3
  adam/apps.py,sha256=UTpUJBAMRFvR8kJZwileGC0UmPvsOjJ_AgvWoGmnIFI,6701
4
- adam/batch.py,sha256=P_bQUPaS-38cVwdPnLwOwlirQuyHQiPNhYteV-y7B68,24108
4
+ adam/batch.py,sha256=vJRniGz0L8CXluVqP_EMFnthuZdILBMNUmNzxz9ImeI,24110
5
5
  adam/cli.py,sha256=03pIZdomAu7IL-GSP6Eun_PKwwISShRAmfx6eVRPGC0,458
6
6
  adam/cli_group.py,sha256=W3zy1BghCtVcEXizq8fBH-93ZRVVwgAyGPzy0sHno1Y,593
7
7
  adam/config.py,sha256=38UcmYRxf-Kq4iPbKS7tNPQqN64fam1bWNy6jhWREd0,2552
8
8
  adam/embedded_apps.py,sha256=lKPx63mKzJbNmwz0rgL4gF76M9fDGxraYTtNAIGnZ_s,419
9
- adam/embedded_params.py,sha256=Ol383-6KkjvhbMJj54SSpe5JIT5M5_BL20_G9Z3_43U,4184
9
+ adam/embedded_params.py,sha256=sHOxnZLjaDX7oN5VKeCzCpbEMb-1x6Nn9lRA7PE7mos,4362
10
10
  adam/log.py,sha256=gg5DK52wLPc9cjykeh0WFHyAk1qI3HEpGaAK8W2dzXY,1146
11
11
  adam/pod_exec_result.py,sha256=nq0xnCNOpUGBSijGF0H-YNrwBc9vUQs4DkvLMIFS5LQ,951
12
12
  adam/repl.py,sha256=wEzkXaFaT1PWWYI3AZ32j01efN7HpL2xvMfGLEmYIL4,7036
13
- adam/repl_commands.py,sha256=8OtR14WbCpFNS4CYH8g9xgarXvVr3fZa9XzAVmtBphI,3912
13
+ adam/repl_commands.py,sha256=iHR9LqUzBveRVuuPhLdF8M9CayboFCzYNmz6xP0byME,4046
14
14
  adam/repl_session.py,sha256=uIogcvWBh7wd8QQ-p_JgLsyJ8YJgINw5vOd6JIsd7Vo,472
15
15
  adam/repl_state.py,sha256=QarrUAwYWOz3YTemtaf2opbHLa5a3LEsyuonNwhvOhk,7131
16
16
  adam/utils.py,sha256=j7p7iruLuV11swa0z9ZLBgoJHu_nkTSVKtQe0q71gmk,7025
17
- adam/version.py,sha256=HuBZ-Db_NiPqwtS8rlERwa6S7uU0T4IqqGe5ZuhMheg,140
17
+ adam/version.py,sha256=zyh3kWz7_xPBLoMTmaNlBB9tomTiguqd8ESLhYbUurw,140
18
18
  adam/checks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
19
19
  adam/checks/check.py,sha256=Qopr3huYcMu2bzQgb99dEUYjFzkjKHRI76S6KA9b9Rk,702
20
20
  adam/checks/check_context.py,sha256=FEHkQ32jY1EDopQ2uYWqy9v7aEEX1orLpJWhopwAlh4,402
@@ -75,16 +75,19 @@ adam/commands/pwd.py,sha256=VlgFjxFl66I7Df1YI6cuiEeY6Q13lEavMKfrzHLESKo,2340
75
75
  adam/commands/report.py,sha256=Ky45LIzSlB_X4V12JZWjU3SA2u4_FKRencRTq7psOWU,1944
76
76
  adam/commands/restart.py,sha256=Hik1t5rjH2ATZv4tzwrGB3e44b3dNuATgY327_Nb8Bs,2044
77
77
  adam/commands/rollout.py,sha256=52_4ijna3v-8Oug12et43DRHFDNhiN34p6xLTQmhdbQ,2959
78
- adam/commands/user_entry.py,sha256=4TaFM6pp2ZDmwE4f5yMizD9wve0CHOlKPS6x6RBg7cI,4742
78
+ adam/commands/user_entry.py,sha256=7PeRYPpGU4x3kyQnUBxVaOH47anx7YwVPSIz-0Rfpko,4740
79
79
  adam/commands/watch.py,sha256=mmBFpB8T1V7zrNs5b2YNyDDztMym_ILPDdkrbdAXTas,2438
80
- adam/commands/frontend/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
81
- adam/commands/frontend/code_start.py,sha256=nkk2tTmEFobtJ6TD9nRplXaGYovYBq-05PKxA8benvI,1372
82
- adam/commands/frontend/code_stop.py,sha256=stpADpZ1gvQsLEriCPRp0sE9h5CFxskbPWnrMs3me5U,1698
83
- adam/commands/frontend/code_utils.py,sha256=5Gp4U8HzKpPkbkHDU7whlvGOK-wWaAbCIIGzVoN9hio,3296
84
- adam/commands/frontend/setup.py,sha256=teboue6gvy72ABOz-8oQUXQIOKFm7RrKMuM7u6OETww,1776
85
- adam/commands/frontend/setup_frontend.py,sha256=4mgyA_K6ldYy_zzQ5ZpDUphvKxtkdyFToQzB0j9ci2k,2010
86
- adam/commands/frontend/teardown.py,sha256=VtFLU6Tt-lXiOK3mT8TQP3dcfpf3IMqncb7mSY5hXNE,1876
87
- adam/commands/frontend/teardown_frontend.py,sha256=u7QEqpaBu7-ZJjaQ1UwnnOMKB0Oz_m8OToNrYasFsEs,1257
80
+ adam/commands/deploy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
81
+ adam/commands/deploy/code_start.py,sha256=-iH8HThTNM83IfBxT_LqTByuHVatV9d-Il4OYOfrwLI,1370
82
+ adam/commands/deploy/code_stop.py,sha256=ch7ZMgosvTHsGaIcDwQY5XYh_5HYrUjBkZFOI-d2gOU,1696
83
+ adam/commands/deploy/code_utils.py,sha256=5Gp4U8HzKpPkbkHDU7whlvGOK-wWaAbCIIGzVoN9hio,3296
84
+ adam/commands/deploy/deploy.py,sha256=ymGprq2rBpAMsxaqRrVnrcYetAq9GDTguohlAWFuFa8,1855
85
+ adam/commands/deploy/deploy_frontend.py,sha256=kc4GnpRMa3a384tLN8d9FN1XUcRrbwkpVUY1O3YkJU0,1774
86
+ adam/commands/deploy/deploy_pod.py,sha256=cJCt4DlNrr_JuDb_X3h_8b2Zsh8k7uzu2F5FeFmAirQ,2522
87
+ adam/commands/deploy/deploy_utils.py,sha256=h6oh3JBII2bMP7EosYBHAiIoDdX8U-Ntg7rqckszxaU,1184
88
+ adam/commands/deploy/undeploy.py,sha256=JWRwYLiS7iD2utRr2Zb8HCg_rbgHTw-A17CCs2ezl00,1949
89
+ adam/commands/deploy/undeploy_frontend.py,sha256=P04l_DI6y1dAqHMQMG94MG7K4piCw84zhPszc4Lz28o,1349
90
+ adam/commands/deploy/undeploy_pod.py,sha256=xiqenETzyzvQK0fH6OPtfNpww9E8ArHHFMWp9rTC6aU,1842
88
91
  adam/commands/medusa/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
89
92
  adam/commands/medusa/medusa.py,sha256=Y_yyOZRb6u45wfTVBRp3kuklyYDTSlaAJQdAiymP_8M,2185
90
93
  adam/commands/medusa/medusa_backup.py,sha256=j4DTVWFT-4rzs4gG_pBvjE-JuPsVCJIsnyQjIzJ4EbA,1801
@@ -137,8 +140,9 @@ adam/k8s_utils/custom_resources.py,sha256=cIeaZRQET2DelTGU2f5QsMckh7TddPpWZDFeNK
137
140
  adam/k8s_utils/ingresses.py,sha256=ul3Z6fDGc_Cxcn-ExP0vXhZatoShCUZFtpwtCY4Qx7o,3460
138
141
  adam/k8s_utils/jobs.py,sha256=P7j3JiZ33TRnkjkPLLrGlypAPxK1BZQHvVpF8s7eHA8,2604
139
142
  adam/k8s_utils/kube_context.py,sha256=nocEyVNjXWG-N-GNnEYHDvnot7H5LQUFmpZIwIyE7As,3310
140
- adam/k8s_utils/pods.py,sha256=G0ePuYTYTeNIwNaSemPuinJR7fKy5GG6faWhv6KBlFQ,7986
143
+ adam/k8s_utils/pods.py,sha256=OnQo59k01lyZ_ZKBW2--8itZ5xgKoU17p5SFSm1le6o,8608
141
144
  adam/k8s_utils/secrets.py,sha256=pYaVKXTpx3-QgFtBjznWFq0N6ZcBdxnx21FRe5nBCCo,2305
145
+ adam/k8s_utils/service_accounts.py,sha256=v2oQSqCrNvt2uRnKlNwR3fjtpUG7oF5nqgzEB7NnT-U,6349
142
146
  adam/k8s_utils/services.py,sha256=EOJJGACVbbRvu5T3rMKqIJqgYic1_MSJ17EA0TJ6UOk,3156
143
147
  adam/k8s_utils/statefulsets.py,sha256=PZDEhy34aXxLkbW1-RsOC0E4D0w0pHyoIQGHvcAzSAk,4606
144
148
  adam/k8s_utils/volumes.py,sha256=MzYeH80NqKlhdadx6d0tW-j8vTOCUYWx7wRURIZWKZ8,843
@@ -152,8 +156,8 @@ adam/sso/idp.py,sha256=fvcwUw_URTgsO6ySaqTIw0zQT2qRO1IPSGhf6rPtybo,5804
152
156
  adam/sso/idp_login.py,sha256=t49CRlMyHA76BAj_kKq0Wa9URIYlzBsUCSmn7Jf5o6I,1721
153
157
  adam/sso/idp_session.py,sha256=9BUHNRf70u4rVKrVY1HKPOEmOviXvkjam8WJxmXSKIM,1735
154
158
  adam/sso/sso_config.py,sha256=5N8WZgIJQBtHUy585XLRWKjpU87_v6QluyNK9E27D5s,2459
155
- kaqing-1.98.87.dist-info/METADATA,sha256=cVM-64rWKILaPnXWOUQsm8kMxjC-WWPRNpqzvWuiefo,133
156
- kaqing-1.98.87.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
157
- kaqing-1.98.87.dist-info/entry_points.txt,sha256=SkzhuQJUWsXOzHeZ5TgQ2c3_g53UGK23zzJU_JTZOZI,39
158
- kaqing-1.98.87.dist-info/top_level.txt,sha256=8_2PZkwBb-xDcnc8a2rAbQeJhXKXskc7zTP7pSPa1fw,5
159
- kaqing-1.98.87.dist-info/RECORD,,
159
+ kaqing-1.98.89.dist-info/METADATA,sha256=1W4E8DVV36Pd6KS6eZ8L8x0ZMGlgabfD2KrZecRLP-M,133
160
+ kaqing-1.98.89.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
161
+ kaqing-1.98.89.dist-info/entry_points.txt,sha256=SkzhuQJUWsXOzHeZ5TgQ2c3_g53UGK23zzJU_JTZOZI,39
162
+ kaqing-1.98.89.dist-info/top_level.txt,sha256=8_2PZkwBb-xDcnc8a2rAbQeJhXKXskc7zTP7pSPa1fw,5
163
+ kaqing-1.98.89.dist-info/RECORD,,