kaqing 1.98.139-py3-none-any.whl → 1.98.140-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
adam/commands/deploy/deploy.py CHANGED
@@ -1,7 +1,6 @@
 import click
 
 from adam.commands.command import Command
-from adam.commands.command_helpers import ClusterCommandHelper
 from adam.commands.deploy.deploy_pod import DeployPod
 from .deploy_frontend import DeployFrontend
 from adam.repl_state import ReplState
adam/commands/deploy/deploy_frontend.py CHANGED
@@ -1,9 +1,6 @@
-from adam.app_session import AppSession
 from adam.commands.command import Command
 from adam.commands.deploy.deploy_utils import deploy_frontend
 from adam.config import Config
-from adam.k8s_utils.ingresses import Ingresses
-from adam.k8s_utils.services import Services
 from adam.repl_state import ReplState, RequiredState
 from adam.utils import log2
 
adam/commands/deploy/deploy_pod.py CHANGED
@@ -3,11 +3,10 @@ from kubernetes import client
 from adam.commands.command import Command
 from adam.commands.deploy.deploy_utils import deploy_frontend, gen_labels
 from adam.config import Config
-from adam.k8s_utils.ingresses import Ingresses
+from adam.k8s_utils.deployment import Deployments
 from adam.k8s_utils.kube_context import KubeContext
 from adam.k8s_utils.pods import Pods
 from adam.k8s_utils.service_accounts import ServiceAccounts
-from adam.k8s_utils.services import Services
 from adam.repl_state import ReplState, RequiredState
 from adam.utils import log2
 
@@ -55,11 +54,12 @@ class DeployPod(Command):
                 add=["SYS_PTRACE"]
             )
         )
-        Pods.create(state.namespace, pod_name, image, env={'NAMESPACE': state.namespace}, container_security_context=security_context, labels=labels, sa_name=sa_name)
+        Deployments.create(state.namespace, pod_name, image, env={'NAMESPACE': state.namespace}, container_security_context=security_context, labels=labels, sa_name=sa_name)
 
         uri = deploy_frontend(pod_name, state.namespace, label_selector)
 
-        Pods.wait_for_running(state.namespace, pod_name, msg=f'Ops pod is starting up; it will be available at {uri}.')
+        # Pods.wait_for_running(state.namespace, label_selector, msg=f'Ops pod is starting up; it will be available at {uri}.')
+        Pods.wait_for_running(state.namespace, pod_name, msg=f'Ops pod is starting up; it will be available at {uri}.', label_selector=label_selector)
 
         return state
 
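Note: this hunk swaps the bare Pod for a Deployment, so the running pod gets a controller-generated name (e.g. ops-<hash>-<suffix>) rather than the fixed pod_name, which is why wait_for_running now also receives label_selector. A minimal sketch of selector-based readiness polling with the kubernetes client (function and timeout are illustrative, not from the package):

    import time
    from kubernetes import client, config

    def wait_until_running(namespace: str, label_selector: str, timeout: int = 300):
        config.load_kube_config()  # inside a cluster: config.load_incluster_config()
        v1 = client.CoreV1Api()
        deadline = time.time() + timeout
        while time.time() < deadline:
            # resolve pods by label rather than by a fixed name
            pods = v1.list_namespaced_pod(namespace, label_selector=label_selector).items
            if pods and all(p.status.phase == 'Running' for p in pods):
                return pods
            time.sleep(5)
        raise TimeoutError(f'no Running pod matched {label_selector!r} in {namespace}')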
adam/commands/deploy/deploy_utils.py CHANGED
@@ -1,14 +1,12 @@
 from adam.app_session import AppSession
 from adam.k8s_utils.ingresses import Ingresses
 from adam.k8s_utils.services import Services
-from adam.utils import log2
 
 def deploy_frontend(name: str, namespace: str, label_selector: str):
     app_session: AppSession = AppSession.create('c3', 'c3', namespace)
     port = 7678
     labels = gen_labels(label_selector)
     Services.create_service(name, namespace, port, labels, labels=labels)
-    # Services.create_service(name, namespace, port, {"run": "ops"})
     Ingresses.create_ingress(name, namespace, app_session.host, '/c3/c3/ops($|/)', port, annotations={
         'kubernetes.io/ingress.class': 'nginx',
         'nginx.ingress.kubernetes.io/use-regex': 'true',
adam/commands/deploy/undeploy.py CHANGED
@@ -1,10 +1,8 @@
 import click
 
 from adam.commands.command import Command
-from adam.commands.command_helpers import ClusterCommandHelper
 from adam.commands.deploy.undeploy_frontend import UndeployFrontend
 from adam.commands.deploy.undeploy_pod import UndeployPod
-from .deploy_frontend import DeployFrontend
 from adam.repl_state import ReplState
 from adam.utils import lines_to_tabular, log, log2
 
adam/commands/deploy/undeploy_frontend.py CHANGED
@@ -1,8 +1,6 @@
 from adam.commands.command import Command
 from adam.commands.deploy.deploy_utils import undeploy_frontend
 from adam.config import Config
-from adam.k8s_utils.ingresses import Ingresses
-from adam.k8s_utils.services import Services
 from adam.repl_state import ReplState, RequiredState
 
 class UndeployFrontend(Command):
adam/commands/deploy/undeploy_pod.py CHANGED
@@ -1,12 +1,10 @@
-from adam.app_session import AppSession
 from adam.commands.command import Command
-from adam.commands.deploy.deploy_utils import deploy_frontend, undeploy_frontend
+from adam.commands.deploy.deploy_utils import undeploy_frontend
 from adam.config import Config
-from adam.k8s_utils.ingresses import Ingresses
+from adam.k8s_utils.deployment import Deployments
 from adam.k8s_utils.kube_context import KubeContext
 from adam.k8s_utils.pods import Pods
 from adam.k8s_utils.service_accounts import ServiceAccounts
-from adam.k8s_utils.services import Services
 from adam.repl_state import ReplState, RequiredState
 from adam.utils import log2
 
@@ -41,9 +39,11 @@ class UndeployPod(Command):
            return state
 
        label_selector = Config().get('pod.label-selector', 'run=ops')
-       undeploy_frontend(state.namespace, label_selector)
-       Pods.delete_with_selector(state.namespace, label_selector, grace_period_seconds=0)
        ServiceAccounts.delete(state.namespace, label_selector=label_selector)
+       Deployments.delete_with_selector(state.namespace, label_selector, grace_period_seconds=0)
+       # instantly destroy the pod
+       Pods.delete_with_selector(state.namespace, label_selector, grace_period_seconds=0)
+       undeploy_frontend(state.namespace, label_selector)
 
        return state
 
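Note: the teardown order changes relative to 1.98.139: the ServiceAccount goes first, then the Deployment (so the controller stops re-creating pods), then any remaining pods are force-deleted with grace_period_seconds=0, and the frontend Service/Ingress is removed last. A rough standalone equivalent of the middle two steps (assuming the default run=ops selector from the hunk above):

    from kubernetes import client

    def undeploy(namespace: str, label_selector: str = 'run=ops'):
        apps, core = client.AppsV1Api(), client.CoreV1Api()
        # delete the Deployment so no new pods are spawned
        for d in apps.list_namespaced_deployment(namespace, label_selector=label_selector).items:
            apps.delete_namespaced_deployment(d.metadata.name, namespace)
        # then remove any surviving pods immediately
        for p in core.list_namespaced_pod(namespace, label_selector=label_selector).items:
            core.delete_namespaced_pod(p.metadata.name, namespace, grace_period_seconds=0)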
adam/k8s_utils/deployment.py ADDED
@@ -0,0 +1,219 @@
+from collections.abc import Callable
+from concurrent.futures import ThreadPoolExecutor, as_completed
+import sys
+import time
+from typing import TypeVar, cast
+from kubernetes import client
+from kubernetes.stream import stream
+from kubernetes.stream.ws_client import ERROR_CHANNEL
+
+from adam.config import Config
+from adam.k8s_utils.pods import Pods
+from adam.pod_exec_result import PodExecResult
+from adam.utils import elapsed_time, log2
+from .kube_context import KubeContext
+
+T = TypeVar('T')
+_TEST_POD_EXEC_OUTS: PodExecResult = None
+
+# utility collection on deployments; methods are all static
+class Deployments:
+    def set_test_pod_exec_outs(outs: PodExecResult):
+        global _TEST_POD_EXEC_OUTS
+        _TEST_POD_EXEC_OUTS = outs
+
+        return _TEST_POD_EXEC_OUTS
+
+    def delete(pod_name: str, namespace: str):
+        try:
+            v1 = client.CoreV1Api()
+            api_response = v1.delete_namespaced_pod(pod_name, namespace)
+        except Exception as e:
+            log2("Exception when calling CoreV1Api->delete_namespaced_pod: %s\n" % e)
+
+    def delete_with_selector(namespace: str, label_selector: str, grace_period_seconds: int = None):
+        v1 = client.AppsV1Api()
+
+        ret = v1.list_namespaced_deployment(namespace=namespace, label_selector=label_selector)
+        for i in ret.items:
+            v1.delete_namespaced_deployment(name=i.metadata.name, namespace=namespace, grace_period_seconds=grace_period_seconds)
+
+    def on_pods(pods: list[str],
+                namespace: str,
+                body: Callable[[ThreadPoolExecutor, str, str, bool], T],
+                post: Callable[[T], T] = None,
+                action: str = 'action', max_workers=0, show_out=True) -> list[T]:
+        show_out = KubeContext.show_out(show_out)
+
+        if not max_workers:
+            max_workers = Config().action_workers(action, 0)
+        if max_workers > 0:
+            # if parallel, node sampling is suppressed
+            if KubeContext.show_parallelism():
+                log2(f'Executing on all nodes from statefulset in parallel...')
+            start_time = time.time()
+            try:
+                with ThreadPoolExecutor(max_workers=max_workers) as executor:
+                    # disable stdout from the pod_exec, then show the output in a for loop
+                    futures = [body(executor, pod, namespace, show_out) for pod in pods]
+                    if len(futures) == 0:
+                        return cast(list[T], [])
+
+                    rs = [future.result() for future in as_completed(futures)]
+                    if post:
+                        rs = [post(r, show_out=show_out) for r in rs]
+
+                    return rs
+            finally:
+                if KubeContext.show_parallelism():
+                    log2(f"Parallel {action} elapsed time: {elapsed_time(start_time)} with {max_workers} workers")
+        else:
+            results: list[T] = []
+
+            samples = Config().action_node_samples(action, sys.maxsize)
+            l = min(len(pods), samples)
+            adj = 'all'
+            if l < len(pods):
+                adj = f'{l} sample'
+            if show_out:
+                log2(f'Executing on {adj} nodes from statefulset...')
+            for pod_name in pods:
+                try:
+                    result = body(None, pod_name, namespace, show_out)
+                    if post:
+                        result = post(result, show_out=show_out)
+                    results.append(result)
+                    if result:
+                        l -= 1
+                        if not l:
+                            break
+                except Exception as e:
+                    log2(e)
+
+            return results
+
+    def exec(pod_name: str, container: str, namespace: str, command: str, show_out = True, throw_err = False, interaction: Callable[[any, list[str]], any] = None):
+        if _TEST_POD_EXEC_OUTS:
+            return _TEST_POD_EXEC_OUTS
+
+        show_out = KubeContext.show_out(show_out)
+
+        api = client.CoreV1Api()
+
+        exec_command = ["/bin/sh", "-c", command]
+        k_command = f'kubectl exec {pod_name} -c {container} -n {namespace} -- {command}'
+        if show_out:
+            print(k_command)
+
+        resp = stream(
+            api.connect_get_namespaced_pod_exec,
+            pod_name,
+            namespace,
+            command=exec_command,
+            container=container,
+            stderr=True,
+            stdin=True,
+            stdout=True,
+            tty=True,
+            _preload_content=False,
+        )
+
+        stdout = []
+        stderr = []
+        error_output = None
+        try:
+            while resp.is_open():
+                resp.update(timeout=1)
+                if resp.peek_stdout():
+                    frag = resp.read_stdout()
+                    stdout.append(frag)
+                    if show_out: print(frag, end="")
+
+                    if interaction:
+                        interaction(resp, stdout)
+                if resp.peek_stderr():
+                    frag = resp.read_stderr()
+                    stderr.append(frag)
+                    if show_out: print(frag, end="")
+
+            try:
+                # get the exit code from server
+                error_output = resp.read_channel(ERROR_CHANNEL)
+            except Exception:
+                pass
+        except Exception as e:
+            if throw_err:
+                raise e
+            else:
+                log2(e)
+        finally:
+            resp.close()
+
+        return PodExecResult("".join(stdout), "".join(stderr), k_command, error_output)
+
+    def get_container(namespace: str, pod_name: str, container_name: str):
+        pod = Pods.get(namespace, pod_name)
+        if not pod:
+            return None
+
+        for container in pod.spec.containers:
+            if container_name == container.name:
+                return container
+
+        return None
+
+    def get(namespace: str, pod_name: str):
+        v1 = client.CoreV1Api()
+        return v1.read_namespaced_pod(name=pod_name, namespace=namespace)
+
+    def create_deployment_spec(name: str, image: str, image_pull_secret: str,
+                               envs: list, container_security_context: client.V1SecurityContext,
+                               volume_name: str, pvc_name:str, mount_path:str,
+                               command: list[str]=None, sa_name=None, labels: dict[str, str] = {}):
+        return client.V1DeploymentSpec(
+            replicas=1,
+            selector=client.V1LabelSelector(match_labels=labels),
+            template=client.V1PodTemplateSpec(
+                metadata=client.V1ObjectMeta(labels=labels),
+                spec=Pods.create_pod_spec(name, image, image_pull_secret, envs, container_security_context,
+                                          volume_name, pvc_name, mount_path, command=command, sa_name=sa_name,
+                                          restart_policy="Always"),
+            ),
+        )
+
+    def create(namespace: str, deployment_name: str, image: str,
+               command: list[str] = None,
+               secret: str = None,
+               env: dict[str, any] = {},
+               container_security_context: client.V1SecurityContext = None,
+               labels: dict[str, str] = {},
+               volume_name: str = None,
+               pvc_name: str = None,
+               mount_path: str = None,
+               sa_name=None):
+        v1 = client.AppsV1Api()
+        envs = []
+        for k, v in env.items():
+            envs.append(client.V1EnvVar(name=str(k), value=str(v)))
+        deployment = Deployments.create_deployment_spec(deployment_name, image, secret, envs, container_security_context, volume_name, pvc_name, mount_path, command=command, sa_name=sa_name, labels=labels)
+        return v1.create_namespaced_deployment(
+            namespace=namespace,
+            body=client.V1Deployment(spec=deployment, metadata=client.V1ObjectMeta(
+                name=deployment_name,
+                labels=labels
+            ))
+        )
+
+    def wait_for_running(namespace: str, pod_name: str, msg: str=None):
+        msged = False
+
+        while Pods.get(namespace, pod_name).status.phase != 'Running':
+            if not msged:
+                if not msg:
+                    msg = f'Waiting for the {pod_name} pod to start up...'
+                log2(msg)
+                msged = True
+            time.sleep(5)
+
+    def completed(namespace: str, pod_name: str):
+        return Pods.get(namespace, pod_name).status.phase in ['Succeeded', 'Failed']
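The new Deployments helper mirrors the existing Pods utility but targets AppsV1 Deployments: create() wraps the pod spec from Pods.create_pod_spec in a single-replica V1DeploymentSpec with restart_policy="Always". A usage sketch matching the call made in deploy_pod.py (namespace, image, and service-account names below are placeholders):

    from adam.k8s_utils.deployment import Deployments

    Deployments.create(
        'my-namespace',                        # namespace (placeholder)
        'ops',                                 # deployment name
        'registry.example.com/ops:latest',     # container image (placeholder)
        env={'NAMESPACE': 'my-namespace'},
        labels={'run': 'ops'},
        sa_name='ops-sa',                      # service account (placeholder)
    )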
adam/k8s_utils/pods.py CHANGED
@@ -165,7 +165,17 @@ class Pods:
         v1 = client.CoreV1Api()
         return v1.read_namespaced_pod(name=pod_name, namespace=namespace)
 
-    def create_pod_spec(name: str, image: str, image_pull_secret: str, envs: list, container_security_context: client.V1SecurityContext, volume_name: str, pvc_name:str, mount_path:str, command: list[str]=None, sa_name=None):
+    def get_with_selector(namespace: str, label_selector: str):
+        v1 = client.CoreV1Api()
+
+        ret = v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector)
+        for i in ret.items:
+            return v1.read_namespaced_pod(name=i.metadata.name, namespace=namespace)
+
+    def create_pod_spec(name: str, image: str, image_pull_secret: str,
+                        envs: list, container_security_context: client.V1SecurityContext,
+                        volume_name: str, pvc_name:str, mount_path:str,
+                        command: list[str]=None, sa_name=None, restart_policy="Never"):
         volume_mounts = []
         if volume_name and pvc_name and mount_path:
             volume_mounts=[client.V1VolumeMount(mount_path=mount_path, name=volume_name)]
@@ -182,7 +192,7 @@ class Pods:
             security_context=client.V1PodSecurityContext(run_as_user=1001, run_as_group=1001, fs_group=1001)
 
         return client.V1PodSpec(
-            restart_policy="Never",
+            restart_policy=restart_policy,
             containers=[container],
             image_pull_secrets=[client.V1LocalObjectReference(name=image_pull_secret)],
             security_context=security_context,
@@ -213,10 +223,10 @@ class Pods:
             ))
         )
 
-    def wait_for_running(namespace: str, pod_name: str, msg: str=None):
+    def wait_for_running(namespace: str, pod_name: str, msg: str=None, label_selector: str = None):
         msged = False
 
-        while Pods.get(namespace, pod_name).status.phase != 'Running':
+        while (Pods.get_with_selector(namespace, label_selector) if label_selector else Pods.get(namespace, pod_name)).status.phase != 'Running':
             if not msged:
                 if not msg:
                     msg = f'Waiting for the {pod_name} pod to start up...'
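Note: when label_selector is given, the poll resolves the pod through the new Pods.get_with_selector, which returns the first match, or None when no pod exists yet. A defensive caller might guard the dereference (a sketch, not part of the package):

    from adam.k8s_utils.pods import Pods

    def phase_of_first_match(namespace: str, label_selector: str):
        # get_with_selector returns None until the Deployment has created a pod
        pod = Pods.get_with_selector(namespace, label_selector)
        return pod.status.phase if pod else None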
adam/version.py CHANGED
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-__version__ = "1.98.139" #: the working version
+__version__ = "1.98.140" #: the working version
 __release__ = "1.0.0" #: the release version
kaqing-1.98.139.dist-info/METADATA → kaqing-1.98.140.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: kaqing
-Version: 1.98.139
+Version: 1.98.140
 Summary: UNKNOWN
 Home-page: UNKNOWN
 License: UNKNOWN
kaqing-1.98.139.dist-info/RECORD → kaqing-1.98.140.dist-info/RECORD
@@ -14,7 +14,7 @@ adam/repl_commands.py,sha256=ionPWjPhzrzm59zbhO1WsGGcLVba3bnPl6uPjvQ4C34,4093
 adam/repl_session.py,sha256=uIogcvWBh7wd8QQ-p_JgLsyJ8YJgINw5vOd6JIsd7Vo,472
 adam/repl_state.py,sha256=QarrUAwYWOz3YTemtaf2opbHLa5a3LEsyuonNwhvOhk,7131
 adam/utils.py,sha256=j7p7iruLuV11swa0z9ZLBgoJHu_nkTSVKtQe0q71gmk,7025
-adam/version.py,sha256=rQTb_obJrUOP5N1EJzEqcCJkxbkdKobYc_9ZyhKgA7A,141
+adam/version.py,sha256=aHmraJ9rwWAccgp_8uuEZLVAOkQ32VkuKXEpe6zSLEA,141
 adam/checks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 adam/checks/check.py,sha256=Qopr3huYcMu2bzQgb99dEUYjFzkjKHRI76S6KA9b9Rk,702
 adam/checks/check_context.py,sha256=FEHkQ32jY1EDopQ2uYWqy9v7aEEX1orLpJWhopwAlh4,402
@@ -81,13 +81,13 @@ adam/commands/deploy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 adam/commands/deploy/code_start.py,sha256=-iH8HThTNM83IfBxT_LqTByuHVatV9d-Il4OYOfrwLI,1370
 adam/commands/deploy/code_stop.py,sha256=ch7ZMgosvTHsGaIcDwQY5XYh_5HYrUjBkZFOI-d2gOU,1696
 adam/commands/deploy/code_utils.py,sha256=5Gp4U8HzKpPkbkHDU7whlvGOK-wWaAbCIIGzVoN9hio,3296
-adam/commands/deploy/deploy.py,sha256=ymGprq2rBpAMsxaqRrVnrcYetAq9GDTguohlAWFuFa8,1855
-adam/commands/deploy/deploy_frontend.py,sha256=_HBekXvRAXIzHf4SgAsY_OgKdnesuM3THgdbbwyAARY,1832
-adam/commands/deploy/deploy_pod.py,sha256=mLl6jTII7JAz_ES4zWtsu8WF8MvlgevU-hes-y1s2Ec,2649
-adam/commands/deploy/deploy_utils.py,sha256=L-UONUT_Ew8hRw2ozdWMx3tr0kmBInxX8xDXr4d4EO8,1143
-adam/commands/deploy/undeploy.py,sha256=JWRwYLiS7iD2utRr2Zb8HCg_rbgHTw-A17CCs2ezl00,1949
-adam/commands/deploy/undeploy_frontend.py,sha256=P04l_DI6y1dAqHMQMG94MG7K4piCw84zhPszc4Lz28o,1349
-adam/commands/deploy/undeploy_pod.py,sha256=D9dNMWyFK3hzOd4tXIfVQ7tMM8me17_fHfDFbooU4OQ,1866
+adam/commands/deploy/deploy.py,sha256=_y_pSBHIEVMTgjDqEZwvmGfLQBirHBH4sCnsl0CKOM8,1792
+adam/commands/deploy/deploy_frontend.py,sha256=TSvMTy6uUfOBIV96hKFy-M1hNCE6OeRW9M_dj1ZJsv0,1700
+adam/commands/deploy/deploy_pod.py,sha256=IaDmrxpYw_YqkK599Vq4N7_4tINq5ihBlccVPEFcpCk,2773
+adam/commands/deploy/deploy_utils.py,sha256=-YB-XUwr8zVsf9HKYWx_TP-dB2WgOpEmUGyYNcoie0U,1046
+adam/commands/deploy/undeploy.py,sha256=CNgbB00TR1YRgX0xGgdGAT8YNPlcLssMsRX-pSby-Ao,1842
+adam/commands/deploy/undeploy_frontend.py,sha256=mbHlWb5UHUVpR1trfflCS_TDip7eAqxqoZ47RzRZcno,1257
+adam/commands/deploy/undeploy_pod.py,sha256=Notj7_MNU-sHxulJPnWKi1QUDUE8cWArZZEko8hKw3A,1901
 adam/commands/medusa/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 adam/commands/medusa/medusa.py,sha256=Y_yyOZRb6u45wfTVBRp3kuklyYDTSlaAJQdAiymP_8M,2185
 adam/commands/medusa/medusa_backup.py,sha256=j4DTVWFT-4rzs4gG_pBvjE-JuPsVCJIsnyQjIzJ4EbA,1801
@@ -137,10 +137,11 @@ adam/k8s_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 adam/k8s_utils/cassandra_clusters.py,sha256=9gUn31SsyW32asSFM8cKdkDSw5LYqPY2Iv0r4fDg_fU,2017
 adam/k8s_utils/cassandra_nodes.py,sha256=jQlaxIQKnavvsXH7lMM8WPriusp7KO698UygytIA8Xw,1134
 adam/k8s_utils/custom_resources.py,sha256=cIeaZRQET2DelTGU2f5QsMckh7TddPpWZDFeNK3txeQ,7647
+adam/k8s_utils/deployment.py,sha256=ai5DwdU0y6s6i0jmOKnfydbXSfZZCkG3f-XTf7P7-64,8504
 adam/k8s_utils/ingresses.py,sha256=ul3Z6fDGc_Cxcn-ExP0vXhZatoShCUZFtpwtCY4Qx7o,3460
 adam/k8s_utils/jobs.py,sha256=P7j3JiZ33TRnkjkPLLrGlypAPxK1BZQHvVpF8s7eHA8,2604
 adam/k8s_utils/kube_context.py,sha256=nocEyVNjXWG-N-GNnEYHDvnot7H5LQUFmpZIwIyE7As,3310
-adam/k8s_utils/pods.py,sha256=I_xjOOJsbiW2_ZUz7Q-8sogyBzJqzVbrNZuRAHX1GmA,8758
+adam/k8s_utils/pods.py,sha256=zSCqW-13kWt11yT7iYwPCFxPnyrJP5F_avW6BFvikfk,9264
 adam/k8s_utils/secrets.py,sha256=pYaVKXTpx3-QgFtBjznWFq0N6ZcBdxnx21FRe5nBCCo,2305
 adam/k8s_utils/service_accounts.py,sha256=v2oQSqCrNvt2uRnKlNwR3fjtpUG7oF5nqgzEB7NnT-U,6349
 adam/k8s_utils/services.py,sha256=EOJJGACVbbRvu5T3rMKqIJqgYic1_MSJ17EA0TJ6UOk,3156
@@ -156,8 +157,8 @@ adam/sso/idp.py,sha256=fvcwUw_URTgsO6ySaqTIw0zQT2qRO1IPSGhf6rPtybo,5804
 adam/sso/idp_login.py,sha256=QAtCUeDTVWliJy40RK_oac8Vgybr13xH8wzeBoxPaa8,1754
 adam/sso/idp_session.py,sha256=9BUHNRf70u4rVKrVY1HKPOEmOviXvkjam8WJxmXSKIM,1735
 adam/sso/sso_config.py,sha256=5N8WZgIJQBtHUy585XLRWKjpU87_v6QluyNK9E27D5s,2459
-kaqing-1.98.139.dist-info/METADATA,sha256=piyMN5GQFcQcua01ZJ4CDsD7yi_qlSINzeMBOaJaiDs,134
-kaqing-1.98.139.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-kaqing-1.98.139.dist-info/entry_points.txt,sha256=SkzhuQJUWsXOzHeZ5TgQ2c3_g53UGK23zzJU_JTZOZI,39
-kaqing-1.98.139.dist-info/top_level.txt,sha256=8_2PZkwBb-xDcnc8a2rAbQeJhXKXskc7zTP7pSPa1fw,5
-kaqing-1.98.139.dist-info/RECORD,,
+kaqing-1.98.140.dist-info/METADATA,sha256=6GDFpkMOLNGQdRq63lPYleF-ifTzXW_GVOO9d4tBywI,134
+kaqing-1.98.140.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+kaqing-1.98.140.dist-info/entry_points.txt,sha256=SkzhuQJUWsXOzHeZ5TgQ2c3_g53UGK23zzJU_JTZOZI,39
+kaqing-1.98.140.dist-info/top_level.txt,sha256=8_2PZkwBb-xDcnc8a2rAbQeJhXKXskc7zTP7pSPa1fw,5
+kaqing-1.98.140.dist-info/RECORD,,