kubernetes-watch 0.1.5__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kube_watch/enums/kube.py CHANGED
@@ -1,6 +1,6 @@
1
- from enum import Enum
2
-
3
-
4
- class Hosts(str, Enum):
5
- LOCAL = 'local'
1
+ from enum import Enum
2
+
3
+
4
class Hosts(str, Enum):
    """Where the watcher process runs relative to the target cluster."""

    LOCAL = 'local'    # outside the cluster: load kubeconfig (see setup())
    REMOTE = 'remote'  # inside the cluster: load in-cluster config
kube_watch/enums/logic.py CHANGED
@@ -1,9 +1,9 @@
1
- from enum import Enum
2
-
3
- class Operations(str, Enum):
4
- OR = 'or'
5
- AND = 'and'
6
- SUM = 'sum'
7
- AVG = 'avg'
8
- MAX = 'max'
1
+ from enum import Enum
2
+
3
class Operations(str, Enum):
    """Supported aggregation / combination operations."""

    OR = 'or'
    AND = 'and'
    SUM = 'sum'
    AVG = 'avg'
    MAX = 'max'
    MIN = 'min'
@@ -1,13 +1,13 @@
1
- from enum import Enum
2
-
3
- class Providers(str, Enum):
4
- AWS = "aws"
5
- AZURE = "azure"
6
- GCP = "gcp"
7
- VAULT = "vault"
8
-
9
-
10
- class AwsResources(str, Enum):
11
- ECR = "ecr" # elastic container registry
12
- S3 = "s3"
1
+ from enum import Enum
2
+
3
class Providers(str, Enum):
    """Cloud / secret providers recognised by the library."""

    AWS = "aws"
    AZURE = "azure"
    GCP = "gcp"
    VAULT = "vault"
8
+
9
+
10
class AwsResources(str, Enum):
    """AWS resource kinds the watcher can interact with."""

    ECR = "ecr"  # elastic container registry
    S3 = "s3"
    IAM = "iam"
@@ -1,18 +1,18 @@
1
- from enum import Enum
2
-
3
- class ParameterType(str, Enum):
4
- STATIC = 'static'
5
- FROM_ENV = 'env'
6
- FROM_FLOW = 'flow'
7
-
8
-
9
- class TaskRunners(str, Enum):
10
- SEQUENTIAL = 'sequential'
11
- CONCURRENT = 'concurrent'
12
- DASK = 'dask'
13
- RAY = 'ray'
14
-
15
-
16
- class TaskInputsType(str, Enum):
17
- ARG = 'arg'
1
+ from enum import Enum
2
+
3
class ParameterType(str, Enum):
    """How a workflow/task parameter value is resolved."""

    STATIC = 'static'   # literal value taken as-is
    FROM_ENV = 'env'    # read from an environment variable
    FROM_FLOW = 'flow'  # produced earlier in the flow
7
+
8
+
9
class TaskRunners(str, Enum):
    """Execution strategies for running workflow tasks."""

    SEQUENTIAL = 'sequential'
    CONCURRENT = 'concurrent'
    DASK = 'dask'
    RAY = 'ray'
14
+
15
+
16
class TaskInputsType(str, Enum):
    """Shape in which a task receives its inputs."""

    ARG = 'arg'    # passed as individual arguments
    DICT = 'dict'  # passed as a single dictionary
@@ -1,17 +1,17 @@
1
- from pydantic import BaseModel, ConfigDict
2
- from humps.camel import case
3
-
4
- def to_camel(string):
5
- if string == "id":
6
- return "_id"
7
- if string.startswith("_"): # "_id"
8
- return string
9
- return case(string)
10
-
11
- class CamelModel(BaseModel):
12
- """
13
- Replacement for pydanitc BaseModel which simply adds a camel case alias to every field
14
- NOTE: This has been updated for Pydantic 2 to remove some common encoding helpers
15
- """
16
-
1
+ from pydantic import BaseModel, ConfigDict
2
+ from humps.camel import case
3
+
4
def to_camel(string):
    """Alias generator: map a snake_case field name to its camelCase alias.

    Special cases: the literal name "id" maps to "_id", and any name that
    already starts with an underscore (e.g. "_id") is passed through
    unchanged.
    """
    if string == "id":
        return "_id"
    # Underscore-prefixed names are already in their aliased form.
    return string if string.startswith("_") else case(string)
10
+
11
class CamelModel(BaseModel):
    """
    Replacement for pydantic BaseModel which simply adds a camel case alias to every field.

    NOTE: This has been updated for Pydantic 2 to remove some common encoding helpers.
    """

    # populate_by_name allows instantiation with either the snake_case field
    # name or its camelCase alias.
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
@@ -1,60 +1,60 @@
1
- from typing import List, Optional, Dict, Any
2
- from kube_watch.enums.workflow import ParameterType, TaskRunners, TaskInputsType
3
- from kube_watch.enums.logic import Operations
4
-
5
- from .common import CamelModel
6
-
7
- class Parameter(CamelModel):
8
- name: str
9
- value: Any
10
- type: Optional[ParameterType] = ParameterType.STATIC
11
-
12
- class Artifact(CamelModel):
13
- path: str
14
-
15
- class Inputs(CamelModel):
16
- parameters: Optional[List[Parameter]] = []
17
- artifacts: Optional[List[Artifact]] = []
18
-
19
- class Dependency(CamelModel):
20
- taskName: str
21
- inputParamName: Optional[str] = None
22
-
23
- class Condition(CamelModel):
24
- tasks: List[str]
25
- operation: Optional[Operations] = Operations.AND
26
-
27
- class Task(CamelModel):
28
- """
29
- :param plugin_path: define if referring to an external module outside the library.
30
- """
31
- module: str
32
- task: str
33
- name: str
34
- plugin_path: Optional[str] = ""
35
- inputsArgType: Optional[TaskInputsType] = TaskInputsType.ARG # @TODO refactor inputsArgType to inputs_arg_type
36
- inputs: Optional[Inputs] = None
37
- dependency: Optional[List[Dependency]] = None
38
- conditional: Optional[Condition] = None
39
- outputs: Optional[List[str]] = None
40
-
41
- class WorkflowConfig(CamelModel):
42
- name: str
43
- runner: TaskRunners = TaskRunners.CONCURRENT
44
- parameters: Optional[List[Parameter]] = []
45
- tasks: List[Task]
46
-
47
- class WorkflowOutput(CamelModel):
48
- flow_run: Any
49
- config: Any
50
-
51
- class BatchFlowItem(CamelModel):
52
- path: str
53
-
54
- class BatchFlowConfig(CamelModel):
55
- # Only possible runners are concurrent and sequential
56
- runner: TaskRunners = TaskRunners.CONCURRENT
57
- items: List[BatchFlowItem]
58
-
59
-
60
-
1
+ from typing import List, Optional, Dict, Any
2
+ from kube_watch.enums.workflow import ParameterType, TaskRunners, TaskInputsType
3
+ from kube_watch.enums.logic import Operations
4
+
5
+ from .common import CamelModel
6
+
7
class Parameter(CamelModel):
    """A single named input value for a task or workflow."""

    name: str
    value: Any
    # How the value is resolved (static literal, env var, or flow output).
    type: Optional[ParameterType] = ParameterType.STATIC


class Artifact(CamelModel):
    """A file-system artifact referenced by its path."""

    path: str


class Inputs(CamelModel):
    """Grouped task inputs: scalar parameters plus file artifacts."""

    parameters: Optional[List[Parameter]] = []
    artifacts: Optional[List[Artifact]] = []
18
+
19
class Dependency(CamelModel):
    """An upstream task whose output this task consumes."""

    taskName: str
    # Name of the input parameter the upstream output is bound to, if any.
    inputParamName: Optional[str] = None


class Condition(CamelModel):
    """A gate combining the results of other tasks via an operation."""

    tasks: List[str]
    operation: Optional[Operations] = Operations.AND
26
+
27
class Task(CamelModel):
    """
    A single unit of work within a workflow.

    :param plugin_path: define if referring to an external module outside the library.
    """

    module: str
    task: str
    name: str
    plugin_path: Optional[str] = ""
    # @TODO refactor inputsArgType to inputs_arg_type
    inputsArgType: Optional[TaskInputsType] = TaskInputsType.ARG
    inputs: Optional[Inputs] = None
    dependency: Optional[List[Dependency]] = None
    conditional: Optional[Condition] = None
    outputs: Optional[List[str]] = None
40
+
41
class WorkflowConfig(CamelModel):
    """Top-level definition of a workflow: its runner, parameters and tasks."""

    name: str
    runner: TaskRunners = TaskRunners.CONCURRENT
    parameters: Optional[List[Parameter]] = []
    tasks: List[Task]


class WorkflowOutput(CamelModel):
    """Result of executing a workflow: the flow run plus the config used."""

    flow_run: Any
    config: Any


class BatchFlowItem(CamelModel):
    """Path to a single workflow definition within a batch."""

    path: str


class BatchFlowConfig(CamelModel):
    """A batch of workflow definitions to execute together."""

    # Only possible runners are concurrent and sequential
    runner: TaskRunners = TaskRunners.CONCURRENT
    items: List[BatchFlowItem]
@@ -1,186 +1,186 @@
1
- from prefect import get_run_logger
2
- from typing import List
3
- from kubernetes import config
4
- from kubernetes import client
5
- from kubernetes.client.rest import ApiException
6
- import base64
7
- import datetime
8
-
9
- from kube_watch.enums.kube import Hosts
10
-
11
- logger = get_run_logger()
12
-
13
-
14
- def setup(host=Hosts.REMOTE, context=None):
15
- if host == Hosts.LOCAL:
16
- # Running outside a Kubernetes cluster (e.g., local development)
17
- config.load_kube_config(context=context) # You can specify the context here if necessary
18
- else:
19
- # Running inside a Kubernetes cluster
20
- config.load_incluster_config()
21
-
22
-
23
-
24
- def create_or_update_configmap(config_name, namespace, data):
25
- """
26
- Create or update a ConfigMap in a specified namespace if the data is different.
27
-
28
- :param config_name: The name of the ConfigMap.
29
- :param namespace: The namespace of the ConfigMap.
30
- :param data: A dictionary containing the data for the ConfigMap.
31
- :return: True if the ConfigMap was created or updated, False otherwise.
32
- """
33
- v1 = client.CoreV1Api()
34
- configmap_metadata = client.V1ObjectMeta(name=config_name, namespace=namespace)
35
- configmap = client.V1ConfigMap(api_version="v1", kind="ConfigMap", metadata=configmap_metadata, data=data)
36
-
37
- try:
38
- existing_configmap = v1.read_namespaced_config_map(name=config_name, namespace=namespace)
39
- # Compare the existing ConfigMap's data with the new data
40
- if existing_configmap.data == data:
41
- logger.info("No update needed for ConfigMap: {}".format(config_name))
42
- return False
43
- else:
44
- # Data is different, update the ConfigMap
45
- api_response = v1.replace_namespaced_config_map(name=config_name, namespace=namespace, body=configmap)
46
- logger.info("ConfigMap updated. Name: {}".format(api_response.metadata.name))
47
- return True
48
- except ApiException as e:
49
- if e.status == 404: # ConfigMap not found, create it
50
- try:
51
- api_response = v1.create_namespaced_config_map(namespace=namespace, body=configmap)
52
- logger.info("ConfigMap created. Name: {}".format(api_response.metadata.name))
53
- return {'trigger_restart': True}
54
- except ApiException as e:
55
- logger.error("Exception when creating ConfigMap: {}".format(e))
56
- raise ValueError
57
- else:
58
- logger.error("Failed to get or create ConfigMap: {}".format(e))
59
- raise ValueError
60
-
61
-
62
- def create_or_update_secret(secret_name, namespace, data, secret_type = None):
63
- """
64
- Create or update a Secret in a specified namespace if the data is different.
65
-
66
- :param name: The name of the Secret.
67
- :param namespace: The namespace of the Secret.
68
- :param data: A dictionary containing the data for the Secret. Values must be strings (not Base64 encoded).
69
- :return: True if the secret was created or updated, False otherwise.
70
- """
71
- if secret_type == None:
72
- secret_type = "Opaque"
73
-
74
- v1 = client.CoreV1Api()
75
- secret_metadata = client.V1ObjectMeta(name=secret_name, namespace=namespace)
76
- secret = client.V1Secret(
77
- api_version="v1",
78
- kind="Secret",
79
- metadata=secret_metadata,
80
- string_data=data,
81
- type=secret_type
82
- )
83
-
84
- try:
85
- existing_secret = v1.read_namespaced_secret(name=secret_name, namespace=namespace)
86
- # Encode the new data to compare with the existing Secret
87
- encoded_data = {k: base64.b64encode(v.encode()).decode() for k, v in data.items()}
88
-
89
- # Check if the existing secret's data matches the new data
90
- if existing_secret.data == encoded_data:
91
- logger.info("No update needed for Secret: {}".format(secret_name))
92
- return False
93
- else:
94
- # Data is different, update the Secret
95
- api_response = v1.replace_namespaced_secret(name=secret_name, namespace=namespace, body=secret)
96
- logger.info("Secret updated. Name: {}".format(api_response.metadata.name))
97
- return True
98
-
99
- except ApiException as e:
100
- if e.status == 404: # Secret not found, create it
101
- try:
102
- api_response = v1.create_namespaced_secret(namespace=namespace, body=secret)
103
- logger.info("Secret created. Name: {}".format(api_response.metadata.name))
104
- return {'trigger_restart': True}
105
- except ApiException as e:
106
- logger.error("Exception when creating Secret: {}".format(e))
107
- raise ValueError
108
- else:
109
- logger.error("Failed to get or create Secret: {}".format(e))
110
- raise ValueError
111
-
112
-
113
- def get_kubernetes_secret(secret_name, namespace):
114
- # Assuming that the Kubernetes configuration is already set
115
- v1 = client.CoreV1Api()
116
- try:
117
- secret = v1.read_namespaced_secret(secret_name, namespace)
118
- # Decoding the base64 encoded data
119
- decoded_data = {key: base64.b64decode(value).decode('utf-8') for key, value in secret.data.items()}
120
- return decoded_data
121
- except ApiException as e:
122
- logger.error(f"Failed to get secret: {e}")
123
- return None
124
-
125
-
126
- def restart_deployment(deployment, namespace):
127
- """
128
- Trigger a rollout restart of a deployment in a specified namespace.
129
-
130
- :param name: The name of the deployment.
131
- :param namespace: The namespace of the deployment.
132
- """
133
-
134
- v1 = client.AppsV1Api()
135
- body = {
136
- 'spec': {
137
- 'template': {
138
- 'metadata': {
139
- 'annotations': {
140
- 'kubectl.kubernetes.io/restartedAt': datetime.datetime.utcnow().isoformat()
141
- }
142
- }
143
- }
144
- }
145
- }
146
- try:
147
- api_response = v1.patch_namespaced_deployment(name=deployment, namespace=namespace, body=body)
148
- logger.info(f"Deployment restarted. Name: {api_response.metadata.name}")
149
- except ApiException as e:
150
- logger.error(f"Exception when restarting deployment: {e}")
151
-
152
-
153
- def has_mismatch_image_digest(repo_digest, label_selector, namespace):
154
- """
155
- Check all pods in the given namespace and matching the label selector for any
156
- mismatch between the latest image digest and the current image digest.
157
-
158
- parameters:
159
- - namespace: The namespace to search for pods.
160
- - label_selector: The label selector to identify the relevant pods.
161
- - repo_digest: The latest image digest to compare against.
162
-
163
- Returns:
164
- - True if any pod is found with an image digest mismatch.
165
- - False if all pods match the latest image digest.
166
- """
167
- core_v1_api = client.CoreV1Api()
168
-
169
- # Fetch pods based on namespace and label selector
170
- pods = core_v1_api.list_namespaced_pod(namespace, label_selector=label_selector)
171
-
172
- # Iterate over pods and their containers
173
- for pod in pods.items:
174
- for container_status in pod.status.container_statuses:
175
- current_image_id = container_status.image_id
176
- # Check for digest mismatch
177
- if current_image_id.split('@')[-1] != repo_digest:
178
- logger.info(f"Mismatch found in pod: {pod.metadata.name}, container: {container_status.name}")
179
- logger.info(f"Repo digest: {repo_digest}")
180
- logger.info(f"Curr digest: {current_image_id.split('@')[-1]}")
181
- return True
182
-
183
- logger.info("Images are in-sync.")
184
- logger.info(f"Repo digest: {repo_digest}")
185
- logger.info(f"Curr digest: {current_image_id.split('@')[-1]}")
1
+ from prefect import get_run_logger
2
+ from typing import List
3
+ from kubernetes import config
4
+ from kubernetes import client
5
+ from kubernetes.client.rest import ApiException
6
+ import base64
7
+ import datetime
8
+
9
+ from kube_watch.enums.kube import Hosts
10
+
11
+ logger = get_run_logger()
12
+
13
+
14
def setup(host=Hosts.REMOTE, context=None):
    """Load the Kubernetes client configuration.

    :param host: Hosts.LOCAL loads a kubeconfig (for local development);
                 anything else assumes in-cluster execution.
    :param context: kubeconfig context name, only relevant when host is LOCAL.
    """
    if host == Hosts.LOCAL:
        # Running outside a Kubernetes cluster: use the local kubeconfig,
        # optionally selecting a named context.
        config.load_kube_config(context=context)
    else:
        # Running inside a Kubernetes cluster: use the mounted service account.
        config.load_incluster_config()
21
+
22
+
23
+
24
def create_or_update_configmap(config_name, namespace, data):
    """
    Create or update a ConfigMap in a specified namespace if the data is different.

    :param config_name: The name of the ConfigMap.
    :param namespace: The namespace of the ConfigMap.
    :param data: A dictionary containing the data for the ConfigMap.
    :return: False if no change was needed, True if the ConfigMap was updated,
             or {'trigger_restart': True} if it was newly created.
    :raises ValueError: if the ConfigMap could not be read, created or replaced.
    """
    v1 = client.CoreV1Api()
    configmap_metadata = client.V1ObjectMeta(name=config_name, namespace=namespace)
    configmap = client.V1ConfigMap(api_version="v1", kind="ConfigMap", metadata=configmap_metadata, data=data)

    try:
        existing_configmap = v1.read_namespaced_config_map(name=config_name, namespace=namespace)
        # Compare the existing ConfigMap's data with the new data.
        if existing_configmap.data == data:
            logger.info("No update needed for ConfigMap: {}".format(config_name))
            return False
        # Data is different, update the ConfigMap.
        api_response = v1.replace_namespaced_config_map(name=config_name, namespace=namespace, body=configmap)
        logger.info("ConfigMap updated. Name: {}".format(api_response.metadata.name))
        return True
    except ApiException as e:
        if e.status == 404:  # ConfigMap not found, create it
            try:
                api_response = v1.create_namespaced_config_map(namespace=namespace, body=configmap)
                logger.info("ConfigMap created. Name: {}".format(api_response.metadata.name))
                return {'trigger_restart': True}
            # Distinct name so the original 404 error is not shadowed.
            except ApiException as create_err:
                logger.error("Exception when creating ConfigMap: {}".format(create_err))
                raise ValueError("Failed to create ConfigMap {}".format(config_name)) from create_err
        logger.error("Failed to get or create ConfigMap: {}".format(e))
        raise ValueError("Failed to read or update ConfigMap {}".format(config_name)) from e
60
+
61
+
62
def create_or_update_secret(secret_name, namespace, data, secret_type=None):
    """
    Create or update a Secret in a specified namespace if the data is different.

    :param secret_name: The name of the Secret.
    :param namespace: The namespace of the Secret.
    :param data: A dictionary containing the data for the Secret. Values must be strings (not Base64 encoded).
    :param secret_type: Kubernetes Secret type; defaults to "Opaque".
    :return: False if no change was needed, True if the Secret was updated,
             or {'trigger_restart': True} if it was newly created.
    :raises ValueError: if the Secret could not be read, created or replaced.
    """
    if secret_type is None:
        secret_type = "Opaque"

    v1 = client.CoreV1Api()
    secret_metadata = client.V1ObjectMeta(name=secret_name, namespace=namespace)
    secret = client.V1Secret(
        api_version="v1",
        kind="Secret",
        metadata=secret_metadata,
        string_data=data,  # plain-text values; API stores them base64-encoded
        type=secret_type
    )

    try:
        existing_secret = v1.read_namespaced_secret(name=secret_name, namespace=namespace)
        # The API returns Secret data base64-encoded, so encode the new data
        # before comparing.
        encoded_data = {k: base64.b64encode(v.encode()).decode() for k, v in data.items()}

        if existing_secret.data == encoded_data:
            logger.info("No update needed for Secret: {}".format(secret_name))
            return False
        # Data is different, update the Secret.
        api_response = v1.replace_namespaced_secret(name=secret_name, namespace=namespace, body=secret)
        logger.info("Secret updated. Name: {}".format(api_response.metadata.name))
        return True
    except ApiException as e:
        if e.status == 404:  # Secret not found, create it
            try:
                api_response = v1.create_namespaced_secret(namespace=namespace, body=secret)
                logger.info("Secret created. Name: {}".format(api_response.metadata.name))
                return {'trigger_restart': True}
            # Distinct name so the original 404 error is not shadowed.
            except ApiException as create_err:
                logger.error("Exception when creating Secret: {}".format(create_err))
                raise ValueError("Failed to create Secret {}".format(secret_name)) from create_err
        logger.error("Failed to get or create Secret: {}".format(e))
        raise ValueError("Failed to read or update Secret {}".format(secret_name)) from e
111
+
112
+
113
def get_kubernetes_secret(secret_name, namespace):
    """Fetch a Secret and return its data base64-decoded, or None on failure.

    Assumes the Kubernetes client configuration has already been loaded.
    """
    v1 = client.CoreV1Api()
    try:
        secret = v1.read_namespaced_secret(secret_name, namespace)
    except ApiException as e:
        logger.error(f"Failed to get secret: {e}")
        return None
    # Secret values come back base64-encoded; decode each one to text.
    return {key: base64.b64decode(value).decode('utf-8') for key, value in secret.data.items()}
124
+
125
+
126
def restart_deployment(deployment, namespace):
    """
    Trigger a rollout restart of a deployment in a specified namespace.

    Patches the pod template with a fresh `restartedAt` annotation (the same
    mechanism `kubectl rollout restart` uses) so the controller rolls the pods.

    :param deployment: The name of the deployment.
    :param namespace: The namespace of the deployment.
    """
    v1 = client.AppsV1Api()
    body = {
        'spec': {
            'template': {
                'metadata': {
                    'annotations': {
                        # Timezone-aware timestamp; datetime.utcnow() is deprecated.
                        'kubectl.kubernetes.io/restartedAt': datetime.datetime.now(datetime.timezone.utc).isoformat()
                    }
                }
            }
        }
    }
    try:
        api_response = v1.patch_namespaced_deployment(name=deployment, namespace=namespace, body=body)
        logger.info(f"Deployment restarted. Name: {api_response.metadata.name}")
    except ApiException as e:
        # Best-effort: log and continue rather than failing the flow.
        logger.error(f"Exception when restarting deployment: {e}")
151
+
152
+
153
def has_mismatch_image_digest(repo_digest, label_selector, namespace):
    """
    Check all pods in the given namespace and matching the label selector for any
    mismatch between the latest image digest and the current image digest.

    Parameters:
    - repo_digest: The latest image digest to compare against.
    - label_selector: The label selector to identify the relevant pods.
    - namespace: The namespace to search for pods.

    Returns:
    - True if any pod is found with an image digest mismatch.
    - False if all pods match the latest image digest (or none were found).
    """
    core_v1_api = client.CoreV1Api()

    # Fetch pods based on namespace and label selector.
    pods = core_v1_api.list_namespaced_pod(namespace, label_selector=label_selector)

    # Iterate over pods and their containers.
    for pod in pods.items:
        # container_statuses is None while a pod has no started containers yet.
        for container_status in pod.status.container_statuses or []:
            # image_id is of the form "<registry>/<repo>@sha256:<digest>".
            current_digest = container_status.image_id.split('@')[-1]
            if current_digest != repo_digest:
                logger.info(f"Mismatch found in pod: {pod.metadata.name}, container: {container_status.name}")
                logger.info(f"Repo digest: {repo_digest}")
                logger.info(f"Curr digest: {current_digest}")
                return True

    # Note: the previous version logged the loop variable here, which raised
    # NameError when no pods/containers matched the selector.
    logger.info("Images are in-sync.")
    logger.info(f"Repo digest: {repo_digest}")
    return False
File without changes
@@ -0,0 +1,12 @@
1
+ from typing import Union
2
+ from pydantic import BaseModel
3
+
4
class TableQuery(BaseModel):
    """Connection details plus the table/column targeted by a query."""

    name: str
    column_name: str
    db_host: str
    db_port: int
    db_name: str
    db_user: str
    # NOTE(review): credential carried as plain text; confirm it is sourced
    # from a secret store rather than static config.
    db_pass: str