kubernetes-watch 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. kubernetes_watch-0.1.0/LICENSE +21 -0
  2. kubernetes_watch-0.1.0/PKG-INFO +134 -0
  3. kubernetes_watch-0.1.0/README.md +111 -0
  4. kubernetes_watch-0.1.0/kube_watch/__init__.py +0 -0
  5. kubernetes_watch-0.1.0/kube_watch/enums/__init__.py +0 -0
  6. kubernetes_watch-0.1.0/kube_watch/enums/kube.py +6 -0
  7. kubernetes_watch-0.1.0/kube_watch/enums/logic.py +9 -0
  8. kubernetes_watch-0.1.0/kube_watch/enums/providers.py +13 -0
  9. kubernetes_watch-0.1.0/kube_watch/enums/workflow.py +17 -0
  10. kubernetes_watch-0.1.0/kube_watch/models/__init__.py +0 -0
  11. kubernetes_watch-0.1.0/kube_watch/models/common.py +17 -0
  12. kubernetes_watch-0.1.0/kube_watch/models/workflow.py +55 -0
  13. kubernetes_watch-0.1.0/kube_watch/modules/__init__.py +0 -0
  14. kubernetes_watch-0.1.0/kube_watch/modules/clusters/__init__.py +0 -0
  15. kubernetes_watch-0.1.0/kube_watch/modules/clusters/kube.py +186 -0
  16. kubernetes_watch-0.1.0/kube_watch/modules/logic/actions.py +26 -0
  17. kubernetes_watch-0.1.0/kube_watch/modules/logic/checks.py +8 -0
  18. kubernetes_watch-0.1.0/kube_watch/modules/logic/load.py +8 -0
  19. kubernetes_watch-0.1.0/kube_watch/modules/logic/merge.py +31 -0
  20. kubernetes_watch-0.1.0/kube_watch/modules/logic/scheduler.py +74 -0
  21. kubernetes_watch-0.1.0/kube_watch/modules/logic/trasnform.py +0 -0
  22. kubernetes_watch-0.1.0/kube_watch/modules/mock/__init__.py +0 -0
  23. kubernetes_watch-0.1.0/kube_watch/modules/mock/mock_generator.py +24 -0
  24. kubernetes_watch-0.1.0/kube_watch/modules/providers/__init__.py +0 -0
  25. kubernetes_watch-0.1.0/kube_watch/modules/providers/aws.py +154 -0
  26. kubernetes_watch-0.1.0/kube_watch/modules/providers/git.py +33 -0
  27. kubernetes_watch-0.1.0/kube_watch/modules/providers/github.py +126 -0
  28. kubernetes_watch-0.1.0/kube_watch/modules/providers/vault.py +113 -0
  29. kubernetes_watch-0.1.0/kube_watch/standalone/metarecogen/ckan_to_gn.py +132 -0
  30. kubernetes_watch-0.1.0/kube_watch/watch/__init__.py +1 -0
  31. kubernetes_watch-0.1.0/kube_watch/watch/helpers.py +126 -0
  32. kubernetes_watch-0.1.0/kube_watch/watch/workflow.py +107 -0
  33. kubernetes_watch-0.1.0/pyproject.toml +25 -0
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Benyamin Motevalli
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,134 @@
+ Metadata-Version: 2.1
+ Name: kubernetes-watch
+ Version: 0.1.0
+ Summary:
+ License: MIT
+ Author: bmotevalli
+ Author-email: b.motevalli@gmail.com
+ Requires-Python: >=3.11,<4.0
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.11
+ Requires-Dist: boto3 (==1.34.68)
+ Requires-Dist: cryptography (==42.0.5)
+ Requires-Dist: gitpython (==3.1.43)
+ Requires-Dist: humps (==0.2.2)
+ Requires-Dist: hvac (==2.1.0)
+ Requires-Dist: kubernetes (==29.0.0)
+ Requires-Dist: prefect (==2.18.0)
+ Requires-Dist: pyyaml (==6.0.1)
+ Requires-Dist: requests (==2.31.0)
+ Description-Content-Type: text/markdown
+
+ # kube_watch
+
+ # To set up the project:
+
+ - `poetry install`
+ - `poetry shell`
+
+ # To install the package into your environment:
+
+ `python setup.py install`
+
+
+ # Description
+ The kube_watch library is built on top of <a href='https://docs.prefect.io/latest/'>Prefect</a>. The library is designed to define workflows in a declarative and flexible fashion. Natively, workflows in Prefect are defined via decorators such as @flow and @task. In kube_watch, workflows can instead be defined declaratively via YAML files. The library is mainly focused on running scheduled workflows in a Kubernetes environment; however, it can easily be extended to any purpose requiring a workflow. The workflow manifest has the following generic structure:
+
+ ```
+ workflow:
+   name: Dummy Workflow
+   runner: concurrent
+   tasks:
+     - name: Task_A
+       module: <module_path>
+       task: <func_name>
+       inputsArgType: arg
+       inputs:
+         parameters:
+           - name: x1
+             value: y1
+           - name: x2
+             value: y2
+           - name: x3
+             type: env
+             value: Y3
+
+     - name: Task_B
+       module: <module_path>
+       task: <func_name>
+       inputsArgType: arg
+       inputs:
+         parameters:
+           - name: xx1
+             value: yy1
+           - name: xx2
+             value: yy2
+       dependency:
+         - taskName: Task_A
+           inputParamName: xx3
+
+     - name: Task_C
+       module: <module_path>
+       task: <func_name>
+       inputsArgType: arg
+       conditional:
+         tasks: ["Task_B"]
+ ```
+
+
+ **runner**: concurrent | sequential. If concurrent is selected, tasks are run concurrently; with sequential, they run one after another.
+
+ **module**: all modules are located in the 'modules' directory of kube_watch. This is where you can extend the library and add new tasks / modules. Below modules, there are submodules such as providers, clusters, and logic. Within each of these submodules, specific modules are defined. For example, providers.aws contains a series of tasks related to AWS; in this case, <module_path> = providers.aws. To add new tasks, add a new module following a similar pattern and reference its path in your task block, as shown in the sketch below.
+
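+ As a minimal sketch (hypothetical; the module name providers.dummy and the function greet are illustrative, not part of the released package), a new task module could look like:
+
+ ```
+ # kube_watch/modules/providers/dummy.py  (hypothetical example module)
+ def greet(name):
+     # Referenced from a manifest with module: providers.dummy, task: greet,
+     # and an input parameter named 'name'.
+     return f"Hello, {name}!"
+ ```
+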
+ **task**: the name of the function defined in <module_path>. That is, once you define a function in a module, you can immediately use it in your manifests.
+
+ **inputsArgType**: arg | dict. If the task function accepts a known, fixed number of parameters, use arg.
+
+ **dependency**: this block defines the dependency of a child task on its parent. If **inputParamName** is defined, the OUTPUT of the parent task is passed to the child as an argument named by inputParamName.
+
+ **IMPORTANT NOTE**: A strict assumption is that task functions return a single output. If a task has multiple outputs, wrap them into a dictionary and unwrap them in the child task.
+
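+ For instance (a hypothetical illustration, not a function shipped with the package):
+
+ ```
+ def check_image(repo_digest):
+     # Two results wrapped into a single dictionary output; the child
+     # task receives this dict and unwraps the keys it needs.
+     return {"has_mismatch": True, "digest": repo_digest}
+ ```
+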
+ **conditional**: blocks that define whether a task runs depending on the outcome of its parent tasks. The parent tasks should return True or False.
+
+
+ Parameters also have a type entry: env | static, with static as the default. If type is set to env, the parameter value is loaded from the environment variables; in this case, value should be the name of the corresponding environment variable.
+
+ In the above example, the task functions would look like:
+
+ ```
+ def Task_A(x1, x2, x3):
+     # do something
+     return output_A
+
+ def Task_B(xx1, xx2, xx3):
+     # xx3 receives Task_A's output via the dependency block
+     # do something else
+     return output_B
+
+ def Task_C():
+     # do another thing; runs only if Task_B returns True
+     return output_C
+ ```
+
+
+
+ # Batch workflows
+ kube_watch can also run workflows in batch. A separate manifest of the following form is required:
+
+ ```
+ batchFlows:
+   runner: sequential
+   items:
+     - path: path_to_flow_A.yaml
+     - path: path_to_flow_B.yaml
+     - path: path_to_flow_C.yaml
+ ```
+
+
+ # cron_app
+ The cron_app folder contains an example use case of the kube_watch library. The cron_app can be used to deploy a CronJob in a Kubernetes environment. The app assumes the manifests are located in a separate repository; it clones the repo, reads the manifests, and runs the workflows.
+
+ # Connect to a server
+ ## Start Server
+ `prefect server start`
+ ## To Connect
+ To connect to a server, simply set the `PREFECT_API_URL` environment variable.
+
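+ For a local server, this is typically `export PREFECT_API_URL=http://127.0.0.1:4200/api` (assuming Prefect's default port).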
@@ -0,0 +1,111 @@
+ # kube_watch
+
+ # To set up the project:
+
+ - `poetry install`
+ - `poetry shell`
+
+ # To install the package into your environment:
+
+ `python setup.py install`
+
+
+ # Description
+ The kube_watch library is built on top of <a href='https://docs.prefect.io/latest/'>Prefect</a>. The library is designed to define workflows in a declarative and flexible fashion. Natively, workflows in Prefect are defined via decorators such as @flow and @task. In kube_watch, workflows can instead be defined declaratively via YAML files. The library is mainly focused on running scheduled workflows in a Kubernetes environment; however, it can easily be extended to any purpose requiring a workflow. The workflow manifest has the following generic structure:
+
+ ```
+ workflow:
+   name: Dummy Workflow
+   runner: concurrent
+   tasks:
+     - name: Task_A
+       module: <module_path>
+       task: <func_name>
+       inputsArgType: arg
+       inputs:
+         parameters:
+           - name: x1
+             value: y1
+           - name: x2
+             value: y2
+           - name: x3
+             type: env
+             value: Y3
+
+     - name: Task_B
+       module: <module_path>
+       task: <func_name>
+       inputsArgType: arg
+       inputs:
+         parameters:
+           - name: xx1
+             value: yy1
+           - name: xx2
+             value: yy2
+       dependency:
+         - taskName: Task_A
+           inputParamName: xx3
+
+     - name: Task_C
+       module: <module_path>
+       task: <func_name>
+       inputsArgType: arg
+       conditional:
+         tasks: ["Task_B"]
+ ```
+
+
+ **runner**: concurrent | sequential. If concurrent is selected, tasks are run concurrently; with sequential, they run one after another.
+
+ **module**: all modules are located in the 'modules' directory of kube_watch. This is where you can extend the library and add new tasks / modules. Below modules, there are submodules such as providers, clusters, and logic. Within each of these submodules, specific modules are defined. For example, providers.aws contains a series of tasks related to AWS; in this case, <module_path> = providers.aws. To add new tasks, add a new module following a similar pattern and reference its path in your task block, as shown in the sketch below.
+
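+ As a minimal sketch (hypothetical; the module name providers.dummy and the function greet are illustrative, not part of the released package), a new task module could look like:
+
+ ```
+ # kube_watch/modules/providers/dummy.py  (hypothetical example module)
+ def greet(name):
+     # Referenced from a manifest with module: providers.dummy, task: greet,
+     # and an input parameter named 'name'.
+     return f"Hello, {name}!"
+ ```
+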
+ **task**: the name of the function defined in <module_path>. That is, once you define a function in a module, you can immediately use it in your manifests.
+
+ **inputsArgType**: arg | dict. If the task function accepts a known, fixed number of parameters, use arg.
+
+ **dependency**: this block defines the dependency of a child task on its parent. If **inputParamName** is defined, the OUTPUT of the parent task is passed to the child as an argument named by inputParamName.
+
+ **IMPORTANT NOTE**: A strict assumption is that task functions return a single output. If a task has multiple outputs, wrap them into a dictionary and unwrap them in the child task.
+
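+ For instance (a hypothetical illustration, not a function shipped with the package):
+
+ ```
+ def check_image(repo_digest):
+     # Two results wrapped into a single dictionary output; the child
+     # task receives this dict and unwraps the keys it needs.
+     return {"has_mismatch": True, "digest": repo_digest}
+ ```
+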
+ **conditional**: blocks that define whether a task runs depending on the outcome of its parent tasks. The parent tasks should return True or False.
+
+
+ Parameters also have a type entry: env | static, with static as the default. If type is set to env, the parameter value is loaded from the environment variables; in this case, value should be the name of the corresponding environment variable.
+
+ In the above example, the task functions would look like:
+
+ ```
+ def Task_A(x1, x2, x3):
+     # do something
+     return output_A
+
+ def Task_B(xx1, xx2, xx3):
+     # xx3 receives Task_A's output via the dependency block
+     # do something else
+     return output_B
+
+ def Task_C():
+     # do another thing; runs only if Task_B returns True
+     return output_C
+ ```
+
+
+
+ # Batch workflows
+ kube_watch can also run workflows in batch. A separate manifest of the following form is required:
+
+ ```
+ batchFlows:
+   runner: sequential
+   items:
+     - path: path_to_flow_A.yaml
+     - path: path_to_flow_B.yaml
+     - path: path_to_flow_C.yaml
+ ```
+
+
+ # cron_app
+ The cron_app folder contains an example use case of the kube_watch library. The cron_app can be used to deploy a CronJob in a Kubernetes environment. The app assumes the manifests are located in a separate repository; it clones the repo, reads the manifests, and runs the workflows.
+
+ # Connect to a server
+ ## Start Server
+ `prefect server start`
+ ## To Connect
+ To connect to a server, simply set the `PREFECT_API_URL` environment variable.
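+ For a local server, this is typically `export PREFECT_API_URL=http://127.0.0.1:4200/api` (assuming Prefect's default port).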
File without changes
File without changes
@@ -0,0 +1,6 @@
+ from enum import Enum
+
+
+ class Hosts(str, Enum):
+     LOCAL = 'local'
+     REMOTE = 'remote'
@@ -0,0 +1,9 @@
+ from enum import Enum
+
+ class Operations(str, Enum):
+     OR = 'or'
+     AND = 'and'
+     SUM = 'sum'
+     AVG = 'avg'
+     MAX = 'max'
+     MIN = 'min'
@@ -0,0 +1,13 @@
+ from enum import Enum
+
+ class Providers(str, Enum):
+     AWS = "aws"
+     AZURE = "azure"
+     GCP = "gcp"
+     VAULT = "vault"
+
+
+ class AwsResources(str, Enum):
+     ECR = "ecr"  # elastic container registry
+     S3 = "s3"
+     IAM = "iam"
@@ -0,0 +1,17 @@
+ from enum import Enum
+
+ class ParameterType(str, Enum):
+     STATIC = 'static'
+     FROM_ENV = 'env'
+
+
+ class TaskRunners(str, Enum):
+     SEQUENTIAL = 'sequential'
+     CONCURRENT = 'concurrent'
+     DASK = 'dask'
+     RAY = 'ray'
+
+
+ class TaskInputsType(str, Enum):
+     ARG = 'arg'
+     DICT = 'dict'
File without changes
@@ -0,0 +1,17 @@
+ from pydantic import BaseModel, ConfigDict
+ from humps.camel import case
+
+ def to_camel(string):
+     if string == "id":
+         return "_id"
+     if string.startswith("_"):  # e.g. "_id"
+         return string
+     return case(string)
+
+ class CamelModel(BaseModel):
+     """
+     Replacement for pydantic BaseModel which simply adds a camel-case alias to every field.
+     NOTE: This has been updated for Pydantic 2 to remove some common encoding helpers.
+     """
+
+     model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
@@ -0,0 +1,55 @@
+ from typing import List, Optional, Dict, Any
+ from kube_watch.enums.workflow import ParameterType, TaskRunners, TaskInputsType
+ from kube_watch.enums.logic import Operations
+
+ from .common import CamelModel
+
+ class Parameter(CamelModel):
+     name: str
+     value: Any
+     type: Optional[ParameterType] = ParameterType.STATIC
+
+ class Artifact(CamelModel):
+     path: str
+
+ class Inputs(CamelModel):
+     parameters: Optional[List[Parameter]] = []
+     artifacts: Optional[List[Artifact]] = []
+
+ class Dependency(CamelModel):
+     taskName: str
+     inputParamName: Optional[str] = None
+
+ class Condition(CamelModel):
+     tasks: List[str]
+     operation: Optional[Operations] = Operations.AND
+
+ class Task(CamelModel):
+     module: str
+     task: str
+     name: str
+     inputsArgType: Optional[TaskInputsType] = TaskInputsType.ARG
+     inputs: Optional[Inputs] = None
+     dependency: Optional[List[Dependency]] = None
+     conditional: Optional[Condition] = None
+     outputs: Optional[List[str]] = None
+
+ class WorkflowConfig(CamelModel):
+     name: str
+     runner: TaskRunners = TaskRunners.CONCURRENT
+     tasks: List[Task]
+
+ class WorkflowOutput(CamelModel):
+     flow_run: Any
+     config: Any
+
+ class BatchFlowItem(CamelModel):
+     path: str
+
+ class BatchFlowConfig(CamelModel):
+     # Only possible runners are concurrent and sequential
+     runner: TaskRunners = TaskRunners.CONCURRENT
+     items: List[BatchFlowItem]
+
File without changes
@@ -0,0 +1,186 @@
+ from prefect import get_run_logger
+ from typing import List
+ from kubernetes import config
+ from kubernetes import client
+ from kubernetes.client.rest import ApiException
+ import base64
+ import datetime
+
+ from kube_watch.enums.kube import Hosts
+
+ logger = get_run_logger()
+
+
+ def setup(host=Hosts.REMOTE, context=None):
+     if host == Hosts.LOCAL:
+         # Running outside a Kubernetes cluster (e.g., local development)
+         config.load_kube_config(context=context)  # You can specify the context here if necessary
+     else:
+         # Running inside a Kubernetes cluster
+         config.load_incluster_config()
+
+
+ def create_or_update_configmap(config_name, namespace, data):
+     """
+     Create or update a ConfigMap in a specified namespace if the data is different.
+
+     :param config_name: The name of the ConfigMap.
+     :param namespace: The namespace of the ConfigMap.
+     :param data: A dictionary containing the data for the ConfigMap.
+     :return: True if the ConfigMap was updated, False if no change was needed;
+              {'trigger_restart': True} if the ConfigMap was created.
+     """
+     v1 = client.CoreV1Api()
+     configmap_metadata = client.V1ObjectMeta(name=config_name, namespace=namespace)
+     configmap = client.V1ConfigMap(api_version="v1", kind="ConfigMap", metadata=configmap_metadata, data=data)
+
+     try:
+         existing_configmap = v1.read_namespaced_config_map(name=config_name, namespace=namespace)
+         # Compare the existing ConfigMap's data with the new data
+         if existing_configmap.data == data:
+             logger.info("No update needed for ConfigMap: {}".format(config_name))
+             return False
+         else:
+             # Data is different, update the ConfigMap
+             api_response = v1.replace_namespaced_config_map(name=config_name, namespace=namespace, body=configmap)
+             logger.info("ConfigMap updated. Name: {}".format(api_response.metadata.name))
+             return True
+     except ApiException as e:
+         if e.status == 404:  # ConfigMap not found, create it
+             try:
+                 api_response = v1.create_namespaced_config_map(namespace=namespace, body=configmap)
+                 logger.info("ConfigMap created. Name: {}".format(api_response.metadata.name))
+                 return {'trigger_restart': True}
+             except ApiException as e:
+                 logger.error("Exception when creating ConfigMap: {}".format(e))
+                 raise ValueError
+         else:
+             logger.error("Failed to get or create ConfigMap: {}".format(e))
+             raise ValueError
+
+
+ def create_or_update_secret(secret_name, namespace, data, secret_type=None):
+     """
+     Create or update a Secret in a specified namespace if the data is different.
+
+     :param secret_name: The name of the Secret.
+     :param namespace: The namespace of the Secret.
+     :param data: A dictionary containing the data for the Secret. Values must be strings (not Base64 encoded).
+     :return: True if the Secret was updated, False if no change was needed;
+              {'trigger_restart': True} if the Secret was created.
+     """
+     if secret_type is None:
+         secret_type = "Opaque"
+
+     v1 = client.CoreV1Api()
+     secret_metadata = client.V1ObjectMeta(name=secret_name, namespace=namespace)
+     secret = client.V1Secret(
+         api_version="v1",
+         kind="Secret",
+         metadata=secret_metadata,
+         string_data=data,
+         type=secret_type
+     )
+
+     try:
+         existing_secret = v1.read_namespaced_secret(name=secret_name, namespace=namespace)
+         # Encode the new data to compare with the existing Secret
+         encoded_data = {k: base64.b64encode(v.encode()).decode() for k, v in data.items()}
+
+         # Check if the existing secret's data matches the new data
+         if existing_secret.data == encoded_data:
+             logger.info("No update needed for Secret: {}".format(secret_name))
+             return False
+         else:
+             # Data is different, update the Secret
+             api_response = v1.replace_namespaced_secret(name=secret_name, namespace=namespace, body=secret)
+             logger.info("Secret updated. Name: {}".format(api_response.metadata.name))
+             return True
+
+     except ApiException as e:
+         if e.status == 404:  # Secret not found, create it
+             try:
+                 api_response = v1.create_namespaced_secret(namespace=namespace, body=secret)
+                 logger.info("Secret created. Name: {}".format(api_response.metadata.name))
+                 return {'trigger_restart': True}
+             except ApiException as e:
+                 logger.error("Exception when creating Secret: {}".format(e))
+                 raise ValueError
+         else:
+             logger.error("Failed to get or create Secret: {}".format(e))
+             raise ValueError
+
+
+ def get_kubernetes_secret(secret_name, namespace):
+     # Assuming that the Kubernetes configuration is already set
+     v1 = client.CoreV1Api()
+     try:
+         secret = v1.read_namespaced_secret(secret_name, namespace)
+         # Decode the base64-encoded data
+         decoded_data = {key: base64.b64decode(value).decode('utf-8') for key, value in secret.data.items()}
+         return decoded_data
+     except ApiException as e:
+         logger.error(f"Failed to get secret: {e}")
+         return None
+
+
+ def restart_deployment(deployment, namespace):
+     """
+     Trigger a rollout restart of a deployment in a specified namespace.
+
+     :param deployment: The name of the deployment.
+     :param namespace: The namespace of the deployment.
+     """
+
+     v1 = client.AppsV1Api()
+     body = {
+         'spec': {
+             'template': {
+                 'metadata': {
+                     'annotations': {
+                         'kubectl.kubernetes.io/restartedAt': datetime.datetime.utcnow().isoformat()
+                     }
+                 }
+             }
+         }
+     }
+     try:
+         api_response = v1.patch_namespaced_deployment(name=deployment, namespace=namespace, body=body)
+         logger.info(f"Deployment restarted. Name: {api_response.metadata.name}")
+     except ApiException as e:
+         logger.error(f"Exception when restarting deployment: {e}")
+
+
+ def has_mismatch_image_digest(repo_digest, label_selector, namespace):
+     """
+     Check all pods in the given namespace matching the label selector for any
+     mismatch between the latest image digest and the current image digest.
+
+     Parameters:
+     - namespace: The namespace to search for pods.
+     - label_selector: The label selector to identify the relevant pods.
+     - repo_digest: The latest image digest to compare against.
+
+     Returns:
+     - True if any pod is found with an image digest mismatch.
+     - False if all pods match the latest image digest.
+     """
+     core_v1_api = client.CoreV1Api()
+
+     # Fetch pods based on namespace and label selector
+     pods = core_v1_api.list_namespaced_pod(namespace, label_selector=label_selector)
+
+     # Iterate over pods and their containers
+     for pod in pods.items:
+         for container_status in pod.status.container_statuses:
+             current_image_id = container_status.image_id
+             # Check for digest mismatch
+             if current_image_id.split('@')[-1] != repo_digest:
+                 logger.info(f"Mismatch found in pod: {pod.metadata.name}, container: {container_status.name}")
+                 logger.info(f"Repo digest: {repo_digest}")
+                 logger.info(f"Curr digest: {current_image_id.split('@')[-1]}")
+                 return True
+
+     logger.info("Images are in-sync.")
+     logger.info(f"Repo digest: {repo_digest}")
+     return False
@@ -0,0 +1,26 @@
+ import subprocess
+ import os
+ from prefect import get_run_logger
+ logger = get_run_logger()
+
+ def run_standalone_script(package_name, package_run, package_exec):
+     script_dir = os.path.dirname(os.path.realpath(__file__))
+     target_dir = os.path.join(script_dir, os.pardir, os.pardir, *package_name.split('.'))
+
+     # Build the full command from the runner (e.g. python) and the resolved script path
+     full_command = f"{package_run} {os.path.join(target_dir, package_exec)}"
+
+     # Execute the command; check=False so stdout/stderr are logged before
+     # a non-zero exit code is turned into an exception below
+     try:
+         result = subprocess.run(full_command, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+         if result.stdout:
+             logger.info(result.stdout)
+         if result.stderr:
+             logger.error(result.stderr)
+         result.check_returncode()
+     except subprocess.CalledProcessError as e:
+         # All logs should have already been handled above, now just raise an exception
+         logger.error("The subprocess encountered an error: %s", e)
+         raise Exception("Subprocess failed with exit code {}".format(e.returncode))
@@ -0,0 +1,8 @@
+
+
+ def dicts_has_diff(dict_a, dict_b):
+     return dict_a != dict_b
+
+
+ def remove_keys(d, keys):
+     return {k: v for k, v in d.items() if k not in keys}
@@ -0,0 +1,8 @@
+ import os
+ from prefect import get_run_logger
+ logger = get_run_logger()
+
+ def load_secrets_to_env(data):
+     for key, value in data.items():
+         os.environ[key] = value
+     # logger.info(f"ENV VAR: {key} loaded")
@@ -0,0 +1,31 @@
+ from typing import Any, List, Dict
+ from kube_watch.enums.logic import Operations
+
+
+ def merge_logical_outputs(inp_dict: Dict):
+     if 'operation' not in inp_dict.keys():
+         raise TypeError("Missing required parameters: 'operation'")
+     operation = inp_dict.get('operation')
+     del inp_dict['operation']
+
+     inputs = [v for k, v in inp_dict.items()]
+     return merge_logical_list(inputs, operation)
+
+
+ def merge_logical_list(inp_list: List, operation: Operations):
+     if operation == Operations.OR:
+         return any(inp_list)
+     if operation == Operations.AND:
+         return all(inp_list)
+     raise ValueError("Invalid logical operation")
+
+
+ def partial_dict_update(orig_data, new_data):
+     """
+     This function is used when some key-value pairs in orig_data should
+     be updated from new_data.
+     """
+     for k, v in new_data.items():
+         orig_data[k] = v
+
+     return orig_data