kubernetes-watch 0.1.1__tar.gz → 0.1.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/PKG-INFO +16 -13
  2. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/README.md +117 -111
  3. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/enums/workflow.py +1 -0
  4. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/models/workflow.py +6 -1
  5. kubernetes_watch-0.1.3/kube_watch/modules/logic/actions.py +56 -0
  6. kubernetes_watch-0.1.3/kube_watch/modules/mock/mock_generator.py +53 -0
  7. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/providers/aws.py +62 -5
  8. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/watch/helpers.py +26 -8
  9. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/watch/workflow.py +6 -13
  10. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/pyproject.toml +10 -12
  11. kubernetes_watch-0.1.1/kube_watch/modules/logic/actions.py +0 -26
  12. kubernetes_watch-0.1.1/kube_watch/modules/mock/mock_generator.py +0 -24
  13. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/LICENSE +0 -0
  14. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/__init__.py +0 -0
  15. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/enums/__init__.py +0 -0
  16. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/enums/kube.py +0 -0
  17. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/enums/logic.py +0 -0
  18. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/enums/providers.py +0 -0
  19. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/models/__init__.py +0 -0
  20. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/models/common.py +0 -0
  21. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/__init__.py +0 -0
  22. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/clusters/__init__.py +0 -0
  23. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/clusters/kube.py +0 -0
  24. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/logic/checks.py +0 -0
  25. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/logic/load.py +0 -0
  26. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/logic/merge.py +0 -0
  27. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/logic/scheduler.py +0 -0
  28. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/logic/trasnform.py +0 -0
  29. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/mock/__init__.py +0 -0
  30. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/providers/__init__.py +0 -0
  31. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/providers/git.py +0 -0
  32. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/providers/github.py +0 -0
  33. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/modules/providers/vault.py +0 -0
  34. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/standalone/metarecogen/ckan_to_gn.py +0 -0
  35. {kubernetes_watch-0.1.1 → kubernetes_watch-0.1.3}/kube_watch/watch/__init__.py +0 -0
@@ -1,25 +1,22 @@
  Metadata-Version: 2.1
  Name: kubernetes-watch
- Version: 0.1.1
+ Version: 0.1.3
  Summary:
- License: MIT
  Author: bmotevalli
  Author-email: b.motevalli@gmail.com
  Requires-Python: >=3.10,<4
- Classifier: License :: OSI Approved :: MIT License
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
- Requires-Dist: boto3 (==1.34.68)
- Requires-Dist: cryptography (==42.0.5)
- Requires-Dist: gitpython (==3.1.43)
- Requires-Dist: humps (==0.2.2)
- Requires-Dist: hvac (==2.1.0)
- Requires-Dist: kubernetes (==29.0.0)
- Requires-Dist: prefect (==2.18.0)
- Requires-Dist: pyyaml (==6.0.1)
- Requires-Dist: requests (==2.31.0)
+ Requires-Dist: GitPython (>=3.1.43,<4.0.0)
+ Requires-Dist: PyYAML (>=6.0.1,<7.0.0)
+ Requires-Dist: boto3 (>=1.34.68,<2.0.0)
+ Requires-Dist: humps (>=0.2.2,<0.3.0)
+ Requires-Dist: hvac (>=2.1.0,<3.0.0)
+ Requires-Dist: kubernetes (>=29.0.0,<30.0.0)
+ Requires-Dist: prefect (>=2.18.0,<3.0.0)
+ Requires-Dist: requests (>=2.32.3,<3.0.0)
  Description-Content-Type: text/markdown

  # kube_watch
@@ -29,10 +26,16 @@ Description-Content-Type: text/markdown
  - `poetry install`
  - `poetry shell`

- # To install package to your environment:
+ # To install package to your environment locally:

  python setup.py install

+ # To publish
+
+ `poetry config pypi-token.pypi your-api-token`
+ `poetry build`
+ `poetry publish`
+

  # Description
  The kube_watch library is built on top of <a href='https://docs.prefect.io/latest/'>Prefect</a>. The library is designed to define workflows in a declarative and flexible fashion. Originally, workflows in Prefect are defined via decorators such as @flow and @task. In kube_watch, workflows can be defined declaratively via yaml files. The library is mainly focused on running scheduled workflows in a Kubernetes environment. However, it can easily be extended for any purpose requiring a workflow. The workflow manifest has the following generic structure:
@@ -1,111 +1,117 @@
- # kube_watch
-
- # To set up the project:
-
- - `poetry install`
- - `poetry shell`
-
- # To install package to your environment:
-
- python setup.py install
-
-
- # Description
- The kube_watch library is built on top of <a href='https://docs.prefect.io/latest/'>Prefect</a>. The library is designed to define workflows in a declarative and flexible fashion. Originally, workflows in Prefect are defined via decorators such as @flow and @task. In kube_watch, workflows can be defined declaratively via yaml files. The library is mainly focused on running scheduled workflows in a Kubernetes environment. However, it can easily be extended for any purpose requiring a workflow. The workflow manifest has the following generic structure:
-
- ```
- workflow:
-   name: Dummy Workflow
-   runner: concurrent
-   tasks:
-     - name: Task_A
-       module: <module_path>
-       task: <func_name>
-       inputsArgType: arg
-       inputs:
-         parameters:
-           - name: x1
-             value: y1
-           - name: x2
-             value: y2
-           - name: x3
-             type: env
-             value: Y3
-
-     - name: Task_B
-       module: <module_path>
-       task: <func_name>
-       inputsArgType: arg
-       inputs:
-         parameters:
-           - name: xx1
-             value: yy1
-           - name: xx2
-             value: yy2
-       dependency:
-         - taskName: Task_A
-           inputParamName: xx3
-
-     - name: Task_C
-       module: <module_path>
-       task: <func_name>
-       inputsArgType: arg
-       conditional:
-         tasks: ["Task_B"]
- ```
-
-
- **runner**: concurrent | sequential. If concurrent is selected, tasks run concurrently.
-
- **module**: all modules are located in the 'modules' directory of kube_watch. This is where you can extend the library and add new tasks / modules. Below modules, there are submodules such as providers, clusters, and logic. Within each of these submodules, specific modules are defined. For example, providers.aws contains a series of tasks related to AWS; in this case, <module_path> = providers.aws. To add new tasks, add a new module following a similar pattern and reference its path in your task block.
-
- **task**: the name of the function defined in <module_path>, i.e. once you define a function in a module, you can start using it in your manifests.
-
- **inputsArgType**: arg | dict | list. If the task function accepts a known, fixed number of parameters, use arg.
-
- **dependency**: this block defines the dependency of a child task on its parent. If **inputParamName** is defined, the OUTPUT of the parent task is passed to the child under the argument name given by inputParamName.
-
- **IMPORTANT NOTE**: a strict assumption is that task functions return a single output. For cases with multiple outputs, wrap them into a dictionary and unwrap them in the child task.
-
- **conditional**: these blocks define when a task runs depending on the outcome of its parents. The parent task should return True or False.
-
-
- Parameters also have a type entry: env | static. static is the default. If the type is env, the parameter value is loaded from environment variables; in this case, value should be the name of the corresponding env var.
-
- In the above examples:
-
- ```
- def Task_A(x1, x2, x3):
-     # do something
-     return output_A
-
- def Task_B(xx1, xx2, xx3):
-     # do something else
-     return output_B
-
- def Task_C():
-     # do another thing
-     return output_C
- ```
-
-
-
- # Batch workflows
- kube_watch can also run workflows in batch. A separate manifest of the following form is required:
-
- ```
- batchFlows:
-   runner: sequential
-   items:
-     - path: path_to_flow_A.yaml
-     - path: path_to_flow_B.yaml
-     - path: path_to_flow_C.yaml
- ```
-
-
- # cron_app
- The cron_app folder contains an example use case of the kube_watch library. cron_app can be used to deploy a CronJob in a Kubernetes environment. The app assumes the manifests are located in a separate repository; it clones the repo, reads the manifests, and runs the workflows.
-
- # Connect to a server
- ## Start Server
- `prefect server start`
- ## To Connect
- To connect to a server, simply set the following environment variable: `PREFECT_API_URL`
+ # kube_watch
+
+ # To set up the project:
+
+ - `poetry install`
+ - `poetry shell`
+
+ # To install package to your environment locally:
+
+ python setup.py install
+
+ # To publish
+
+ `poetry config pypi-token.pypi your-api-token`
+ `poetry build`
+ `poetry publish`
+
+
+ # Description
+ The kube_watch library is built on top of <a href='https://docs.prefect.io/latest/'>Prefect</a>. The library is designed to define workflows in a declarative and flexible fashion. Originally, workflows in Prefect are defined via decorators such as @flow and @task. In kube_watch, workflows can be defined declaratively via yaml files. The library is mainly focused on running scheduled workflows in a Kubernetes environment. However, it can easily be extended for any purpose requiring a workflow. The workflow manifest has the following generic structure:
+
+ ```
+ workflow:
+   name: Dummy Workflow
+   runner: concurrent
+   tasks:
+     - name: Task_A
+       module: <module_path>
+       task: <func_name>
+       inputsArgType: arg
+       inputs:
+         parameters:
+           - name: x1
+             value: y1
+           - name: x2
+             value: y2
+           - name: x3
+             type: env
+             value: Y3
+
+     - name: Task_B
+       module: <module_path>
+       task: <func_name>
+       inputsArgType: arg
+       inputs:
+         parameters:
+           - name: xx1
+             value: yy1
+           - name: xx2
+             value: yy2
+       dependency:
+         - taskName: Task_A
+           inputParamName: xx3
+
+     - name: Task_C
+       module: <module_path>
+       task: <func_name>
+       inputsArgType: arg
+       conditional:
+         tasks: ["Task_B"]
+ ```
+
+
+ **runner**: concurrent | sequential. If concurrent is selected, tasks run concurrently.
+
+ **module**: all modules are located in the 'modules' directory of kube_watch. This is where you can extend the library and add new tasks / modules. Below modules, there are submodules such as providers, clusters, and logic. Within each of these submodules, specific modules are defined. For example, providers.aws contains a series of tasks related to AWS; in this case, <module_path> = providers.aws. To add new tasks, add a new module following a similar pattern and reference its path in your task block.
+
+ **task**: the name of the function defined in <module_path>, i.e. once you define a function in a module, you can start using it in your manifests.
+
+ **inputsArgType**: arg | dict | list. If the task function accepts a known, fixed number of parameters, use arg.
+
+ **dependency**: this block defines the dependency of a child task on its parent. If **inputParamName** is defined, the OUTPUT of the parent task is passed to the child under the argument name given by inputParamName.
+
+ **IMPORTANT NOTE**: a strict assumption is that task functions return a single output. For cases with multiple outputs, wrap them into a dictionary and unwrap them in the child task.
+
+ **conditional**: these blocks define when a task runs depending on the outcome of its parents. The parent task should return True or False.
+
+
+ Parameters also have a type entry: env | static. static is the default. If the type is env, the parameter value is loaded from environment variables; in this case, value should be the name of the corresponding env var.
+
+ In the above examples:
+
+ ```
+ def Task_A(x1, x2, x3):
+     # do something
+     return output_A
+
+ def Task_B(xx1, xx2, xx3):
+     # do something else
+     return output_B
+
+ def Task_C():
+     # do another thing
+     return output_C
+ ```
+
+
+
+ # Batch workflows
+ kube_watch can also run workflows in batch. A separate manifest of the following form is required:
+
+ ```
+ batchFlows:
+   runner: sequential
+   items:
+     - path: path_to_flow_A.yaml
+     - path: path_to_flow_B.yaml
+     - path: path_to_flow_C.yaml
+ ```
+
+
+ # cron_app
+ The cron_app folder contains an example use case of the kube_watch library. cron_app can be used to deploy a CronJob in a Kubernetes environment. The app assumes the manifests are located in a separate repository; it clones the repo, reads the manifests, and runs the workflows.
+
+ # Connect to a server
+ ## Start Server
+ `prefect server start`
+ ## To Connect
+ To connect to a server, simply set the following environment variable: `PREFECT_API_URL`
@@ -3,6 +3,7 @@ from enum import Enum
  class ParameterType(str, Enum):
      STATIC = 'static'
      FROM_ENV = 'env'
+     FROM_FLOW = 'flow'


  class TaskRunners(str, Enum):
@@ -25,10 +25,14 @@ class Condition(CamelModel):
      operation: Optional[Operations] = Operations.AND

  class Task(CamelModel):
+     """
+     :param plugin_path: define if referring to an external module outside the library.
+     """
      module: str
      task: str
      name: str
-     inputsArgType: Optional[TaskInputsType] = TaskInputsType.ARG
+     plugin_path: Optional[str] = ""
+     inputsArgType: Optional[TaskInputsType] = TaskInputsType.ARG  # @TODO refactor inputsArgType to inputs_arg_type
      inputs: Optional[Inputs] = None
      dependency: Optional[List[Dependency]] = None
      conditional: Optional[Condition] = None
@@ -37,6 +41,7 @@ class Task(CamelModel):
  class WorkflowConfig(CamelModel):
      name: str
      runner: TaskRunners = TaskRunners.CONCURRENT
+     parameters: Optional[List[Parameter]] = []
      tasks: List[Task]

  class WorkflowOutput(CamelModel):
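
Taken together, the new `FROM_FLOW` parameter type and the `parameters` field on `WorkflowConfig` let a manifest declare flow-level values that individual tasks read back by name (see `resolve_parameter_value` and the flow setup later in this diff). A minimal manifest sketch, assuming the camel-cased YAML keys follow the library's existing conventions; the task reuses `print_from_flow_parameters` from the new mock module, and all other values are illustrative:

```
workflow:
  name: Flow Params Demo
  runner: sequential
  parameters:            # copied into runtime.flow_run.parameters when the flow starts
    - name: WORK_DIR
      value: /tmp/work
  tasks:
    - name: Print_Work_Dir
      module: mock.mock_generator
      task: print_from_flow_parameters
      inputs:
        parameters:
          - name: work_dir
            type: flow   # FROM_FLOW: 'value' names the flow parameter to read
            value: WORK_DIR
```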
@@ -0,0 +1,56 @@
+ import subprocess
+ import os
+ from typing import List
+ from prefect import get_run_logger
+ logger = get_run_logger()
+
+ def run_standalone_script(package_name, package_run, package_exec):
+     script_dir = os.path.dirname(os.path.realpath(__file__))
+     # script_path = os.path.join(script_dir, package_name.replace('.', os.sep))
+     target_dir = os.path.join(script_dir, os.pardir, os.pardir, *package_name.split('.'))
+
+     full_command = f"{package_run} {os.path.join(target_dir, package_exec)}"
+
+     # Execute the command
+     try:
+         result = subprocess.run(full_command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+         if result.stdout:
+             logger.info(result.stdout)
+         if result.stderr:
+             logger.error(result.stderr)
+         # logger.info(f"Output: {result.stdout}")
+         result.check_returncode()
+     except subprocess.CalledProcessError as e:
+         # All logs should have already been handled above, now just raise an exception
+         logger.error("The subprocess encountered an error: %s", e)
+         raise Exception("Subprocess failed with exit code {}".format(e.returncode))
+
+
+ def run_standalone_script_modified(base_path: str, package_name: str, package_run_cmds: List[str]):
+     # Construct the absolute path to the target directory
+     target_dir = os.path.join(base_path, *package_name.split('.'))
+
+     commands = [f"cd {target_dir}"] + package_run_cmds
+     full_command = " && ".join(commands)
+
+     # full_command = f"cd {target_dir} && {package_run_cmd}"
+
+     # Build the full command to execute
+     # full_command = f"{package_run} {os.path.join(target_dir, package_exec)}"
+
+     # print(full_command)
+
+     # Execute the command
+     try:
+         result = subprocess.run(full_command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+         if result.stdout:
+             logger.info(result.stdout)
+         if result.stderr:
+             logger.error(result.stderr)
+         result.check_returncode()
+     except subprocess.CalledProcessError as e:
+         logger.error("Command failed with exit code %s", e.returncode)
+         logger.error("Output:\n%s", e.stdout)
+         logger.error("Errors:\n%s", e.stderr)
+         raise Exception(f"Subprocess failed with exit code {e.returncode}. Check logs for more details.")
+         # raise Exception(f"Subprocess failed with exit code {e.returncode}")
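
A rough usage sketch for the new `run_standalone_script_modified` follows; the base path is invented for illustration (the script name comes from this package's `standalone/metarecogen` folder), and since the module resolves `get_run_logger()` at import time, it is meant to be imported inside a Prefect run context:

```
from kube_watch.modules.logic.actions import run_standalone_script_modified

# The commands are joined with " && " and executed in a single shell after
# cd-ing into <base_path>/<package_name as path>, so each entry must be a
# self-contained shell command.
run_standalone_script_modified(
    base_path="/opt/scripts",                  # assumed checkout location
    package_name="standalone.metarecogen",     # dots map to path separators
    package_run_cmds=["python ckan_to_gn.py"],
)
```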
@@ -0,0 +1,53 @@
+ import time
+ import random
+ from prefect import runtime
+
+ def generate_number():
+     return 42
+
+
+ def mock_dict_data():
+     return {
+         "key1": "value1",
+         "key2": "value2",
+         "key3": {
+             "k1": 1, "k2": 2, "k3": [1, 3, "ali"]
+         }
+     }
+
+ def print_input(params):
+     print(params)
+
+ def print_number(number, dummy_param, env_var_name):
+     print(f"The generated number is: {number} and the dummy_value is: {dummy_param}")
+     return number, dummy_param, env_var_name
+
+
+ def print_flow_parameters():
+     assert runtime.flow_run.parameters.get("WORK_DIR") is not None
+     assert runtime.flow_run.parameters.get("MODULE_PATH") is not None
+     print(runtime.flow_run.parameters.get("WORK_DIR"))
+     print(runtime.flow_run.parameters.get("MODULE_PATH"))
+
+ def print_from_flow_parameters(work_dir):
+     """
+     work_dir is provided via flow parameters
+     """
+     assert work_dir != ''
+     print(work_dir)
+
+
+ def delay(seconds):
+     time.sleep(seconds)
+
+
+ def random_boolean():
+     return random.choice([True, False])
+
+ def merge_bools(inp_dict):
+     list_bools = [v for k, v in inp_dict.items()]
+     return any(list_bools)
+
+ def print_result(task_name, result):
+     print(f'=========== {task_name} RESULT =================')
+     print(result)
@@ -4,7 +4,6 @@
  import boto3
  import base64
  import json
-
  from datetime import datetime, timezone, timedelta
  from botocore.exceptions import ClientError
  from prefect import get_run_logger
@@ -77,7 +76,17 @@ def task_get_latest_image_digest(session, resource, region, repository_name, tag
  #========================================================================================
  # IAM Cred update
  #========================================================================================
- def task_rotate_iam_creds(session, user_name, old_access_key_id, old_access_key_secret, access_key_id_var_name, access_secret_key_var_name, rotate_interval):
+ def task_rotate_iam_creds(
+         session,
+         user_name,
+         old_access_key_id,
+         old_access_key_secret,
+         access_key_id_var_name,
+         access_secret_key_var_name,
+         rotate_interval,
+         require_smtp_conversion=False,
+         ses_region="ap-southeast-2"
+ ):
      iam = session.client('iam')
      creation_date = None

@@ -102,14 +111,15 @@ def task_rotate_iam_creds(session, user_name, old_access_key_id, old_access_key_

      curr_date = datetime.now(timezone.utc)
      # Check if the key needs rotation
-     if (curr_date.weekday() == 5 and
-             curr_date - creation_date > timedelta(days=dd, hours=hh, minutes=mm)):
+     if (curr_date - creation_date > timedelta(days=dd, hours=hh, minutes=mm)):
          logger.info("Key is older than rotation period, rotating now.")
          # Delete the old key
          delete_iam_user_key(session, user_name, old_access_key_id)

          # Create a new access key
          access_key_id, secret_access_key = create_iam_user_key(session, user_name)
+         if require_smtp_conversion:
+             secret_access_key = convert_to_smtp_password(secret_access_key, ses_region)
          return {access_key_id_var_name: access_key_id, access_secret_key_var_name: secret_access_key}

      else:
@@ -117,9 +127,9 @@ def task_rotate_iam_creds(session, user_name, old_access_key_id, old_access_key_
          return {access_key_id_var_name: old_access_key_id, access_secret_key_var_name: old_access_key_secret}


+
  def create_iam_user_key(session, user_name):
      iam = session.client('iam')
-
      # Check if the user exists
      try:
          iam.get_user(UserName=user_name)
@@ -152,3 +162,50 @@ def delete_iam_user_key(session, user_name, access_key_id):
      except Exception as e:
          raise Exception(f"Failed to delete old key: {e}")

+
+
+ def convert_to_smtp_password(secret_access_key, region):
+     """Convert IAM Secret Key to SMTP Password."""
+     import hmac
+     import hashlib
+     import base64
+
+     SMTP_REGIONS = [
+         'us-east-2',       # US East (Ohio)
+         'us-east-1',       # US East (N. Virginia)
+         'us-west-2',       # US West (Oregon)
+         'ap-south-1',      # Asia Pacific (Mumbai)
+         'ap-northeast-2',  # Asia Pacific (Seoul)
+         'ap-southeast-1',  # Asia Pacific (Singapore)
+         'ap-southeast-2',  # Asia Pacific (Sydney)
+         'ap-northeast-1',  # Asia Pacific (Tokyo)
+         'ca-central-1',    # Canada (Central)
+         'eu-central-1',    # Europe (Frankfurt)
+         'eu-west-1',       # Europe (Ireland)
+         'eu-west-2',       # Europe (London)
+         'sa-east-1',       # South America (Sao Paulo)
+         'us-gov-west-1',   # AWS GovCloud (US)
+     ]
+
+     # These values are required to calculate the signature. Do not change them.
+     DATE = "11111111"
+     SERVICE = "ses"
+     MESSAGE = "SendRawEmail"
+     TERMINAL = "aws4_request"
+     VERSION = 0x04
+
+     def sign(key, msg):
+         return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
+
+     if region not in SMTP_REGIONS:
+         raise ValueError(f"The {region} Region doesn't have an SMTP endpoint.")
+
+     signature = sign(("AWS4" + secret_access_key).encode('utf-8'), DATE)
+     signature = sign(signature, region)
+     signature = sign(signature, SERVICE)
+     signature = sign(signature, TERMINAL)
+     signature = sign(signature, MESSAGE)
+     signature_and_version = bytes([VERSION]) + signature
+     smtp_password = base64.b64encode(signature_and_version)
+     return smtp_password.decode('utf-8')
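
For reference, `convert_to_smtp_password` follows AWS's documented recipe for deriving an SES SMTP password from an IAM secret access key: a chained HMAC-SHA256 over a fixed date, the region, the `ses` service name, `aws4_request`, and `SendRawEmail`, with a version byte prepended and the result base64-encoded. A hedged usage sketch, using the sample secret key from the AWS documentation:

```
# The SMTP username remains the access key id; only the secret is transformed.
smtp_password = convert_to_smtp_password(
    secret_access_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",  # AWS docs sample
    region="ap-southeast-2",
)
```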
@@ -1,7 +1,7 @@
  from prefect import task
- import functools
- import asyncio
+ import sys
  from prefect.task_runners import ConcurrentTaskRunner, SequentialTaskRunner
+ from prefect import runtime
  # from prefect_dask.task_runners import DaskTaskRunner
  from typing import Dict, List
  import yaml
@@ -51,28 +51,46 @@ def func_task(name="default_task_name", task_input_type: TaskInputsType = TaskIn



- def get_task_function(module_name, task_name):
-     # module = importlib.import_module(f"sparrow_watch.modules.{module_name}")
-     # klass = getattr(module, class_name)
-     # return getattr(klass, task_name)
+ def get_task_function(module_name, task_name, plugin_path=None):
      """
      Fetch a function directly from a specified module.

      Args:
          module_name (str): The name of the module to import the function from. e.g. providers.aws
          task_name (str): The name of the function to fetch from the module.
+         plugin_path (str): set for external modules loaded from outside the library

      Returns:
          function: The function object fetched from the module.
      """
-     module = importlib.import_module(f"kube_watch.modules.{module_name}")
-     return getattr(module, task_name)
+     try:
+         if plugin_path:
+             # Temporarily prepend the plugin path to sys.path to find the module
+             module_path = os.path.join(plugin_path, *module_name.split('.')) + '.py'
+             module_spec = importlib.util.spec_from_file_location(module_name, module_path)
+             module = importlib.util.module_from_spec(module_spec)
+             module_spec.loader.exec_module(module)
+         else:
+             # Standard import from the internal library path
+             module = importlib.import_module(f"kube_watch.modules.{module_name}")
+
+         return getattr(module, task_name)
+     except ImportError as e:
+         raise ImportError(f"Unable to import module '{module_name}': {e}")
+     except AttributeError as e:
+         raise AttributeError(f"The module '{module_name}' does not have a function named '{task_name}': {e}")
+     finally:
+         if plugin_path:
+             # Remove the plugin path from sys.path after importing
+             sys.path.pop(0)  # Using pop(0) is safer in the context of insert(0, plugin_path)



  def resolve_parameter_value(param):
      if param.type == ParameterType.FROM_ENV:
          return os.getenv(param.value, '')  # Default to empty string if env var is not set
+     if param.type == ParameterType.FROM_FLOW:
+         return runtime.flow_run.parameters.get(param.value, '')
      return param.value

  def prepare_task_inputs(parameters):
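
With the `plugin_path` branch above, an external task module is just a `.py` file resolved as `<plugin_path>/<module path>.py` and loaded with `importlib.util`; it needs nothing beyond top-level functions. A minimal sketch with invented names (`/opt/plugins`, `custom/hello.py`, `say_hello`), assuming the manifest exposes the field as the camel-cased `pluginPath`:

```
# /opt/plugins/custom/hello.py -- hypothetical external plugin module,
# referenced from a manifest task as module: custom.hello, task: say_hello,
# with pluginPath: /opt/plugins on the task block.

def say_hello(name):
    # Task functions return a single output, per the library's convention.
    return f"hello, {name}"
```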
@@ -1,4 +1,4 @@
- from prefect import flow, get_run_logger
+ from prefect import flow, get_run_logger, runtime
  import asyncio
  from typing import List
  import secrets
@@ -20,10 +20,14 @@ def create_flow_based_on_config(yaml_file, run_async=True):
      async def dynamic_workflow():
          logger = get_run_logger()
          tasks = {}
+
+         for param in workflow_config.parameters:
+             runtime.flow_run.parameters[param.name] = param.value
+
          logger.info(f"Starting flow: {flow_name}")
          for task_data in workflow_config.tasks:
              task_name = task_data.name
-             func = helpers.get_task_function(task_data.module, task_data.task)
+             func = helpers.get_task_function(task_data.module, task_data.task, task_data.plugin_path)
              task_inputs = helpers.prepare_task_inputs(task_data.inputs.parameters) if task_data.inputs else {}

              condition_result = True
@@ -38,17 +42,6 @@ def create_flow_based_on_config(yaml_file, run_async=True):
              task_future = helpers.submit_task(task_name, task_data, task_inputs, func)
              tasks[task_data.name] = task_future

-             # if task_data.dependency:
-             #     task_inputs = helpers.prepare_task_inputs_from_dep(task_data, task_inputs, tasks)
-
-             # if task_data.conditional:
-             #     condition_result = helpers.resolve_conditional(task_data, tasks)
-             #     if condition_result:
-             #         task_future = helpers.submit_task(task_name, task_data, task_inputs, func)
-             #         tasks[task_data.name] = task_future
-             #     else:
-             #         task_future = helpers.submit_task(task_name, task_data, task_inputs, func)
-             #         tasks[task_data.name] = task_future

          return tasks
      return dynamic_workflow
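
For context, a hedged end-to-end sketch of driving this entry point: `create_flow_based_on_config` builds and returns the `dynamic_workflow` coroutine from a manifest (whether it is already wrapped as a Prefect flow is not visible in this diff), so a caller might do something like:

```
import asyncio

from kube_watch.watch.workflow import create_flow_based_on_config

# "flow.yaml" is an illustrative manifest path.
dynamic_workflow = create_flow_based_on_config("flow.yaml")
tasks = asyncio.run(dynamic_workflow())  # assumes the default async runner
```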
@@ -1,23 +1,21 @@
  [tool.poetry]
  name = "kubernetes-watch"
- version = "0.1.1"
+ version = "0.1.3"
  description = ""
  authors = ["bmotevalli <b.motevalli@gmail.com>"]
- readme = "README.md"
  packages = [{include = "kube_watch"}]
- license = "MIT"
+ readme = "README.md"

  [tool.poetry.dependencies]
  python = ">=3.10,<4"
- prefect = "2.18.0"
- boto3 = "1.34.68"
- hvac = "2.1.0"
- humps = "0.2.2"
- requests = "2.31.0"
- kubernetes = "29.0.0"
- pyyaml = "6.0.1"
- gitpython = "3.1.43"
- cryptography = "42.0.5"
+ prefect = "^2.18.0"
+ boto3 = "^1.34.68"
+ hvac = "^2.1.0"
+ humps = "^0.2.2"
+ requests = "^2.32.3"
+ kubernetes = "^29.0.0"
+ PyYAML = "^6.0.1"
+ GitPython = "^3.1.43"


  [build-system]
@@ -1,26 +0,0 @@
- import subprocess
- import os
- from prefect import get_run_logger
- logger = get_run_logger()
-
- def run_standalone_script(package_name, package_run, package_exec):
-     script_dir = os.path.dirname(os.path.realpath(__file__))
-     # script_path = os.path.join(script_dir, package_name.replace('.', os.sep))
-     target_dir = os.path.join(script_dir, os.pardir, os.pardir, *package_name.split('.'))
-
-     # Change the current working directory to the script directory
-     full_command = f"{package_run} {os.path.join(target_dir, package_exec)}"
-
-     # Execute the command
-     try:
-         result = subprocess.run(full_command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
-         if result.stdout:
-             logger.info(result.stdout)
-         if result.stderr:
-             logger.error(result.stderr)
-         # logger.info(f"Output: {result.stdout}")
-         result.check_returncode()
-     except subprocess.CalledProcessError as e:
-         # All logs should have already been handled above, now just raise an exception
-         logger.error("The subprocess encountered an error: %s", e)
-         raise Exception("Subprocess failed with exit code {}".format(e.returncode))
@@ -1,24 +0,0 @@
- import time
- import random
-
- def generate_number():
-     return 42
-
- def print_number(number, dummy_param, env_var_name):
-     print(f"The generated number is: {number} and the dummy_value is: {dummy_param}")
-     return number, dummy_param, env_var_name
-
- def delay(seconds):
-     time.sleep(seconds)
-
-
- def random_boolean():
-     return random.choice([True, False])
-
- def merge_bools(inp_dict):
-     list_bools = [v for k, v in inp_dict.items()]
-     return any(list_bools)
-
- def print_result(task_name, result):
-     print(f'=========== {task_name} RESULT =================')
-     print(result)