kubernetes-watch 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kube_watch/__init__.py ADDED
File without changes
File without changes
@@ -0,0 +1,6 @@
1
+ from enum import Enum
2
+
3
+
4
class Hosts(str, Enum):
    """Where the watcher process runs relative to the target Kubernetes cluster.

    Consumed by kube.setup(): LOCAL loads a kubeconfig file, REMOTE loads
    the in-cluster service-account configuration.
    """
    LOCAL = 'local'
    REMOTE = 'remote'
@@ -0,0 +1,9 @@
1
+ from enum import Enum
2
+
3
class Operations(str, Enum):
    """Reductions for merging the outputs of multiple tasks."""
    # Logical reductions (the only ones handled by logic.merge_logical_list).
    OR = 'or'
    AND = 'and'
    # Numeric reductions.
    SUM = 'sum'
    AVG = 'avg'
    MAX = 'max'
    MIN = 'min'
@@ -0,0 +1,13 @@
1
+ from enum import Enum
2
+
3
class Providers(str, Enum):
    """External providers the watcher integrates with."""
    AWS = "aws"
    AZURE = "azure"
    GCP = "gcp"
    VAULT = "vault"
8
+
9
+
10
class AwsResources(str, Enum):
    """AWS services addressed by the AWS tasks."""
    ECR = "ecr"  # elastic container registry
    S3 = "s3"
    IAM = "iam"
@@ -0,0 +1,17 @@
1
+ from enum import Enum
2
+
3
class ParameterType(str, Enum):
    """How a task parameter's value is sourced."""
    STATIC = 'static'  # value used verbatim from the config
    FROM_ENV = 'env'   # presumably resolved from an environment variable — confirm with resolver
6
+
7
+
8
class TaskRunners(str, Enum):
    """Execution back-ends a workflow can run its tasks on."""
    SEQUENTIAL = 'sequential'
    CONCURRENT = 'concurrent'
    DASK = 'dask'
    RAY = 'ray'
13
+
14
+
15
class TaskInputsType(str, Enum):
    """Shape in which a task receives its resolved inputs ('arg' vs 'dict' style)."""
    ARG = 'arg'
    DICT = 'dict'
File without changes
@@ -0,0 +1,17 @@
1
+ from pydantic import BaseModel, ConfigDict
2
+ from humps.camel import case
3
+
4
def to_camel(string):
    """Alias generator: camelCase a field name, special-casing Mongo-style ids.

    "id" is exposed as "_id", and names that already start with an
    underscore pass through unchanged; everything else is camelized.
    """
    if string == "id":
        return "_id"
    return string if string.startswith("_") else case(string)
10
+
11
class CamelModel(BaseModel):
    """
    Replacement for pydantic BaseModel which simply adds a camel case alias to every field
    NOTE: This has been updated for Pydantic 2 to remove some common encoding helpers
    """

    # populate_by_name lets callers construct models with either the snake_case
    # field name or its camelCase alias.
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)
@@ -0,0 +1,55 @@
1
+ from typing import List, Optional, Dict, Any
2
+ from kube_watch.enums.workflow import ParameterType, TaskRunners, TaskInputsType
3
+ from kube_watch.enums.logic import Operations
4
+
5
+ from .common import CamelModel
6
+
7
class Parameter(CamelModel):
    """A single named input value for a task."""
    name: str
    value: Any
    # How the value is sourced — see ParameterType ('static' vs 'env').
    type: Optional[ParameterType] = ParameterType.STATIC
11
+
12
class Artifact(CamelModel):
    """A file-based task input, referenced by path."""
    path: str
14
+
15
class Inputs(CamelModel):
    """Bundle of parameters and artifacts supplied to a task."""
    parameters: Optional[List[Parameter]] = []
    artifacts: Optional[List[Artifact]] = []
18
+
19
class Dependency(CamelModel):
    """Link to an upstream task whose output feeds this task."""
    # NOTE(review): these field names are already camelCase, so the to_camel
    # alias generator is a no-op for them — verify this is intentional.
    taskName: str
    inputParamName: Optional[str] = None
22
+
23
class Condition(CamelModel):
    """Execution gate: combine the boolean outputs of the listed tasks."""
    tasks: List[str]
    # Reduction applied across the referenced tasks' outputs (default AND).
    operation: Optional[Operations] = Operations.AND
26
+
27
class Task(CamelModel):
    """One unit of work in a workflow."""
    module: str  # module providing the task callable — presumably a dotted path; confirm with loader
    task: str    # name of the callable within the module
    name: str    # task identifier, referenced by Dependency.taskName / Condition.tasks
    # 'arg' vs 'dict' input passing — see TaskInputsType.
    inputsArgType: Optional[TaskInputsType] = TaskInputsType.ARG
    inputs: Optional[Inputs] = None
    dependency: Optional[List[Dependency]] = None
    conditional: Optional[Condition] = None
    outputs: Optional[List[str]] = None
36
+
37
class WorkflowConfig(CamelModel):
    """Top-level workflow definition: a named list of tasks plus a runner."""
    name: str
    runner: TaskRunners = TaskRunners.CONCURRENT
    tasks: List[Task]
41
+
42
class WorkflowOutput(CamelModel):
    """Result wrapper returned after running a workflow."""
    flow_run: Any  # presumably the Prefect flow-run object — confirm with caller
    config: Any    # the configuration the flow was run with
45
+
46
class BatchFlowItem(CamelModel):
    """One entry of a batch flow, referenced by a path."""
    path: str
48
+
49
class BatchFlowConfig(CamelModel):
    """A batch of flows executed together."""
    # Only possible runners are concurrent and sequential
    runner: TaskRunners = TaskRunners.CONCURRENT
    items: List[BatchFlowItem]
53
+
54
+
55
+
File without changes
File without changes
@@ -0,0 +1,186 @@
1
+ from prefect import get_run_logger
2
+ from typing import List
3
+ from kubernetes import config
4
+ from kubernetes import client
5
+ from kubernetes.client.rest import ApiException
6
+ import base64
7
+ import datetime
8
+
9
+ from kube_watch.enums.kube import Hosts
10
+
11
+ logger = get_run_logger()
12
+
13
+
14
def setup(host=Hosts.REMOTE, context=None):
    """Load Kubernetes client configuration for the chosen host mode.

    :param host: Hosts.LOCAL loads a kubeconfig file (optionally for a named
        context); any other value assumes in-cluster execution.
    :param context: kubeconfig context name; only used for local runs.
    """
    if host != Hosts.LOCAL:
        # Running inside a Kubernetes cluster: use the pod's service account.
        config.load_incluster_config()
        return
    # Local development outside the cluster: use the developer's kubeconfig.
    config.load_kube_config(context=context)
21
+
22
+
23
+
24
def create_or_update_configmap(config_name, namespace, data):
    """
    Create or update a ConfigMap in a specified namespace if the data is different.

    :param config_name: The name of the ConfigMap.
    :param namespace: The namespace of the ConfigMap.
    :param data: A dictionary containing the data for the ConfigMap.
    :return: False when the existing ConfigMap already holds ``data``; True when
        it was replaced; ``{'trigger_restart': True}`` when it had to be created.
    :raises ValueError: if the ConfigMap cannot be read, created or replaced.
    """
    v1 = client.CoreV1Api()
    configmap_metadata = client.V1ObjectMeta(name=config_name, namespace=namespace)
    configmap = client.V1ConfigMap(api_version="v1", kind="ConfigMap",
                                   metadata=configmap_metadata, data=data)

    try:
        existing_configmap = v1.read_namespaced_config_map(name=config_name, namespace=namespace)
        # Compare the existing ConfigMap's data with the new data.
        if existing_configmap.data == data:
            logger.info("No update needed for ConfigMap: {}".format(config_name))
            return False
        # Data is different, replace the ConfigMap in place.
        api_response = v1.replace_namespaced_config_map(name=config_name, namespace=namespace, body=configmap)
        logger.info("ConfigMap updated. Name: {}".format(api_response.metadata.name))
        return True
    except ApiException as e:
        if e.status == 404:  # ConfigMap not found, create it
            try:
                api_response = v1.create_namespaced_config_map(namespace=namespace, body=configmap)
                logger.info("ConfigMap created. Name: {}".format(api_response.metadata.name))
                return {'trigger_restart': True}
            except ApiException as create_err:  # separate name: don't shadow the outer exception
                logger.error("Exception when creating ConfigMap: {}".format(create_err))
                raise ValueError("Failed to create ConfigMap {}".format(config_name)) from create_err
        logger.error("Failed to get or create ConfigMap: {}".format(e))
        raise ValueError("Failed to read or replace ConfigMap {}".format(config_name)) from e
60
+
61
+
62
def create_or_update_secret(secret_name, namespace, data, secret_type=None):
    """
    Create or update a Secret in a specified namespace if the data is different.

    :param secret_name: The name of the Secret.
    :param namespace: The namespace of the Secret.
    :param data: A dictionary containing the data for the Secret. Values must be
        plain strings (not Base64 encoded).
    :param secret_type: Kubernetes secret type; defaults to "Opaque".
    :return: False when the existing Secret already holds ``data``; True when it
        was replaced; ``{'trigger_restart': True}`` when it had to be created.
    :raises ValueError: if the Secret cannot be read, created or replaced.
    """
    if secret_type is None:
        secret_type = "Opaque"

    v1 = client.CoreV1Api()
    secret_metadata = client.V1ObjectMeta(name=secret_name, namespace=namespace)
    secret = client.V1Secret(
        api_version="v1",
        kind="Secret",
        metadata=secret_metadata,
        string_data=data,  # string_data lets the API server do the base64 encoding
        type=secret_type
    )

    try:
        existing_secret = v1.read_namespaced_secret(name=secret_name, namespace=namespace)
        # The API returns stored values base64 encoded; encode the new data to compare.
        encoded_data = {k: base64.b64encode(v.encode()).decode() for k, v in data.items()}

        if existing_secret.data == encoded_data:
            logger.info("No update needed for Secret: {}".format(secret_name))
            return False
        # Data is different, replace the Secret in place.
        api_response = v1.replace_namespaced_secret(name=secret_name, namespace=namespace, body=secret)
        logger.info("Secret updated. Name: {}".format(api_response.metadata.name))
        return True

    except ApiException as e:
        if e.status == 404:  # Secret not found, create it
            try:
                api_response = v1.create_namespaced_secret(namespace=namespace, body=secret)
                logger.info("Secret created. Name: {}".format(api_response.metadata.name))
                return {'trigger_restart': True}
            except ApiException as create_err:  # separate name: don't shadow the outer exception
                logger.error("Exception when creating Secret: {}".format(create_err))
                raise ValueError("Failed to create Secret {}".format(secret_name)) from create_err
        logger.error("Failed to get or create Secret: {}".format(e))
        raise ValueError("Failed to read or replace Secret {}".format(secret_name)) from e
111
+
112
+
113
def get_kubernetes_secret(secret_name, namespace):
    """Read a Secret and return its data base64-decoded, or None on failure.

    Assumes the Kubernetes client configuration has already been loaded
    (see setup()).
    """
    v1 = client.CoreV1Api()
    try:
        secret = v1.read_namespaced_secret(secret_name, namespace)
        # Stored values come back base64 encoded; decode each entry to text.
        return {
            key: base64.b64decode(raw).decode('utf-8')
            for key, raw in secret.data.items()
        }
    except ApiException as e:
        logger.error(f"Failed to get secret: {e}")
        return None
124
+
125
+
126
def restart_deployment(deployment, namespace):
    """
    Trigger a rollout restart of a deployment in a specified namespace.

    Mirrors ``kubectl rollout restart``: patching the pod-template annotation
    makes the Deployment re-create all of its pods. Failures are logged and
    swallowed (best-effort), matching the original behaviour.

    :param deployment: The name of the deployment.
    :param namespace: The namespace of the deployment.
    """
    v1 = client.AppsV1Api()
    body = {
        'spec': {
            'template': {
                'metadata': {
                    'annotations': {
                        # Timezone-aware timestamp: datetime.utcnow() is naive
                        # and deprecated since Python 3.12.
                        'kubectl.kubernetes.io/restartedAt': datetime.datetime.now(datetime.timezone.utc).isoformat()
                    }
                }
            }
        }
    }
    try:
        api_response = v1.patch_namespaced_deployment(name=deployment, namespace=namespace, body=body)
        logger.info(f"Deployment restarted. Name: {api_response.metadata.name}")
    except ApiException as e:
        logger.error(f"Exception when restarting deployment: {e}")
151
+
152
+
153
def has_mismatch_image_digest(repo_digest, label_selector, namespace):
    """
    Check all pods in the given namespace and matching the label selector for any
    mismatch between the latest image digest and the current image digest.

    parameters:
    - repo_digest: The latest image digest to compare against.
    - label_selector: The label selector to identify the relevant pods.
    - namespace: The namespace to search for pods.

    Returns:
    - True if any pod is found with an image digest mismatch.
    - False if all pods match the latest image digest (or no pods matched).
    """
    core_v1_api = client.CoreV1Api()

    # Fetch pods based on namespace and label selector
    pods = core_v1_api.list_namespaced_pod(namespace, label_selector=label_selector)

    # Track the last digest seen so the in-sync log below never references an
    # undefined name (the original raised NameError when no containers matched).
    last_digest = None
    for pod in pods.items:
        # status.container_statuses is None while a pod is still scheduling.
        for container_status in (pod.status.container_statuses or []):
            last_digest = container_status.image_id.split('@')[-1]
            if last_digest != repo_digest:
                logger.info(f"Mismatch found in pod: {pod.metadata.name}, container: {container_status.name}")
                logger.info(f"Repo digest: {repo_digest}")
                logger.info(f"Curr digest: {last_digest}")
                return True

    logger.info("Images are in-sync.")
    logger.info(f"Repo digest: {repo_digest}")
    logger.info(f"Curr digest: {last_digest}")
    return False
@@ -0,0 +1,26 @@
1
+ import subprocess
2
+ import os
3
+ from prefect import get_run_logger
4
+ logger = get_run_logger()
5
+
6
def run_standalone_script(package_name, package_run, package_exec):
    """Run a packaged script in a subprocess and surface its output in the logs.

    :param package_name: dotted package path, resolved relative to this file's
        grandparent directory.
    :param package_run: interpreter/command used to execute the script.
    :param package_exec: script filename inside the package directory.
    :raises Exception: if the subprocess exits with a non-zero return code.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    target_dir = os.path.join(script_dir, os.pardir, os.pardir, *package_name.split('.'))

    # NOTE(review): shell=True with interpolated paths is shell-injection prone;
    # inputs are expected to be trusted configuration values.
    full_command = f"{package_run} {os.path.join(target_dir, package_exec)}"

    # check=False so stdout/stderr are always logged before raising. With
    # check=True (the original code) CalledProcessError fired before the
    # logging lines, so the script's output was lost exactly when it failed.
    result = subprocess.run(full_command, shell=True, check=False,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    if result.stdout:
        logger.info(result.stdout)
    if result.stderr:
        logger.error(result.stderr)
    try:
        result.check_returncode()
    except subprocess.CalledProcessError as e:
        logger.error("The subprocess encountered an error: %s", e)
        raise Exception("Subprocess failed with exit code {}".format(e.returncode))
@@ -0,0 +1,8 @@
1
+
2
+
3
def dicts_has_diff(dict_a, dict_b):
    """Return True when the two dictionaries differ in keys or values."""
    return not (dict_a == dict_b)
5
+
6
+
7
def remove_keys(d, keys):
    """Return a shallow copy of *d* with every key contained in *keys* dropped."""
    return {key: d[key] for key in d if key not in keys}
@@ -0,0 +1,8 @@
1
+ import os
2
+ from prefect import get_run_logger
3
+ logger = get_run_logger()
4
+
5
def load_secrets_to_env(data):
    """Export every key/value pair in *data* as a process environment variable.

    Values must already be strings; os.environ rejects anything else.
    """
    for name, secret in data.items():
        os.environ[name] = secret
@@ -0,0 +1,31 @@
1
+ from typing import Any, List, Dict
2
+ from kube_watch.enums.logic import Operations
3
+
4
+
5
def merge_logical_outputs(inp_dict: Dict):
    """
    Combine the boolean outputs of several tasks into a single boolean.

    :param inp_dict: mapping of task name -> boolean output, plus a required
        'operation' entry selecting the reduction (an Operations value).
    :return: the operands combined with the requested logical operation.
    :raises TypeError: if the 'operation' key is missing.
    """
    if 'operation' not in inp_dict:
        raise TypeError("Missing required parameters: 'operation'")
    operation = inp_dict['operation']
    # Collect the operands without mutating the caller's dict — the previous
    # implementation deleted the 'operation' key from the input in place.
    inputs = [v for k, v in inp_dict.items() if k != 'operation']
    return merge_logical_list(inputs, operation)
13
+
14
+
15
def merge_logical_list(inp_list: List, operation: Operations):
    """Reduce a list of booleans with the given logical operation (AND/OR).

    :raises ValueError: for any operation other than Operations.AND / Operations.OR.
    """
    if operation == Operations.AND:
        return all(inp_list)
    if operation == Operations.OR:
        return any(inp_list)
    raise ValueError("Invalid logical operation")
21
+
22
+
23
def partial_dict_update(orig_data, new_data):
    """
    Update *orig_data* in place with every key/value pair from *new_data* and
    return it. Keys absent from *new_data* keep their original values.
    """
    orig_data.update(new_data)
    return orig_data
@@ -0,0 +1,74 @@
1
+ from datetime import datetime
2
+ from enum import Enum
3
+
4
+ from prefect import get_run_logger
5
+ logger = get_run_logger()
6
+
7
class IntervalType(Enum):
    """Schedule granularities understood by should_run_task."""
    MINUTES = 'minutes'
    HOURLY = 'hourly'
    DAILY = 'daily'
    WEEKLY = 'weekly'
    MONTHLY = 'monthly'
    QUARTERLY = 'quarterly'
    SEMIANNUAL = 'semiannual'
    YEARLY = 'yearly'
16
+
17
+
18
def should_run_task(interval_type, interval_value=None, interval_buffer=10, specific_day=None):
    """
    Decide whether a scheduled task is due at the current local time.

    :param interval_type: schedule granularity as a plain string matching an
        IntervalType value ('minutes', 'hourly', 'daily', 'weekly', 'monthly',
        'quarterly', 'semiannual', 'yearly'). Comparison is against
        ``IntervalType.X.value``, so pass the string, not the enum member.
    :param interval_value: type-dependent: the step for 'minutes'/'hourly', the
        hour of day for 'daily', the day of month for 'monthly'. Required for
        those types (omitting it raises TypeError from the arithmetic below).
    :param interval_buffer: tolerance window in minutes (default 10) so a check
        running shortly after the exact boundary still fires.
    :param specific_day: weekday for 'weekly' schedules (0=Monday .. 6=Sunday).
    :return: True when the current time falls inside the schedule window; False
        otherwise, including for unrecognised interval types.
    """

    # NOTE(review): naive local time — confirm the scheduler host runs in the
    # intended timezone.
    now = datetime.now()
    # Match the interval type
    if interval_type == IntervalType.MINUTES.value:
        # Runs every 'interval_value' minutes
        return now.minute % interval_value == 0

    if interval_type == IntervalType.HOURLY.value:
        # Runs every 'interval_value' hours on the hour
        return now.hour % interval_value == 0 and now.minute < interval_buffer

    if interval_type == IntervalType.DAILY.value:
        # Runs once a day at 'interval_value' hour
        return now.hour == interval_value and now.minute < interval_buffer

    if interval_type == IntervalType.WEEKLY.value:
        # Runs once a week on 'specific_day' (0=Monday, 6=Sunday)
        return now.weekday() == specific_day and now.hour == 0 and now.minute < interval_buffer

    if interval_type == IntervalType.MONTHLY.value:
        # Runs on the 'interval_value' day of each month
        return now.day == interval_value and now.hour == 0 and now.minute < interval_buffer

    if interval_type == IntervalType.QUARTERLY.value:
        # Runs on the first day of each quarter
        return now.month % 3 == 1 and now.day == 1 and now.hour == 0 and now.minute < interval_buffer

    if interval_type == IntervalType.SEMIANNUAL.value:
        # Runs on the first day of the 1st and 7th month
        return (now.month == 1 or now.month == 7) and now.day == 1 and now.hour == 0 and now.minute < interval_buffer

    if interval_type == IntervalType.YEARLY.value:
        # Runs on the first day of the year
        return now.month == 1 and now.day == 1 and now.hour == 0 and now.minute < interval_buffer

    return False
74
+
File without changes
File without changes
@@ -0,0 +1,24 @@
1
+ import time
2
+ import random
3
+
4
def generate_number():
    """Return the fixed test value 42."""
    return 42
6
+
7
def print_number(number, dummy_param, env_var_name):
    """Echo a generated number to stdout and pass all arguments through unchanged."""
    message = f"The generated number is: {number} and the dummy_value is: {dummy_param}"
    print(message)
    return number, dummy_param, env_var_name
10
+
11
def delay(seconds):
    """Block the caller for *seconds* seconds."""
    time.sleep(seconds)
13
+
14
+
15
def random_boolean():
    """Return True or False, chosen uniformly at random."""
    return random.choice((True, False))
17
+
18
def merge_bools(inp_dict):
    """Logical OR across every value of *inp_dict* (False for an empty dict)."""
    return any(inp_dict.values())
21
+
22
def print_result(task_name, result):
    """Print *result* beneath a banner naming the task."""
    banner = f'=========== {task_name} RESULT ================='
    print(banner)
    print(result)
File without changes
@@ -0,0 +1,154 @@
1
+ #========================================================================
2
+ # This class is deprecated. Please refer to aws.py
3
+ #========================================================================
4
+ import boto3
5
+ import base64
6
+ import json
7
+
8
+ from datetime import datetime , timezone, timedelta
9
+ from botocore.exceptions import ClientError
10
+ from prefect import get_run_logger
11
+ from kube_watch.enums.providers import AwsResources
12
+
13
+ logger = get_run_logger()
14
+
15
def create_session(aws_creds):
    """Build a boto3 session from a credential payload.

    :param aws_creds: mapping with a 'data' entry holding 'access_key',
        'secret_key' and (optionally) 'security_token'.
    :return: the configured boto3.Session.
    """
    creds = aws_creds.get('data')
    session = boto3.Session(
        aws_access_key_id=creds.get('access_key'),
        aws_secret_access_key=creds.get('secret_key'),
        aws_session_token=creds.get('security_token'),  # optional field
    )
    logger.info("Created AWS Session Successfully!")
    return session
24
+
25
+ #========================================================================================
26
+ # ECR
27
+ #========================================================================================
28
+ def _prepare_ecr_secret_data(username, password, auth_token, ecr_url):
29
+ key_url = f"https://{ecr_url}"
30
+ docker_config_dict = {
31
+ "auths": {
32
+ key_url: {
33
+ "username": username,
34
+ "password": password,
35
+ "auth": auth_token,
36
+ }
37
+ }
38
+ }
39
+ return {'.dockerconfigjson': json.dumps(docker_config_dict)}
40
+
41
def task_get_access_token(session, resource, region, base_image_url):
    """Fetch registry credentials for the given AWS resource.

    Only ECR is supported: the authorization token is decoded into
    username/password and packaged as docker-registry secret data.

    :raises ValueError: for any resource other than AwsResources.ECR.
    """
    if resource != AwsResources.ECR:
        raise ValueError('Unknown resource')

    ecr_client = session.client('ecr', region_name=region)
    token_response = ecr_client.get_authorization_token()
    raw_token = token_response['authorizationData'][0]['authorizationToken']
    username, password = base64.b64decode(raw_token).decode().split(':')
    return _prepare_ecr_secret_data(username, password, raw_token, base_image_url)
52
+
53
+
54
def task_get_latest_image_digest(session, resource, region, repository_name, tag):
    """
    Fetches the digest of the latest image from the specified ECR repository.

    :return: the image digest string, or None when no image matches the tag or
        the lookup fails.
    :raises ValueError: for any resource other than AwsResources.ECR.
    """
    if resource != AwsResources.ECR:
        raise ValueError('Unknown resource')

    ecr_client = session.client('ecr', region_name=region)
    try:
        response = ecr_client.describe_images(
            repositoryName=repository_name,
            imageIds=[{'imageTag': tag}],
            filter={'tagStatus': 'TAGGED'}
        )
        images = response['imageDetails']
        # Assuming the tag is used correctly, there should be only one such image.
        if images:
            return images[0]['imageDigest']
        # BUG FIX: the original fell through here and raised the misleading
        # ValueError('Unknown resource') when the repo had no matching images.
        return None
    except Exception as e:
        logger.error(f"Error fetching latest image digest: {e}")
        return None
76
+
77
+ #========================================================================================
78
+ # IAM Cred update
79
+ #========================================================================================
80
def task_rotate_iam_creds(session, user_name, old_access_key_id, old_access_key_secret, access_key_id_var_name, access_secret_key_var_name, rotate_interval):
    """
    Rotate an IAM user's access key when it is older than the rotation interval.

    Rotation only ever happens on Saturdays (``weekday() == 5``); on any other
    day, or when the key is still young enough, the old credentials are
    returned unchanged.

    :param session: boto3 session used to build the IAM client.
    :param user_name: IAM user owning the access key.
    :param old_access_key_id: currently active access key id; must exist.
    :param old_access_key_secret: currently active secret, echoed back when no
        rotation happens.
    :param access_key_id_var_name: dict key under which the key id is returned.
    :param access_secret_key_var_name: dict key under which the secret is returned.
    :param rotate_interval: maximum key age as a "DD:HH:MM" string.
    :return: dict mapping the two *_var_name parameters to the (possibly new)
        credentials.
    :raises Exception: if listing the user's access keys fails.
    :raises KeyError: if old_access_key_id is not found on the user.
    """
    iam = session.client('iam')
    creation_date = None

    # Retrieve the specified access key
    has_key_exist = False
    try:
        response = iam.list_access_keys(UserName=user_name)
        for key in response['AccessKeyMetadata']:
            if key['AccessKeyId'] == old_access_key_id:
                creation_date = key['CreateDate']
                has_key_exist = True
                break
    except ClientError as error:
        logger.error(f"Error retrieving key: {error}")
        raise Exception(f"Error retrieving key: {error}")

    if not has_key_exist:
        logger.error(f"The provided Access Key ID; {old_access_key_id} does not exist.")
        raise KeyError(f"The provided Access Key ID; {old_access_key_id} does not exist.")

    # Parse the "DD:HH:MM" rotation interval.
    dd, hh, mm = list(map(lambda x: int(x), rotate_interval.split(":")))

    curr_date = datetime.now(timezone.utc)
    # Check if the key needs rotation: Saturday only, and only if old enough.
    if (curr_date.weekday() == 5 and
        curr_date - creation_date > timedelta(days=dd,hours=hh,minutes=mm)):
        logger.info("Key is older than rotation period, rotating now.")
        # Delete the old key
        delete_iam_user_key(session, user_name, old_access_key_id)

        # Create a new access key
        access_key_id, secret_access_key = create_iam_user_key(session, user_name)
        return {access_key_id_var_name: access_key_id, access_secret_key_var_name: secret_access_key}

    else:
        logger.info("Key rotation not necessary.")
        return {access_key_id_var_name: old_access_key_id, access_secret_key_var_name: old_access_key_secret}
118
+
119
+
120
def create_iam_user_key(session, user_name):
    """Create a fresh access key for *user_name*, verifying the user exists first.

    :return: (access_key_id, secret_access_key) tuple.
    :raises Exception: when the user is missing or IAM returns an error.
    """
    iam = session.client('iam')

    # Fail early with a clear message when the user is unknown.
    try:
        iam.get_user(UserName=user_name)
        logger.info("User exists, proceeding to create access key.")
    except iam.exceptions.NoSuchEntityException:
        raise Exception(f"User '{user_name}' does not exist, cannot proceed with key creation.")
    except ClientError as error:
        if error.response['Error']['Code'] == 'NoSuchEntity':
            raise Exception(f"User '{user_name}' does not exist in AWS IAM.")
        raise Exception(f"An unexpected error occurred: {error.response['Error']['Message']}")

    # Create access key for this user.
    new_key = iam.create_access_key(UserName=user_name)['AccessKey']
    return new_key['AccessKeyId'], new_key['SecretAccessKey']
145
+
146
+
147
def delete_iam_user_key(session, user_name, access_key_id):
    """Delete the given IAM access key for *user_name*.

    :raises Exception: wrapping the underlying failure, with the original
        exception explicitly chained for debugging.
    """
    iam = session.client('iam')
    try:
        iam.delete_access_key(UserName=user_name, AccessKeyId=access_key_id)
        logger.info(f"Old key {access_key_id} deleted successfully.")
    except Exception as e:
        # Explicit chaining preserves the original traceback context.
        raise Exception(f"Failed to delete old key: {e}") from e
154
+