krkn-lib 5.1.10__py3-none-any.whl → 6.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. krkn_lib/aws_tests/__init__.py +1 -1
  2. krkn_lib/elastic/krkn_elastic.py +3 -1
  3. krkn_lib/k8s/krkn_kubernetes.py +408 -20
  4. krkn_lib/k8s/pod_monitor/__init__.py +1 -2
  5. krkn_lib/k8s/pod_monitor/pod_monitor.py +146 -56
  6. krkn_lib/k8s/templates/snapshot.j2 +10 -0
  7. krkn_lib/models/elastic/models.py +24 -1
  8. krkn_lib/models/k8s/models.py +1 -1
  9. krkn_lib/models/pod_monitor/models.py +2 -2
  10. krkn_lib/models/telemetry/models.py +9 -0
  11. krkn_lib/ocp/krkn_openshift.py +4 -4
  12. krkn_lib/prometheus/krkn_prometheus.py +1 -1
  13. krkn_lib/telemetry/k8s/krkn_telemetry_kubernetes.py +1 -1
  14. krkn_lib/telemetry/ocp/krkn_telemetry_openshift.py +1 -1
  15. krkn_lib/tests/base_test.py +16 -3
  16. krkn_lib/tests/test_krkn_elastic_models.py +23 -4
  17. krkn_lib/tests/test_krkn_kubernetes_check.py +3 -2
  18. krkn_lib/tests/test_krkn_kubernetes_create.py +5 -3
  19. krkn_lib/tests/test_krkn_kubernetes_delete.py +3 -2
  20. krkn_lib/tests/test_krkn_kubernetes_get.py +5 -4
  21. krkn_lib/tests/test_krkn_kubernetes_misc.py +3 -3
  22. krkn_lib/tests/test_krkn_kubernetes_models.py +1 -1
  23. krkn_lib/tests/test_krkn_kubernetes_pods_monitor_models.py +3 -4
  24. krkn_lib/tests/test_krkn_kubernetes_virt.py +735 -0
  25. krkn_lib/tests/test_krkn_openshift.py +571 -48
  26. krkn_lib/tests/test_krkn_telemetry_kubernetes.py +848 -0
  27. krkn_lib/tests/test_safe_logger.py +496 -0
  28. krkn_lib/tests/test_utils.py +4 -5
  29. krkn_lib/utils/functions.py +4 -3
  30. krkn_lib/version/version.py +5 -2
  31. {krkn_lib-5.1.10.dist-info → krkn_lib-6.0.0.dist-info}/METADATA +7 -10
  32. {krkn_lib-5.1.10.dist-info → krkn_lib-6.0.0.dist-info}/RECORD +34 -30
  33. {krkn_lib-5.1.10.dist-info → krkn_lib-6.0.0.dist-info}/WHEEL +1 -1
  34. {krkn_lib-5.1.10.dist-info/licenses → krkn_lib-6.0.0.dist-info}/LICENSE +0 -0

krkn_lib/k8s/pod_monitor/pod_monitor.py
@@ -1,15 +1,19 @@
+ import logging
  import re
+ import time
+ import traceback
  from concurrent.futures import Future
  from concurrent.futures.thread import ThreadPoolExecutor
  from functools import partial

  from kubernetes import watch
- from kubernetes.client import V1Pod, CoreV1Api
+ from kubernetes.client import CoreV1Api, V1Pod
+ from urllib3.exceptions import ProtocolError

  from krkn_lib.models.pod_monitor.models import (
-     PodsSnapshot,
      MonitoredPod,
      PodEvent,
+     PodsSnapshot,
      PodStatus,
  )

@@ -47,67 +51,153 @@ def _monitor_pods(
      max_timeout: int,
      name_pattern: str = None,
      namespace_pattern: str = None,
+     max_retries: int = 3,
  ) -> PodsSnapshot:
-     w = watch.Watch(return_type=V1Pod)
+     """
+     Monitor pods with automatic retry on watch stream disconnection.
+
+     :param monitor_partial: Partial function for monitoring pods
+     :param snapshot: Snapshot to populate with pod events
+     :param max_timeout: Maximum time to monitor (seconds)
+     :param name_pattern: Regex pattern for pod names
+     :param namespace_pattern: Regex pattern for namespaces
+     :param max_retries: Maximum number of retries on connection error
+         (default: 3)
+     :return: PodsSnapshot with collected pod events
+     """
+
+     start_time = time.time()
+     retry_count = 0
      deleted_parent_pods = []
      restored_pods = []
      cluster_restored = False
-     for event in w.stream(monitor_partial, timeout_seconds=max_timeout):
-         match_name = True
-         match_namespace = True
-         event_type = event["type"]
-         pod = event["object"]

-         if namespace_pattern:
-             match = re.match(namespace_pattern, pod.metadata.namespace)
-             match_namespace = match is not None
-         if name_pattern:
-             match = re.match(name_pattern, pod.metadata.name)
-             match_name = match is not None
-
-         if match_name and match_namespace:
-             pod_event = PodEvent()
-             if event_type == "MODIFIED":
-                 if pod.metadata.deletion_timestamp is not None:
-                     pod_event.status = PodStatus.DELETION_SCHEDULED
-                     deleted_parent_pods.append(pod.metadata.name)
-                 elif _is_pod_ready(pod):
-                     pod_event.status = PodStatus.READY
-                     # if there are at least the same number of ready
-                     # pods as the snapshot.initial_pods set we assume that
-                     # the cluster is restored to the initial condition
-                     restored_pods.append(pod.metadata.name)
-                     if len(restored_pods) >= len(snapshot.initial_pods):
-                         cluster_restored = True
-                 else:
-                     pod_event.status = PodStatus.NOT_READY
-
-             elif event_type == "DELETED":
-                 pod_event.status = PodStatus.DELETED
-             elif event_type == "ADDED":
-                 pod_event.status = PodStatus.ADDED
-
-             if pod_event.status == PodStatus.ADDED:
-                 snapshot.added_pods.append(pod.metadata.name)
-                 # in case a pod is respawn with the same name
-                 # the dictionary must not be reinitialized
-                 if pod.metadata.name not in snapshot.pods:
-                     snapshot.pods[pod.metadata.name] = MonitoredPod()
-                 snapshot.pods[pod.metadata.name].name = pod.metadata.name
-                 snapshot.pods[pod.metadata.name].namespace = (
-                     pod.metadata.namespace
+     while retry_count <= max_retries:
+         try:
+             # Calculate remaining timeout if retrying
+             if retry_count > 0:
+                 elapsed = time.time() - start_time
+                 remain_timeout = max(1, int(max_timeout - elapsed))
+                 logging.info("remain timeout " + str(remain_timeout))
+                 if remain_timeout <= 0:
+                     logging.info(
+                         "Maximum timeout reached, stopping monitoring"
                  )
-             # skips events out of the snapshot
-             if pod.metadata.name in snapshot.pods:
-                 snapshot.pods[pod.metadata.name].status_changes.append(
-                     pod_event
+                     break
+                 logging.info(
+                     "Reconnecting watch stream"
+                     f"(attempt {retry_count}/{max_retries}),"
+                     f"remaining timeout: {remain_timeout}s"
                  )
-             # this flag is set when all the pods
-             # that has been deleted or not ready
-             # have been restored, if True the
-             # monitoring is stopeed earlier
-             if cluster_restored:
-                 w.stop()
+             else:
+                 remain_timeout = max_timeout
+
+             w = watch.Watch(return_type=V1Pod)
+
+             for e in w.stream(monitor_partial, timeout_seconds=remain_timeout):
+                 match_name = True
+                 match_namespace = True
+                 event_type = e["type"]
+                 pod = e["object"]
+
+                 if namespace_pattern:
+                     match = re.match(namespace_pattern, pod.metadata.namespace)
+                     match_namespace = match is not None
+                 if name_pattern:
+                     match = re.match(name_pattern, pod.metadata.name)
+                     match_name = match is not None
+
+                 if match_name and match_namespace:
+                     pod_event = PodEvent()
+                     pod_name = pod.metadata.name
+                     if event_type == "MODIFIED":
+                         if pod.metadata.deletion_timestamp is not None:
+                             pod_event.status = PodStatus.DELETION_SCHEDULED
+                             if pod_name not in deleted_parent_pods:
+                                 deleted_parent_pods.append(pod_name)
+                         elif _is_pod_ready(pod):
+                             pod_event.status = PodStatus.READY
+                             # if there are at least the same number of ready
+                             # pods as the snapshot.initial_pods set we assume
+                             # the cluster is restored to the initial condition
+                             if pod_name not in restored_pods:
+                                 restored_pods.append(pod_name)
+                             inital_pod_len = len(snapshot.initial_pods)
+                             if len(restored_pods) >= inital_pod_len:
+                                 cluster_restored = True
+                         else:
+                             pod_event.status = PodStatus.NOT_READY
+
+                     elif event_type == "DELETED":
+                         pod_event.status = PodStatus.DELETED
+                     elif event_type == "ADDED":
+                         pod_event.status = PodStatus.ADDED
+
+                     if pod_event.status == PodStatus.ADDED:
+
+                         if pod_name not in snapshot.added_pods:
+                             snapshot.added_pods.append(pod_name)
+                         # in case a pod is respawn with the same name
+                         # the dictionary must not be reinitialized
+                         if pod_name not in snapshot.pods:
+                             snapshot.pods[pod_name] = MonitoredPod()
+                         snapshot.pods[pod_name].name = pod_name
+                         snapshot.pods[pod_name].namespace = (
+                             pod.metadata.namespace
+                         )
+                     # skips events out of the snapshot
+                     if pod_name in snapshot.pods:
+                         snapshot.pods[pod_name].status_changes.append(
+                             pod_event
+                         )
+                     # this flag is set when all the pods
+                     # that has been deleted or not ready
+                     # have been restored, if True the
+                     # monitoring is stopped earlier
+                     if cluster_restored:
+                         logging.info("Cluster restored, stopping monitoring")
+                         w.stop()
+                         return snapshot
+
+             # If we exit the loop normally (timeout reached), we're done
+             logging.info("Watch stream completed normally")
+             break
+
+         except ProtocolError as e:
+
+             if retry_count > max_retries:
+                 logging.warning(
+                     f"Watch stream connection broken after {max_retries}"
+                     f"retries. ProtocolError: {e}. Returning snapshot "
+                     "with data collected so far."
+                 )
+                 break
+
+             # Log retry attempt
+             logging.info(
+                 f"Watch stream connection broken (ProtocolError): {e}. "
+                 f"Retry {retry_count}/{max_retries} in progress..."
+             )
+             backoff_time = 1
+
+             # Check if we have time for backoff
+             elapsed = time.time() - start_time
+             if elapsed + backoff_time >= max_timeout:
+                 logging.info(
+                     "Not enough time remaining for backoff, "
+                     "returning snapshot with data collected."
+                 )
+                 break
+
+             logging.debug(f"Waiting {backoff_time}s before retry...")
+             time.sleep(backoff_time)
+
+         except Exception as e:
+             logging.error("Error in monitor pods: " + str(e))
+             logging.error("Stack trace:\n%s", traceback.format_exc())
+             raise Exception(e)
+
+         retry_count += 1

      return snapshot

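The reconnect logic above is the substantive pod_monitor.py change in 6.0.0: when the watch stream drops with a urllib3 ProtocolError, the watch is reopened with whatever part of the original time budget remains, up to max_retries attempts. The sketch below is illustrative only and is not part of krkn-lib: it applies the same pattern to a bare namespaced pod watch, and the namespace, timeout, and retry values are assumptions made for the example.

# Illustrative sketch only (not krkn-lib API): reconnect a pod watch with the
# remaining time budget after a ProtocolError, mirroring the hunk above.
import logging
import time

from kubernetes import config, watch
from kubernetes.client import CoreV1Api
from urllib3.exceptions import ProtocolError


def watch_pods_with_retry(
    namespace: str = "default",
    max_timeout: int = 120,
    max_retries: int = 3,
) -> list[str]:
    config.load_kube_config()
    core_v1 = CoreV1Api()
    seen = []
    start = time.time()
    retries = 0
    while retries <= max_retries:
        # resume with whatever part of the time budget is left
        remaining = max(1, int(max_timeout - (time.time() - start)))
        w = watch.Watch()
        try:
            for event in w.stream(
                core_v1.list_namespaced_pod,
                namespace=namespace,
                timeout_seconds=remaining,
            ):
                seen.append(event["object"].metadata.name)
            break  # stream ended normally, the timeout was reached
        except ProtocolError as err:
            retries += 1
            logging.info("watch dropped (%s), retry %d", err, retries)
            time.sleep(1)  # short backoff, mirroring the library code
    return seen

Clamping the remaining budget to at least one second keeps timeout_seconds valid for the API server, which is the same choice the library code makes above.
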

krkn_lib/k8s/templates/snapshot.j2
@@ -0,0 +1,10 @@
+ apiVersion: snapshot.kubevirt.io/v1beta1
+ kind: VirtualMachineSnapshot
+ metadata:
+   name: {{name}}
+   namespace: {{namespace}}
+ spec:
+   source:
+     apiGroup: kubevirt.io
+     kind: VirtualMachine
+     name: {{vm_name}}
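The new snapshot.j2 template exposes three variables (name, namespace, vm_name) and produces a KubeVirt VirtualMachineSnapshot manifest. A minimal sketch of rendering it with Jinja2 follows; the loader path and the variable values are illustrative assumptions, not krkn-lib's own rendering code.

# Illustrative only: render the new template and parse the resulting manifest.
import yaml
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("krkn_lib/k8s/templates"))
template = env.get_template("snapshot.j2")
rendered = template.render(
    name="demo-vm-snapshot",  # VirtualMachineSnapshot object name (assumed)
    namespace="default",      # namespace of the target VM (assumed)
    vm_name="demo-vm",        # VirtualMachine to snapshot (assumed)
)
manifest = yaml.safe_load(rendered)
assert manifest["spec"]["source"]["kind"] == "VirtualMachine"
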

krkn_lib/models/elastic/models.py
@@ -1,6 +1,7 @@
  import datetime

  from elasticsearch_dsl import (
+     Boolean,
      Date,
      Document,
      Float,
@@ -9,7 +10,6 @@ from elasticsearch_dsl import (
      Keyword,
      Nested,
      Text,
-     Boolean,
  )

  from krkn_lib.models.telemetry import ChaosRunTelemetry
@@ -144,6 +144,7 @@ class ElasticChaosRunTelemetry(Document):
      run_uuid = Text(fields={"keyword": Keyword()})
      health_checks = Nested(ElasticHealthChecks, multi=True)
      virt_checks = Nested(ElasticVirtChecks, multi=True)
+     post_virt_checks = Nested(ElasticVirtChecks, multi=True)

      class Index:
          name = "chaos_run_telemetry"
@@ -261,6 +262,28 @@ class ElasticChaosRunTelemetry(Document):
          else:
              self.virt_checks = None

+         if chaos_run_telemetry.post_virt_checks:
+             self.post_virt_checks = [
+                 ElasticVirtChecks(
+                     vm_name=post_info.vm_name,
+                     ip_address=post_info.ip_address,
+                     new_ip_address=post_info.new_ip_address,
+                     namespace=post_info.namespace,
+                     node_name=post_info.node_name,
+                     status=post_info.status,
+                     start_timestamp=datetime.datetime.fromisoformat(
+                         str(post_info.start_timestamp)
+                     ),
+                     end_timestamp=datetime.datetime.fromisoformat(
+                         str(post_info.end_timestamp)
+                     ),
+                     duration=post_info.duration,
+                 )
+                 for post_info in chaos_run_telemetry.post_virt_checks
+             ]
+         else:
+             self.post_virt_checks = None
+
          self.timestamp = chaos_run_telemetry.timestamp
          self.total_node_count = chaos_run_telemetry.total_node_count
          self.cloud_infrastructure = chaos_run_telemetry.cloud_infrastructure

krkn_lib/models/k8s/models.py
@@ -326,7 +326,7 @@ class AffectedNodeStatus:
          match_found = []
          for affected_node in self.affected_nodes:
              counter2 = counter + 1
-             for aff_node2 in self.affected_nodes[counter + 1:]:  # fmt: skip
+             for aff_node2 in self.affected_nodes[counter + 1 :]:  # noqa: E203
                  if affected_node.node_name == aff_node2.node_name:
                      match_found.append(counter2)
                      cur_node = self.affected_nodes[counter]

krkn_lib/models/pod_monitor/models.py
@@ -2,9 +2,9 @@ import json
  import time
  from dataclasses import dataclass
  from enum import Enum
- from typing import Optional, Any
+ from typing import Any, Optional

- from krkn_lib.models.k8s import PodsStatus, AffectedPod
+ from krkn_lib.models.k8s import AffectedPod, PodsStatus


  class PodStatus(Enum):

krkn_lib/models/telemetry/models.py
@@ -545,6 +545,10 @@ class ChaosRunTelemetry:
      """
      Virt checks of VMIs
      """
+     post_virt_checks: list[VirtCheck] = None
+     """
+     Post Scenario Virt checks of VMIs
+     """
      job_status: bool = True
      """
      Overall job status, will take all scenario's exit status
@@ -597,6 +601,11 @@ class ChaosRunTelemetry:
              if json_dict.get("virt_checks")
              else None
          )
+         self.post_virt_checks = (
+             [VirtCheck(k) for k in json_dict.get("post_virt_checks")]
+             if json_dict.get("post_virt_checks")
+             else None
+         )
          self.job_status = json_dict.get("job_status")
          self.build_url = json_dict.get("build_url")

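As the parsing above shows, post_virt_checks is deserialized exactly like virt_checks, into a list of VirtCheck objects. A rough sketch of the round trip is below; it assumes ChaosRunTelemetry accepts a json_dict argument (as the parsing code implies) and that an otherwise-minimal payload with an empty scenarios list is accepted, neither of which is verified here.

# Illustrative only: feed a telemetry dict containing post_virt_checks.
from krkn_lib.models.telemetry import ChaosRunTelemetry

payload = {
    "scenarios": [],  # assumed to be accepted when empty
    "post_virt_checks": [
        {
            "vm_name": "windows-vm-52",
            "namespace": "benchmark-runner",
            "node_name": "h10-r660",
            "ip_address": "0.0.0.0",
            "new_ip_address": "",
            "status": False,
            "start_timestamp": "2025-03-12T14:57:55.904352",
            "end_timestamp": "2025-03-12T14:57:55.904352",
            "duration": 0.0,
        }
    ],
}

telemetry = ChaosRunTelemetry(json_dict=payload)
print(telemetry.post_virt_checks[0].vm_name)  # -> windows-vm-52
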

krkn_lib/ocp/krkn_openshift.py
@@ -65,7 +65,7 @@ class KrknOpenshift(KrknKubernetes):
          )

          path = "/apis/config.openshift.io/v1/infrastructures/cluster"
-         (data) = api_client.call_api(
+         data = api_client.call_api(
              path,
              "GET",
              path_params,
@@ -111,7 +111,7 @@ class KrknOpenshift(KrknKubernetes):
          )

          path = "/apis/config.openshift.io/v1/infrastructures/cluster"
-         (data) = api_client.call_api(
+         data = api_client.call_api(
              path,
              "GET",
              path_params,
@@ -147,7 +147,7 @@ class KrknOpenshift(KrknKubernetes):
          )

          path = "/apis/config.openshift.io/v1/networks"
-         (data) = api_client.call_api(
+         data = api_client.call_api(
              path,
              "GET",
              path_params,
@@ -440,7 +440,7 @@ class KrknOpenshift(KrknKubernetes):
              ["application/json"]
          )

-         (data) = self.api_client.call_api(
+         data = self.api_client.call_api(
              path,
              "GET",
              path_params,

krkn_lib/prometheus/krkn_prometheus.py
@@ -50,7 +50,7 @@ class KrknPrometheus:
          query: str,
          start_time: datetime = None,
          end_time: datetime = None,
-         granularity: int = 10
+         granularity: int = 10,
      ) -> list[dict[str:any]]:
          """
          Executes a query to the Prometheus API in PromQL languag,

krkn_lib/telemetry/k8s/krkn_telemetry_kubernetes.py
@@ -425,7 +425,7 @@ class KrknTelemetryKubernetes:
          :param uploaded_file_list: uploaded file list shared between threads
          :param max_retries: maximum number of retries from config.yaml.
              If 0 will retry indefinitely.
-         :param remote_file_prefix: the prefix that will given to the file
+         :param remote_file_prefix: the prefix that will be given to the file
              in the S3 bucket along with the progressive number
              (if is a multiple file archive)
          :param remote_file_extension: the extension of the remote

krkn_lib/telemetry/ocp/krkn_telemetry_openshift.py
@@ -244,7 +244,7 @@ class KrknTelemetryOpenshift(KrknTelemetryKubernetes):
          )

          path = "/apis/kubevirt.io/v1/virtualmachineinstances"
-         (data) = api_client.call_api(
+         data = api_client.call_api(
              path,
              "GET",
              path_params,

krkn_lib/tests/base_test.py
@@ -583,7 +583,7 @@ class BaseTest(unittest.TestCase):
                      "start_timestamp": "2025-03-12T14:57:34.555878",
                      "end_timestamp": "2025-03-12T14:57:54.904352",
                      "duration": 20.348474,
-                     "new_ip_address": ""
+                     "new_ip_address": "",
                  },
                  {
                      "node_name": "h27-r660",
@@ -594,7 +594,7 @@ class BaseTest(unittest.TestCase):
                      "start_timestamp": "2025-03-12T14:57:34.759105",
                      "end_timestamp": "2025-03-12T14:57:54.904352",
                      "duration": 20.145247,
-                     "new_ip_address": ""
+                     "new_ip_address": "",
                  },
                  {
                      "node_name": "h10-r660",
@@ -605,9 +605,22 @@ class BaseTest(unittest.TestCase):
                      "start_timestamp": "2025-03-12T14:57:35.308957",
                      "end_timestamp": "2025-03-12T14:57:54.904352",
                      "duration": 19.595395,
-                     "new_ip_address": "0.0.0.3"
+                     "new_ip_address": "0.0.0.3",
                  },
              ],
+             "post_virt_checks": [
+                 {
+                     "node_name": "h10-r660",
+                     "namespace": "benchmark-runner",
+                     "ip_address": "0.0.0.0",
+                     "vm_name": "windows-vm-52",
+                     "status": False,
+                     "start_timestamp": "2025-03-12T14:57:55.904352",
+                     "end_timestamp": "2025-03-12T14:57:55.904352",
+                     "duration": 0.00,
+                     "new_ip_address": "",
+                 }
+             ],
              "total_node_count": 3,
              "cloud_infrastructure": "AWS",
              "cloud_type": "EC2",

krkn_lib/tests/test_krkn_elastic_models.py
@@ -1,9 +1,9 @@
+ import datetime
  import uuid

  from krkn_lib.models.elastic.models import ElasticChaosRunTelemetry
  from krkn_lib.models.telemetry import ChaosRunTelemetry
  from krkn_lib.tests import BaseTest
- import datetime


  class TestKrknElasticModels(BaseTest):
@@ -222,9 +222,7 @@ class TestKrknElasticModels(BaseTest):
          self.assertEqual(
              elastic_telemetry.virt_checks[0].ip_address, "0.0.0.0"
          )
-         self.assertEqual(
-             elastic_telemetry.virt_checks[0].new_ip_address, ""
-         )
+         self.assertEqual(elastic_telemetry.virt_checks[0].new_ip_address, "")
          self.assertEqual(
              elastic_telemetry.virt_checks[2].new_ip_address, "0.0.0.3"
          )
@@ -246,6 +244,27 @@ class TestKrknElasticModels(BaseTest):
          )
          self.assertEqual(elastic_telemetry.virt_checks[0].duration, 20.348474)

+         # post_virt_checks
+         self.assertEqual(len(elastic_telemetry.post_virt_checks), 1)
+         self.assertEqual(
+             elastic_telemetry.post_virt_checks[0].vm_name, "windows-vm-52"
+         )
+         self.assertEqual(
+             elastic_telemetry.post_virt_checks[0].ip_address, "0.0.0.0"
+         )
+         self.assertEqual(
+             elastic_telemetry.post_virt_checks[0].new_ip_address, ""
+         )
+
+         self.assertEqual(
+             elastic_telemetry.post_virt_checks[0].namespace, "benchmark-runner"
+         )
+         self.assertEqual(
+             elastic_telemetry.post_virt_checks[0].node_name, "h10-r660"
+         )
+
+         self.assertEqual(elastic_telemetry.post_virt_checks[0].status, False)
+
          self.assertEqual(elastic_telemetry.total_node_count, 3)
          self.assertEqual(elastic_telemetry.cloud_infrastructure, "AWS")
          self.assertEqual(elastic_telemetry.cloud_type, "EC2")

krkn_lib/tests/test_krkn_kubernetes_check.py
@@ -2,10 +2,11 @@ import logging
  import time
  import unittest

- from krkn_lib.tests import BaseTest
- from krkn_lib.k8s import ApiRequestException
  from kubernetes.client import ApiException

+ from krkn_lib.k8s import ApiRequestException
+ from krkn_lib.tests import BaseTest
+

  class KrknKubernetesTestsCheck(BaseTest):
      def test_check_namespaces(self):

krkn_lib/tests/test_krkn_kubernetes_create.py
@@ -1,10 +1,12 @@
  import logging
+ import tempfile
  import unittest
+
  import yaml
- import tempfile
- from krkn_lib.tests import BaseTest
- from kubernetes.client import ApiException
  from jinja2 import Environment, FileSystemLoader
+ from kubernetes.client import ApiException
+
+ from krkn_lib.tests import BaseTest


  class KrknKubernetesTestsCreate(BaseTest):

krkn_lib/tests/test_krkn_kubernetes_delete.py
@@ -2,10 +2,11 @@ import logging
  import time
  import unittest

- from krkn_lib.tests import BaseTest
- from krkn_lib.k8s import ApiRequestException
  from kubernetes.client import ApiException

+ from krkn_lib.k8s import ApiRequestException
+ from krkn_lib.tests import BaseTest
+

  class KrknKubernetesTestsDelete(BaseTest):
      def test_delete_namespace(self):

krkn_lib/tests/test_krkn_kubernetes_get.py
@@ -1,16 +1,17 @@
  import logging
+ import os
  import random
  import re
  import time
- import os
  import unittest

- from krkn_lib.models.telemetry import ChaosRunTelemetry
- from krkn_lib.tests import BaseTest
  from kubernetes import config
- from krkn_lib.k8s import ApiRequestException, KrknKubernetes
  from kubernetes.client import ApiException

+ from krkn_lib.k8s import ApiRequestException, KrknKubernetes
+ from krkn_lib.models.telemetry import ChaosRunTelemetry
+ from krkn_lib.tests import BaseTest
+

  class KrknKubernetesTestsGet(BaseTest):
      def test_get_version(self):

krkn_lib/tests/test_krkn_kubernetes_misc.py
@@ -6,11 +6,11 @@ import time
  import unittest

  import yaml
+ from kubernetes.client import ApiException
+ from tzlocal import get_localzone

  from krkn_lib.models.krkn import HogConfig, HogType
  from krkn_lib.tests import BaseTest
- from tzlocal import get_localzone
- from kubernetes.client import ApiException


  class KrknKubernetesTestsMisc(BaseTest):
@@ -232,7 +232,7 @@ class KrknKubernetesTestsMisc(BaseTest):
              ["application/json"]
          )
          path = f"/api/v1/nodes/{node_name}/proxy/stats/summary"
-         (data) = self.lib_k8s.api_client.call_api(
+         data = self.lib_k8s.api_client.call_api(
              path,
              "GET",
              path_params,

krkn_lib/tests/test_krkn_kubernetes_models.py
@@ -1,10 +1,10 @@
- from krkn_lib.models.krkn import HogConfig, HogType
  from krkn_lib.models.k8s import (
      AffectedNode,
      AffectedNodeStatus,
      AffectedPod,
      PodsStatus,
  )
+ from krkn_lib.models.krkn import HogConfig, HogType
  from krkn_lib.tests import BaseTest



krkn_lib/tests/test_krkn_kubernetes_pods_monitor_models.py
@@ -1,13 +1,12 @@
  import time
  import unittest

-
- from krkn_lib.models.k8s import PodsStatus, AffectedPod
+ from krkn_lib.models.k8s import AffectedPod, PodsStatus
  from krkn_lib.models.pod_monitor.models import (
-     PodEvent,
-     PodStatus,
      MonitoredPod,
+     PodEvent,
      PodsSnapshot,
+     PodStatus,
  )
