krkn-lib 5.1.4__py3-none-any.whl → 5.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,202 +0,0 @@
- from concurrent.futures import ThreadPoolExecutor, wait
- from multiprocessing import Event
-
- from krkn_lib.k8s import KrknKubernetes
- from krkn_lib.models.k8s import PodsMonitorThread, PodsStatus
-
-
- class PodsMonitorPool:
-     """
-     This class has the purpose to manage pools of pod
-     status monitoring threads for the Krkn Scenarios
-     having multiple killing sessions at the same time (eg. Plugin scenarios)
-     the methods reflects the behaviour of the underlying
-     KrknKubernetes Primitives but each call to select_and_monitor_*
-     method pushes a new thread that is managed by the pool.
-     The join method joins all the threads in the pool simultaneously
-     and merges the results on a single PodsStatus structure.
-     """
-
-     events: list[Event]
-
-     def __init__(self, krkn_lib: KrknKubernetes):
-         self.krkn_lib = krkn_lib
-         self.pods_monitor_threads: list[PodsMonitorThread] = []
-         self.pods_statuses = []
-         self.events: list[Event] = []
-
-     def select_and_monitor_by_label(
-         self, label_selector: str, field_selector: str, max_timeout: int
-     ):
-         """
-         Pushes into the pool a monitoring thread for all the pods identified
-         by a label selector and collects infos about the
-         pods recovery after a kill scenario while the scenario is running.
-
-         :param label_selector: the label selector used
-         to filter the pods to monitor (must be the
-         same used in `select_pods_by_label`)
-         :param max_timeout: the expected time the pods should take
-         to recover. If the killed pods are replaced in this time frame,
-         but they didn't reach the Ready State, they will be marked as
-         unrecovered. If during the time frame the pods are not replaced
-         at all the error field of the PodsStatus structure will be
-         valorized with an exception.
-
-         """
-         event = Event()
-         self.events.append(event)
-         pods_and_namespaces = self.krkn_lib.select_pods_by_label(
-             label_selector=label_selector, field_selector=field_selector
-         )
-         pod_monitor_thread = self.krkn_lib.monitor_pods_by_label(
-             label_selector=label_selector,
-             pods_and_namespaces=pods_and_namespaces,
-             field_selector=field_selector,
-             max_timeout=max_timeout,
-             event=event,
-         )
-         self.pods_monitor_threads.append(pod_monitor_thread)
-
-     def select_and_monitor_by_name_pattern_and_namespace_pattern(
-         self,
-         pod_name_pattern: str,
-         namespace_pattern: str,
-         field_selector: str,
-         max_timeout: int,
-     ):
-         """
-         Pushes into the pool a monitoring thread for all the pods identified
-         by a pod name regex pattern
-         and a namespace regex pattern, that collects infos about the
-         pods recovery after a kill scenario while the scenario is running.
-
-         :param pod_name_pattern: a regex representing the
-         pod name pattern used to filter the pods to be monitored
-         (must be the same used in
-         `select_pods_by_name_pattern_and_namespace_pattern`)
-         :param namespace_pattern: a regex representing the namespace
-         pattern used to filter the pods to be monitored
-         (must be the same used in
-         `select_pods_by_name_pattern_and_namespace_pattern`)
-         :param field_selector: Pod field selector
-         :param max_timeout: the expected time the pods should take to
-         recover. If the killed pods are replaced in this time frame,
-         but they didn't reach the Ready State, they will be marked as
-         unrecovered. If during the time frame the pods are not replaced
-         at all the error field of the PodsStatus structure will be
-         valorized with an exception.
-
-         """
-
-         event = Event()
-         self.events.append(event)
-
-         pods_and_namespaces = (
-             self.krkn_lib.select_pods_by_name_pattern_and_namespace_pattern(
-                 pod_name_pattern=pod_name_pattern,
-                 namespace_pattern=namespace_pattern,
-                 field_selector=field_selector,
-             )
-         )
-
-         pods_monitor_thread = (
-             self.krkn_lib.monitor_pods_by_name_pattern_and_namespace_pattern(
-                 pod_name_pattern=pod_name_pattern,
-                 namespace_pattern=namespace_pattern,
-                 pods_and_namespaces=pods_and_namespaces,
-                 field_selector=field_selector,
-                 max_timeout=max_timeout,
-                 event=event,
-             )
-         )
-
-         self.pods_monitor_threads.append(pods_monitor_thread)
-
-     def select_and_monitor_by_namespace_pattern_and_label(
-         self,
-         namespace_pattern: str,
-         label_selector: str,
-         field_selector: str = None,
-         max_timeout=30,
-     ):
-         """
-         Pushes into the pool a monitoring thread for all the pods identified
-         by a namespace regex pattern
-         and a pod label selector, that collects infos about the
-         pods recovery after a kill scenario while the scenario is running.
-
-         :param label_selector: the label selector used to filter
-         the pods to monitor (must be the same used in
-         `select_pods_by_label`)
-         :param namespace_pattern: a regex representing the namespace
-         pattern used to filter the pods to be monitored (must be
-         the same used
-         in `select_pods_by_name_pattern_and_namespace_pattern`)
-         :param max_timeout: the expected time the pods should take to recover.
-         If the killed pods are replaced in this time frame, but they
-         didn't reach the Ready State, they will be marked as unrecovered.
-         If during the time frame the pods are not replaced
-         at all the error field of the PodsStatus structure will be
-         valorized with an exception.
-
-         """
-         event = Event()
-         self.events.append(event)
-         pods_and_namespaces = (
-             self.krkn_lib.select_pods_by_namespace_pattern_and_label(
-                 namespace_pattern=namespace_pattern,
-                 label_selector=label_selector,
-                 field_selector=field_selector,
-             )
-         )
-
-         pod_monitor_thread = (
-             self.krkn_lib.monitor_pods_by_namespace_pattern_and_label(
-                 namespace_pattern=namespace_pattern,
-                 label_selector=label_selector,
-                 pods_and_namespaces=pods_and_namespaces,
-                 field_selector=field_selector,
-                 max_timeout=max_timeout,
-                 event=event,
-             )
-         )
-         self.pods_monitor_threads.append(pod_monitor_thread)
-
-     def cancel(self):
-         """
-         cancels all the threads in the pool and makes return
-         the join() call immediately
-         """
-         for event in self.events:
-             event.set()
-
-     def join(self) -> PodsStatus:
-         """
-         waits all the threads pushed into the pool to finish
-
-         :return: a PodsStatus structure that is the merge between
-         all the PodsStatus structures returned by every thread
-         pushed into the pool.
-         """
-         futures = []
-         pods_statuses: list[PodsStatus] = []
-         final_status = PodsStatus()
-         with ThreadPoolExecutor() as executor:
-             for thread in self.pods_monitor_threads:
-                 futures.append(executor.submit(thread.join))
-             done, _ = wait(futures)
-
-             for future in done:
-                 pods_statuses.append(future.result())
-
-         exceptions = [status.error for status in pods_statuses if status.error]
-         if len(exceptions) > 0:
-             merged_exception_message = ", ".join([str(e) for e in exceptions])
-             final_status.error = merged_exception_message
-             return final_status
-
-         for pod_status in pods_statuses:
-             final_status.merge(pod_status)
-
-         return final_status
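For context, here is a minimal usage sketch of the PodsMonitorPool class removed above. Only the pool methods, the cancel()/join() semantics and the PodsStatus fields (error, recovered, unrecovered) come from the code in this diff; the import path, the KrknKubernetes constructor argument, the selectors and the run_kill_scenario() step are illustrative assumptions, not taken from the package.

# Minimal sketch, not taken from the package: import path, constructor
# argument and the disruption step are assumptions.
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool  # assumed path


def run_kill_scenario():
    # hypothetical placeholder for whatever disruption the scenario performs
    pass


krkn_k8s = KrknKubernetes(kubeconfig_path="~/.kube/config")  # assumed argument
pool = PodsMonitorPool(krkn_k8s)

# One monitoring thread per killing session; the selectors must match the
# ones used to pick the victim pods.
pool.select_and_monitor_by_label(
    label_selector="app=example",
    field_selector="status.phase=Running",
    max_timeout=120,
)
pool.select_and_monitor_by_namespace_pattern_and_label(
    namespace_pattern="^example-ns-.*",
    label_selector="app=example",
    field_selector="status.phase=Running",
    max_timeout=120,
)

try:
    run_kill_scenario()
except Exception:
    pool.cancel()  # unblocks join() immediately if the scenario aborts
    raise

status = pool.join()  # merged PodsStatus from every monitor in the pool
if status.error:
    print(f"pod monitoring failed: {status.error}")
else:
    print(
        f"recovered: {len(status.recovered)}, "
        f"unrecovered: {len(status.unrecovered)}"
    )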
@@ -1,367 +0,0 @@
- import logging
- import time
- import unittest
-
- from krkn_lib.tests import BaseTest
-
-
- class KrknKubernetesTestsCreate(BaseTest):
-     def test_monitor_pods_by_label_no_pods_affected(self):
-         # test no pods affected
-         namespace = "test-ns-0-" + self.get_random_string(10)
-         delayed_1 = "delayed-0-" + self.get_random_string(10)
-         delayed_2 = "delayed-0-" + self.get_random_string(10)
-         label = "readiness-" + self.get_random_string(5)
-         self.deploy_namespace(namespace, [])
-         self.deploy_delayed_readiness_pod(delayed_1, namespace, 0, label)
-         self.deploy_delayed_readiness_pod(delayed_2, namespace, 0, label)
-
-         self.wait_pod(delayed_1, namespace)
-         self.wait_pod(delayed_2, namespace)
-
-         monitor_timeout = 2
-         pods_and_namespaces = self.lib_k8s.select_pods_by_label(
-             f"test={label}"
-         )
-         start_time = time.time()
-         pods_thread = self.lib_k8s.monitor_pods_by_label(
-             f"test={label}",
-             pods_and_namespaces,
-             field_selector="status.phase=Running",
-             max_timeout=monitor_timeout,
-         )
-
-         result = pods_thread.join()
-         end_time = time.time() - start_time
-         self.background_delete_pod(delayed_1, namespace)
-         self.background_delete_pod(delayed_2, namespace)
-         # added half second of delay that might be introduced to API
-         # calls
-         self.assertTrue(monitor_timeout < end_time < monitor_timeout + 0.5)
-         self.assertIsNone(result.error)
-         self.assertEqual(len(result.recovered), 0)
-         self.assertEqual(len(result.unrecovered), 0)
-         self.background_delete_ns(namespace)
-
-     def test_pods_by_name_and_namespace_pattern_different_names_respawn(
-         self,
-     ):
-         # test pod with different name recovered
-         namespace = "test-ns-1-" + self.get_random_string(10)
-         delayed_1 = "delayed-1-" + self.get_random_string(10)
-         delayed_2 = "delayed-1-" + self.get_random_string(10)
-         delayed_respawn = "delayed-1-respawn-" + self.get_random_string(10)
-         label = "readiness-" + self.get_random_string(5)
-         pod_delay = 1
-         monitor_timeout = 10
-         self.deploy_namespace(namespace, [])
-         self.deploy_delayed_readiness_pod(delayed_1, namespace, 0, label)
-         self.deploy_delayed_readiness_pod(delayed_2, namespace, 0, label)
-         self.wait_pod(delayed_1, namespace)
-         self.wait_pod(delayed_2, namespace)
-         pods_and_namespaces = (
-             self.lib_k8s.select_pods_by_name_pattern_and_namespace_pattern(
-                 "^delayed-1-.*",
-                 "^test-ns-1-.*",
-                 field_selector="status.phase=Running",
-             )
-         )
-
-         pods_thread = (
-             self.lib_k8s.monitor_pods_by_name_pattern_and_namespace_pattern(
-                 "^delayed-1-.*",
-                 "^test-ns-1-.*",
-                 pods_and_namespaces,
-                 field_selector="status.phase=Running",
-                 max_timeout=monitor_timeout,
-             )
-         )
-
-         self.background_delete_pod(delayed_1, namespace)
-         self.deploy_delayed_readiness_pod(
-             delayed_respawn, namespace, pod_delay, label
-         )
-         self.wait_pod(delayed_1, namespace)
-         self.wait_pod(delayed_respawn, namespace)
-         result = pods_thread.join()
-         self.assertIsNone(result.error)
-         self.assertEqual(len(result.recovered), 1)
-         self.assertEqual(result.recovered[0].pod_name, delayed_respawn)
-         self.assertEqual(result.recovered[0].namespace, namespace)
-         self.assertTrue(result.recovered[0].pod_readiness_time > 0)
-         self.assertTrue(result.recovered[0].pod_rescheduling_time > 0)
-         self.assertTrue(result.recovered[0].total_recovery_time >= pod_delay)
-         self.assertEqual(len(result.unrecovered), 0)
-         self.background_delete_ns(namespace)
-
-     def test_pods_by_namespace_pattern_and_label_same_name_respawn(
-         self,
-     ):
-         # test pod with same name recovered
-         namespace = "test-ns-2-" + self.get_random_string(10)
-         delayed_1 = "delayed-2-1-" + self.get_random_string(10)
-         delayed_2 = "delayed-2-2-" + self.get_random_string(10)
-         label = "readiness-" + self.get_random_string(5)
-         self.deploy_namespace(namespace, [])
-         self.deploy_delayed_readiness_pod(delayed_1, namespace, 0, label)
-         self.deploy_delayed_readiness_pod(delayed_2, namespace, 0, label)
-         self.wait_pod(delayed_1, namespace)
-         self.wait_pod(delayed_2, namespace)
-         monitor_timeout = 45
-         pod_delay = 0
-         pods_and_namespaces = (
-             self.lib_k8s.select_pods_by_namespace_pattern_and_label(
-                 "^test-ns-2-.*",
-                 f"test={label}",
-                 field_selector="status.phase=Running",
-             )
-         )
-         pods_thread = self.lib_k8s.monitor_pods_by_namespace_pattern_and_label(
-             "^test-ns-2-.*",
-             f"test={label}",
-             pods_and_namespaces,
-             field_selector="status.phase=Running",
-             max_timeout=monitor_timeout,
-         )
-
-         self.lib_k8s.delete_pod(delayed_1, namespace)
-         time.sleep(3)
-         self.deploy_delayed_readiness_pod(
-             delayed_1, namespace, pod_delay, label
-         )
-         self.wait_pod(delayed_1, namespace)
-         result = pods_thread.join()
-         self.assertIsNone(result.error)
-         self.assertEqual(len(result.recovered), 1)
-         self.assertEqual(result.recovered[0].pod_name, delayed_1)
-         self.assertEqual(result.recovered[0].namespace, namespace)
-         self.assertTrue(result.recovered[0].pod_readiness_time > 0)
-         self.assertTrue(result.recovered[0].pod_rescheduling_time > 0)
-         self.assertTrue(result.recovered[0].total_recovery_time >= pod_delay)
-         self.assertEqual(len(result.unrecovered), 0)
-         self.background_delete_ns(namespace)
-
-     def test_pods_by_label_respawn_timeout(self):
-         # test pod will not recover before the timeout
-         namespace = "test-ns-3-" + self.get_random_string(10)
-         delayed_1 = "delayed-3-" + self.get_random_string(10)
-         delayed_2 = "delayed-3-" + self.get_random_string(10)
-         delayed_respawn = "delayed-respawn-3-" + self.get_random_string(10)
-         label = "readiness-" + self.get_random_string(5)
-
-         self.deploy_namespace(namespace, [])
-         self.deploy_delayed_readiness_pod(delayed_1, namespace, 0, label)
-         self.deploy_delayed_readiness_pod(delayed_2, namespace, 0, label)
-         self.wait_pod(delayed_1, namespace)
-         self.wait_pod(delayed_2, namespace)
-         monitor_timeout = 20
-         pod_delay = 30
-         # pod with same name recovered
-
-         pods_and_namespaces = self.lib_k8s.select_pods_by_label(
-             f"test={label}", field_selector="status.phase=Running"
-         )
-         pods_thread = self.lib_k8s.monitor_pods_by_label(
-             f"test={label}",
-             pods_and_namespaces,
-             field_selector="status.phase=Running",
-             max_timeout=monitor_timeout,
-         )
-
-         self.background_delete_pod(delayed_1, namespace)
-         self.deploy_delayed_readiness_pod(
-             delayed_respawn, namespace, pod_delay, label
-         )
-         time.sleep(3)
-         result = pods_thread.join()
-         self.assertIsNone(result.error)
-         self.assertEqual(len(result.unrecovered), 1)
-         self.assertEqual(result.unrecovered[0].pod_name, delayed_respawn)
-         self.assertEqual(result.unrecovered[0].namespace, namespace)
-         self.assertEqual(len(result.recovered), 0)
-         self.background_delete_ns(namespace)
-
-     def test_pods_by_label_never_respawn(self):
-         # test pod will never recover
-         namespace = "test-ns-4-" + self.get_random_string(10)
-         delayed_1 = "delayed-4-" + self.get_random_string(10)
-         delayed_2 = "delayed-4-" + self.get_random_string(10)
-         label = "readiness-" + self.get_random_string(5)
-         self.deploy_namespace(namespace, [])
-         self.deploy_delayed_readiness_pod(delayed_1, namespace, 0, label)
-         self.deploy_delayed_readiness_pod(delayed_2, namespace, 0, label)
-         self.wait_pod(delayed_1, namespace)
-         self.wait_pod(delayed_2, namespace)
-
-         monitor_timeout = 15
-
-         pods_and_namespaces = self.lib_k8s.select_pods_by_label(
-             f"test={label}", field_selector="status.phase=Running"
-         )
-         pods_thread = self.lib_k8s.monitor_pods_by_label(
-             f"test={label}",
-             pods_and_namespaces,
-             field_selector="status.phase=Running",
-             max_timeout=monitor_timeout,
-         )
-         self.background_delete_pod(delayed_1, namespace)
-         time.sleep(3)
-         result = pods_thread.join()
-         self.assertIsNone(result.error)
-         self.assertEqual(len(result.unrecovered), 0)
-         self.assertEqual(len(result.recovered), 0)
-         self.background_delete_ns(namespace)
-
-     def test_flaky_tests(self):
-         logging.warn("test_pods_by_label_multiple_respawn")
-         logging.warn("test_pods_by_label_multiple_respawn_one_too_late")
-         logging.warn("FLAKY TESTS NEED TO BE REFACTORED AND REENABLED")
-
-     ######## FLAKY TEST NEEDS TO BE REFACTORED # NOQA
-     # def test_pods_by_label_multiple_respawn(self):
-     #     # test pod will never recover
-     #     namespace = "test-ns-4-" + self.get_random_string(10)
-     #     delayed_1 = "delayed-4-" + self.get_random_string(10)
-     #     delayed_2 = "delayed-4-" + self.get_random_string(10)
-     #     delayed_3 = "delayed-4-" + self.get_random_string(10)
-     #     delayed_respawn_1 = "delayed-4-respawn-" + self.get_random_string(10)
-     #     delayed_respawn_2 = "delayed-4-respawn-" + self.get_random_string(10)
-     #     label = "readiness-" + self.get_random_string(5)
-     #     self.deploy_namespace(namespace, [])
-     #     self.deploy_delayed_readiness_pod(delayed_1, namespace, 0, label)
-     #     self.deploy_delayed_readiness_pod(delayed_2, namespace, 0, label)
-     #     self.deploy_delayed_readiness_pod(delayed_3, namespace, 0, label)
-     #     self.wait_pod(delayed_1, namespace)
-     #     self.wait_pod(delayed_2, namespace)
-     #     self.wait_pod(delayed_3, namespace)
-     #     monitor_timeout = 20
-     #     pod_delay = 2
-     #     pods_and_namespaces = self.lib_k8s.select_pods_by_label(
-     #         f"test={label}", field_selector="status.phase=Running"
-     #     )
-     #     pods_thread = self.lib_k8s.monitor_pods_by_label(
-     #         f"test={label}",
-     #         pods_and_namespaces,
-     #         field_selector="status.phase=Running",
-     #         max_timeout=monitor_timeout,
-     #     )
-     #
-     #     self.background_delete_pod(delayed_1, namespace)
-     #     self.background_delete_pod(delayed_2, namespace)
-     #
-     #     self.deploy_delayed_readiness_pod(
-     #         delayed_respawn_1, namespace, pod_delay, label
-     #     )
-     #     self.deploy_delayed_readiness_pod(
-     #         delayed_respawn_2, namespace, pod_delay, label
-     #     )
-     #     self.wait_pod(delayed_respawn_1, namespace)
-     #     self.wait_pod(delayed_respawn_2, namespace)
-     #     result = pods_thread.join()
-     #     self.background_delete_pod(delayed_3, namespace)
-     #     self.background_delete_pod(delayed_respawn_1, namespace)
-     #     self.background_delete_pod(delayed_respawn_2, namespace)
-     #     self.assertIsNone(result.error)
-     #     self.assertEqual(len(result.unrecovered), 0)
-     #     self.assertEqual(len(result.recovered), 2)
-     #     self.assertTrue(
-     #         delayed_respawn_1 in [p.pod_name for p in result.recovered]
-     #     )
-     #     self.assertTrue(
-     #         delayed_respawn_2 in [p.pod_name for p in result.recovered]
-     #     )
-     #     self.background_delete_ns(namespace)
-
-     ######## FLAKY TEST NEEDS TO BE REFACTORED # NOQA
-     # def test_pods_by_label_multiple_respawn_one_too_late(self):
-     #     # test pod will never recover
-     #     namespace = "test-ns-4-" + self.get_random_string(10)
-     #     delayed_1 = "delayed-4-" + self.get_random_string(10)
-     #     delayed_2 = "delayed-4-" + self.get_random_string(10)
-     #     delayed_3 = "delayed-4-" + self.get_random_string(10)
-     #     delayed_respawn_1 = "delayed-4-respawn-" + self.get_random_string(10)
-     #     delayed_respawn_2 = "delayed-4-respawn-" + self.get_random_string(10)
-     #     label = "readiness-" + self.get_random_string(5)
-     #     self.deploy_namespace(namespace, [])
-     #     self.deploy_delayed_readiness_pod(delayed_1, namespace, 0, label)
-     #     self.deploy_delayed_readiness_pod(delayed_2, namespace, 0, label)
-     #     self.deploy_delayed_readiness_pod(delayed_3, namespace, 0, label)
-     #     self.wait_pod(delayed_1, namespace)
-     #     self.wait_pod(delayed_2, namespace)
-     #     self.wait_pod(delayed_3, namespace)
-     #     monitor_timeout = 20
-     #     pod_delay = 2
-     #     pod_too_much_delay = 25
-     #     pods_and_namespaces = self.lib_k8s.select_pods_by_label(
-     #         f"test={label}", field_selector="status.phase=Running"
-     #     )
-     #     pods_thread = self.lib_k8s.monitor_pods_by_label(
-     #         f"test={label}",
-     #         pods_and_namespaces,
-     #         field_selector="status.phase=Running",
-     #         max_timeout=monitor_timeout,
-     #     )
-     #     self.background_delete_pod(delayed_1, namespace)
-     #     self.background_delete_pod(delayed_2, namespace)
-     #
-     #     self.deploy_delayed_readiness_pod(
-     #         delayed_respawn_1, namespace, pod_delay, label
-     #     )
-     #     self.deploy_delayed_readiness_pod(
-     #         delayed_respawn_2, namespace, pod_too_much_delay, label
-     #     )
-     #     self.wait_pod(delayed_respawn_1, namespace)
-     #     result = pods_thread.join()
-     #     self.assertIsNone(result.error)
-     #     self.assertEqual(len(result.unrecovered), 1)
-     #     self.assertEqual(len(result.recovered), 1)
-     #     self.assertTrue(
-     #         delayed_respawn_1 in [p.pod_name for p in result.recovered]
-     #     )
-     #     self.assertTrue(
-     #         delayed_respawn_2 in [p.pod_name for p in result.unrecovered]
-     #     )
-     #     self.background_delete_ns(namespace)
-
-     def test_pods_by_label_multiple_respawn_one_fails(self):
-         # test pod will never recover
-         namespace = "test-ns-4-" + self.get_random_string(10)
-         delayed_1 = "delayed-4-" + self.get_random_string(10)
-         delayed_2 = "delayed-4-" + self.get_random_string(10)
-         delayed_3 = "delayed-4-" + self.get_random_string(10)
-         delayed_respawn_1 = "delayed-4-respawn-" + self.get_random_string(10)
-         label = "readiness-" + self.get_random_string(5)
-         self.deploy_namespace(namespace, [])
-         self.deploy_delayed_readiness_pod(delayed_1, namespace, 0, label)
-         self.deploy_delayed_readiness_pod(delayed_2, namespace, 0, label)
-         self.deploy_delayed_readiness_pod(delayed_3, namespace, 0, label)
-         self.wait_pod(delayed_1, namespace)
-         self.wait_pod(delayed_2, namespace)
-         self.wait_pod(delayed_3, namespace)
-
-         monitor_timeout = 10
-         pod_delay = 1
-         pods_and_namespaces = self.lib_k8s.select_pods_by_label(
-             f"test={label}", field_selector="status.phase=Running"
-         )
-         pods_thread = self.lib_k8s.monitor_pods_by_label(
-             f"test={label}",
-             pods_and_namespaces,
-             field_selector="status.phase=Running",
-             max_timeout=monitor_timeout,
-         )
-
-         self.background_delete_pod(delayed_1, namespace)
-         self.background_delete_pod(delayed_2, namespace)
-         time.sleep(3)
-         self.deploy_delayed_readiness_pod(
-             delayed_respawn_1, namespace, pod_delay, label
-         )
-         result = pods_thread.join()
-         self.assertEqual(len(result.unrecovered), 0)
-         self.assertEqual(len(result.recovered), 1)
-         self.background_delete_ns(namespace)
-
-
- if __name__ == "__main__":
-     unittest.main()
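The tests above depend on a BaseTest helper, deploy_delayed_readiness_pod(name, namespace, delay, label), whose implementation is not part of this diff. As a rough sketch of the idea it relies on, a pod that only becomes Ready after a configurable delay and carries the test=<label> label used by the selectors could look roughly like this, built with the official kubernetes Python client; the image, probe and field values are assumptions, not the actual fixture.

# Sketch only: one way to express a "delayed readiness" pod, i.e. a pod whose
# readiness probe starts passing only after `delay` seconds. The real BaseTest
# fixture is not shown in this diff and may differ.
from kubernetes import client, config


def delayed_readiness_pod(name: str, namespace: str, delay: int, label: str):
    container = client.V1Container(
        name="delayed",
        image="busybox:1.36",
        # Touch a marker file after the delay, then keep the pod alive.
        command=["sh", "-c", f"sleep {delay} && touch /tmp/ready && sleep 3600"],
        readiness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["cat", "/tmp/ready"]),
            period_seconds=1,
        ),
    )
    return client.V1Pod(
        metadata=client.V1ObjectMeta(
            # the "test" label key matches the f"test={label}" selectors above
            name=name, namespace=namespace, labels={"test": label}
        ),
        spec=client.V1PodSpec(containers=[container], restart_policy="Never"),
    )


if __name__ == "__main__":
    config.load_kube_config()
    pod = delayed_readiness_pod("delayed-demo", "default", delay=5, label="demo")
    client.CoreV1Api().create_namespaced_pod("default", pod)

With a delay larger than the monitor's max_timeout, such a pod ends up in the unrecovered list, which is the behaviour test_pods_by_label_respawn_timeout asserts; with a short delay it lands in recovered, as in the respawn tests.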