pyxecm-1.4-py3-none-any.whl → pyxecm-1.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pyxecm has been flagged in the registry; see the registry listing for details.
- pyxecm/__init__.py +5 -0
- pyxecm/avts.py +1065 -0
- pyxecm/coreshare.py +2532 -0
- pyxecm/customizer/__init__.py +4 -0
- pyxecm/customizer/browser_automation.py +164 -54
- pyxecm/customizer/customizer.py +588 -231
- pyxecm/customizer/k8s.py +143 -29
- pyxecm/customizer/m365.py +1434 -1323
- pyxecm/customizer/payload.py +15073 -5933
- pyxecm/customizer/pht.py +926 -0
- pyxecm/customizer/salesforce.py +866 -351
- pyxecm/customizer/sap.py +4 -4
- pyxecm/customizer/servicenow.py +1467 -0
- pyxecm/customizer/successfactors.py +1056 -0
- pyxecm/helper/__init__.py +2 -0
- pyxecm/helper/assoc.py +44 -1
- pyxecm/helper/data.py +1731 -0
- pyxecm/helper/web.py +170 -46
- pyxecm/helper/xml.py +170 -34
- pyxecm/otac.py +309 -23
- pyxecm/otawp.py +1810 -0
- pyxecm/otcs.py +5308 -2985
- pyxecm/otds.py +1909 -1954
- pyxecm/otmm.py +928 -0
- pyxecm/otpd.py +13 -10
- {pyxecm-1.4.dist-info → pyxecm-1.6.dist-info}/METADATA +5 -1
- pyxecm-1.6.dist-info/RECORD +32 -0
- {pyxecm-1.4.dist-info → pyxecm-1.6.dist-info}/WHEEL +1 -1
- pyxecm-1.4.dist-info/RECORD +0 -24
- {pyxecm-1.4.dist-info → pyxecm-1.6.dist-info}/LICENSE +0 -0
- {pyxecm-1.4.dist-info → pyxecm-1.6.dist-info}/top_level.txt +0 -0
pyxecm/customizer/k8s.py
CHANGED
```diff
@@ -154,7 +154,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to get Pod -> %s; error -> %s", pod_name, str(exception)
+                "Failed to get Pod -> '%s'; error -> %s", pod_name, str(exception)
             )
             return None
 
@@ -189,7 +189,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to list Pods with field_selector -> %s and label_selector -> %s; error -> %s",
+                "Failed to list Pods with field_selector -> '%s' and label_selector -> '%s'; error -> %s",
                 field_selector,
                 label_selector,
                 str(exception),
@@ -223,13 +223,13 @@ class K8s:
                 for cond in pod_status.status.conditions:
                     if cond.type == condition_name and cond.status == "True":
                         logger.info(
-                            "Pod -> %s is in state -> %s!", pod_name, condition_name
+                            "Pod -> '%s' is in state -> '%s'!", pod_name, condition_name
                         )
                         ready = True
                         break
                     else:
                         logger.info(
-                            "Pod -> %s is not yet in state -> %s. Waiting...",
+                            "Pod -> '%s' is not yet in state -> '%s'. Waiting...",
                             pod_name,
                             condition_name,
                         )
@@ -238,7 +238,7 @@ class K8s:
 
         except ApiException as exception:
             logger.error(
-                "Failed to wait for pod -> %s; error -> %s",
+                "Failed to wait for pod -> '%s'; error -> %s",
                 pod_name,
                 str(exception),
             )
```
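Most of the changes in this file follow one pattern: `%s` placeholders in log messages are wrapped in single quotes so that pod, config map, and service names stand out in the log output. A minimal sketch of the effect, using the standard `logging` module (the logger name and the values below are illustrative, not taken from the package):

```python
import logging

logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")
logger = logging.getLogger("pyxecm.example")  # hypothetical logger name

pod_name = "otcs-admin-0"  # hypothetical pod name

# Before (1.4): ERROR Failed to get Pod -> otcs-admin-0; error -> timeout
logger.error("Failed to get Pod -> %s; error -> %s", pod_name, "timeout")

# After (1.6):  ERROR Failed to get Pod -> 'otcs-admin-0'; error -> timeout
logger.error("Failed to get Pod -> '%s'; error -> %s", pod_name, "timeout")
```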
```diff
@@ -246,7 +246,12 @@ class K8s:
     # end method definition
 
     def exec_pod_command(
-        self,
+        self,
+        pod_name: str,
+        command: list,
+        max_retry: int = 3,
+        time_retry: int = 10,
+        container: str | None = None,
     ):
         """Execute a command inside a Kubernetes Pod (similar to kubectl exec on command line).
         See: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/CoreV1Api.md#connect_get_namespaced_pod_exec
@@ -261,9 +266,9 @@ class K8s:
 
         pod = self.get_pod(pod_name)
         if not pod:
-            logger.error("Pod -> %s does not exist", pod_name)
+            logger.error("Pod -> '%s' does not exist", pod_name)
 
-        logger.
+        logger.debug("Execute command -> %s in pod -> '%s'", command, pod_name)
 
         retry_counter = 1
 
@@ -274,6 +279,7 @@ class K8s:
                     pod_name,
                     self.get_namespace(),
                     command=command,
+                    container=container,
                     stderr=True,
                     stdin=False,
                     stdout=True,
@@ -283,7 +289,7 @@ class K8s:
                 return response
             except ApiException as exc:
                 logger.warning(
-                    "Failed to execute command, retry (%s/%s) -> %s in pod -> %s; error -> %s",
+                    "Failed to execute command, retry (%s/%s) -> %s in pod -> '%s'; error -> %s",
                     retry_counter,
                     max_retry,
                     command,
@@ -292,12 +298,12 @@ class K8s:
                 )
                 retry_counter = retry_counter + 1
                 exception = exc
-                logger.
+                logger.debug("Wait %s seconds before next retry...", str(time_retry))
                 time.sleep(time_retry)
                 continue
 
         logger.error(
-            "Failed to execute command with %s retries -> %s in pod -> %s; error -> %s",
+            "Failed to execute command with %s retries -> %s in pod -> '%s'; error -> %s",
             max_retry,
             command,
             pod_name,
```
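The `exec_pod_command` signature now exposes the retry behaviour (`max_retry`, `time_retry`) and an optional `container` name for multi-container pods, which is passed through to the exec call referenced in the docstring. A minimal usage sketch (the constructor arguments, pod name, and container name below are illustrative assumptions, not taken from the package):

```python
from pyxecm.customizer.k8s import K8s

# Illustrative: how K8s is instantiated depends on your cluster / kubeconfig setup.
k8s = K8s(namespace="default")

# Run a shell command inside a specific container of a multi-container pod,
# retrying up to 5 times and waiting 15 seconds between attempts.
result = k8s.exec_pod_command(
    pod_name="otcs-admin-0",            # hypothetical pod name
    command=["/bin/sh", "-c", "ls -l /opt"],
    max_retry=5,
    time_retry=15,
    container="otcs-admin-container",   # hypothetical container name
)
print(result)
```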
```diff
@@ -360,7 +366,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to execute command -> %s in pod -> %s; error -> %s",
+                "Failed to execute command -> %s in pod -> '%s'; error -> %s",
                 command,
                 pod_name,
                 str(exception),
@@ -371,17 +377,17 @@ class K8s:
             got_response = False
             response.update(timeout=timeout)
             if response.peek_stdout():
-                logger.
+                logger.debug(response.read_stdout().replace("\n", " "))
                 got_response = True
             if response.peek_stderr():
                 if write_stderr_to_error_log:
                     logger.error(response.read_stderr().replace("\n", " "))
                 else:
-                    logger.
+                    logger.debug(response.read_stderr().replace("\n", " "))
                 got_response = True
             if commands:
                 command = commands.pop(0)
-                logger.
+                logger.debug("Execute command -> %s in pod -> '%s'", command, pod_name)
                 response.write_stdin(command + "\n")
             else:
                 # We continue as long as we get some response during timeout period
@@ -422,7 +428,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to delete Pod -> %s; error -> %s", pod_name, str(exception)
+                "Failed to delete Pod -> '%s'; error -> %s", pod_name, str(exception)
             )
             return None
 
@@ -456,7 +462,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to get Config Map -> %s; error -> %s",
+                "Failed to get Config Map -> '%s'; error -> %s",
                 config_map_name,
                 str(exception),
             )
@@ -493,7 +499,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to list Config Maps with field_selector -> %s and label_selector -> %s; error -> %s",
+                "Failed to list Config Maps with field_selector -> '%s' and label_selector -> '%s'; error -> %s",
                 field_selector,
                 label_selector,
                 str(exception),
@@ -521,7 +527,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to find Config Map -> %s; error -> %s",
+                "Failed to find Config Map -> '%s'; error -> %s",
                 config_map_name,
                 str(exception),
             )
@@ -556,7 +562,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to replace Config Map -> %s; error -> %s",
+                "Failed to replace Config Map -> '%s'; error -> %s",
                 config_map_name,
                 str(exception),
             )
@@ -583,7 +589,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to get Stateful Set -> %s; error -> %s",
+                "Failed to get Stateful Set -> '%s'; error -> %s",
                 sts_name,
                 str(exception),
             )
@@ -610,7 +616,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to get scaling (replicas) of Stateful Set -> %s; error -> %s",
+                "Failed to get scaling (replicas) of Stateful Set -> '%s'; error -> %s",
                 sts_name,
                 str(exception),
             )
@@ -638,7 +644,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to patch Stateful Set -> %s with -> %s; error -> %s",
+                "Failed to patch Stateful Set -> '%s' with -> %s; error -> %s",
                 sts_name,
                 sts_body,
                 str(exception),
@@ -667,7 +673,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to scale Stateful Set -> %s to -> %s replicas; error -> %s",
+                "Failed to scale Stateful Set -> '%s' to -> %s replicas; error -> %s",
                 sts_name,
                 scale,
                 str(exception),
@@ -695,7 +701,9 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to get Service -> %s; error -> %s",
+                "Failed to get Service -> '%s'; error -> %s",
+                service_name,
+                str(exception),
             )
             return None
 
@@ -731,7 +739,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to list Services with field_selector -> %s and label_selector -> %s; error -> %s",
+                "Failed to list Services with field_selector -> '%s' and label_selector -> '%s'; error -> %s",
                 field_selector,
                 label_selector,
                 str(exception),
@@ -762,7 +770,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to patch Service -> %s with -> %s; error -> %s",
+                "Failed to patch Service -> '%s' with -> %s; error -> %s",
                 service_name,
                 service_body,
                 str(exception),
@@ -888,7 +896,7 @@ class K8s:
                     backend = path.backend
                     service = backend.service
 
-                    logger.
+                    logger.debug(
                         "Replace backend service -> %s (%s) with new backend service -> %s (%s)",
                         service.name,
                         service.port.number,
@@ -925,4 +933,110 @@ class K8s:
 
         return self.patch_ingress(ingress_name, body)
 
-
+    def verify_pod_status(
+        self,
+        pod_name: str,
+        timeout: int = 1200,
+        total_containers: int = 1,
+        ready_containers: int = 1,
+        retry_interval: int = 30,
+    ) -> bool:
+        """
+        Verifies if a pod is in a 'Ready' state by checking the status of its containers.
+
+        This function waits for a Kubernetes pod to reach the 'Ready' state, where a specified number
+        of containers are ready. It checks the pod status at regular intervals and reports the status
+        using logs. If the pod does not reach the 'Ready' state within the specified timeout,
+        it returns `False`.
+
+        Args:
+            pod_name (str): The name of the pod to check the status for.
+            timeout (int, optional): The maximum time (in seconds) to wait for the pod to become ready. Defaults to 1200.
+            total_containers (int, optional): The total number of containers expected to be running in the pod. Defaults to 1.
+            ready_containers (int, optional): The minimum number of containers that need to be in a ready state. Defaults to 1.
+            retry_interval (int, optional): Time interval (in seconds) between each retry to check pod readiness. Defaults to 30.
+
+        Returns:
+            bool: Returns `True` if the pod reaches the 'Ready' state with the specified number of containers ready
+                within the timeout. Otherwise, returns `False`.
+        """
+
+        def wait_for_pod_ready(pod_name: str, timeout: int) -> bool:
+            """
+            Waits until the pod is in the 'Ready' state with the specified number of containers ready.
+
+            This internal function repeatedly checks the readiness of the pod, logging the
+            status of the containers. If the pod does not exist, it retries after waiting
+            and logs detailed information at each step.
+
+            Args:
+                pod_name (str): The name of the pod to check the status for.
+                timeout (int): The maximum time (in seconds) to wait for the pod to become ready.
+
+            Returns:
+                bool: Returns `True` if the pod is ready with the specified number of containers in a 'Ready' state.
+                    Otherwise, returns `False`.
+            """
+            elapsed_time = 0  # Initialize elapsed time
+
+            while elapsed_time < timeout:
+                pod = self.get_pod(pod_name)
+
+                if not pod:
+                    logger.error(
+                        "Pod -> %s does not exist, waiting 300 seconds to retry.",
+                        pod_name,
+                    )
+                    time.sleep(300)
+                    pod = self.get_pod(pod_name)
+
+                    if not pod:
+                        logger.error(
+                            "Pod -> %s still does not exist after retry!", pod_name
+                        )
+                        return False
+
+                # Get the ready status of containers
+                container_statuses = pod.status.container_statuses
+                if container_statuses and all(
+                    container.ready for container in container_statuses
+                ):
+                    current_ready_containers = sum(
+                        1 for c in container_statuses if c.ready
+                    )
+                    total_containers_in_pod = len(container_statuses)
+
+                    if (
+                        current_ready_containers >= ready_containers
+                        and total_containers_in_pod == total_containers
+                    ):
+                        logger.info(
+                            "Pod -> %s is ready with %d/%d containers.",
+                            pod_name,
+                            current_ready_containers,
+                            total_containers_in_pod,
+                        )
+                        return True
+                    else:
+                        logger.debug(
+                            "Pod -> %s is not yet ready (%d/%d).",
+                            pod_name,
+                            current_ready_containers,
+                            total_containers_in_pod,
+                        )
+                else:
+                    logger.debug("Pod -> %s is not yet ready.", pod_name)
+
+                logger.info(
+                    f"Waiting {retry_interval} seconds before next pod status check."
+                )
+                time.sleep(
+                    retry_interval
+                )  # Sleep for the retry interval before checking again
+                elapsed_time += retry_interval
+
+            logger.error("Pod -> %s is not ready after %d seconds.", pod_name, timeout)
+            return False
+
+        # Wait until the pod is ready
+        return wait_for_pod_ready(pod_name, timeout)
```
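The new `verify_pod_status` method polls a pod until the requested number of containers report ready or the timeout expires, checking every `retry_interval` seconds. A minimal usage sketch (the constructor arguments and pod name are illustrative assumptions, not taken from the package):

```python
from pyxecm.customizer.k8s import K8s

# Illustrative: how K8s is instantiated depends on your cluster / kubeconfig setup.
k8s = K8s(namespace="otxecm")

# Wait up to 10 minutes for a two-container pod to have both containers ready,
# re-checking every 60 seconds.
if k8s.verify_pod_status(
    pod_name="otcs-frontend-0",  # hypothetical pod name
    timeout=600,
    total_containers=2,
    ready_containers=2,
    retry_interval=60,
):
    print("Pod is ready.")
else:
    print("Pod did not become ready within the timeout.")
```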