pyxecm 2.0.0__py3-none-any.whl → 2.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in those public registries.

Potentially problematic release: this version of pyxecm might be problematic.

Files changed (50)
  1. pyxecm/__init__.py +2 -1
  2. pyxecm/avts.py +79 -33
  3. pyxecm/customizer/api/app.py +45 -796
  4. pyxecm/customizer/api/auth/__init__.py +1 -0
  5. pyxecm/customizer/api/{auth.py → auth/functions.py} +2 -64
  6. pyxecm/customizer/api/auth/router.py +78 -0
  7. pyxecm/customizer/api/common/__init__.py +1 -0
  8. pyxecm/customizer/api/common/functions.py +47 -0
  9. pyxecm/customizer/api/{metrics.py → common/metrics.py} +1 -1
  10. pyxecm/customizer/api/common/models.py +21 -0
  11. pyxecm/customizer/api/{payload_list.py → common/payload_list.py} +6 -1
  12. pyxecm/customizer/api/common/router.py +72 -0
  13. pyxecm/customizer/api/settings.py +25 -0
  14. pyxecm/customizer/api/terminal/__init__.py +1 -0
  15. pyxecm/customizer/api/terminal/router.py +87 -0
  16. pyxecm/customizer/api/v1_csai/__init__.py +1 -0
  17. pyxecm/customizer/api/v1_csai/router.py +87 -0
  18. pyxecm/customizer/api/v1_maintenance/__init__.py +1 -0
  19. pyxecm/customizer/api/v1_maintenance/functions.py +100 -0
  20. pyxecm/customizer/api/v1_maintenance/models.py +12 -0
  21. pyxecm/customizer/api/v1_maintenance/router.py +76 -0
  22. pyxecm/customizer/api/v1_otcs/__init__.py +1 -0
  23. pyxecm/customizer/api/v1_otcs/functions.py +61 -0
  24. pyxecm/customizer/api/v1_otcs/router.py +179 -0
  25. pyxecm/customizer/api/v1_payload/__init__.py +1 -0
  26. pyxecm/customizer/api/v1_payload/functions.py +179 -0
  27. pyxecm/customizer/api/v1_payload/models.py +51 -0
  28. pyxecm/customizer/api/v1_payload/router.py +499 -0
  29. pyxecm/customizer/browser_automation.py +567 -324
  30. pyxecm/customizer/customizer.py +204 -430
  31. pyxecm/customizer/guidewire.py +907 -43
  32. pyxecm/customizer/k8s.py +243 -56
  33. pyxecm/customizer/m365.py +104 -15
  34. pyxecm/customizer/payload.py +1943 -885
  35. pyxecm/customizer/pht.py +19 -2
  36. pyxecm/customizer/servicenow.py +22 -5
  37. pyxecm/customizer/settings.py +9 -6
  38. pyxecm/helper/xml.py +69 -0
  39. pyxecm/otac.py +1 -1
  40. pyxecm/otawp.py +2104 -1535
  41. pyxecm/otca.py +569 -0
  42. pyxecm/otcs.py +202 -38
  43. pyxecm/otds.py +35 -13
  44. {pyxecm-2.0.0.dist-info → pyxecm-2.0.2.dist-info}/METADATA +6 -32
  45. pyxecm-2.0.2.dist-info/RECORD +76 -0
  46. {pyxecm-2.0.0.dist-info → pyxecm-2.0.2.dist-info}/WHEEL +1 -1
  47. pyxecm-2.0.0.dist-info/RECORD +0 -54
  48. /pyxecm/customizer/api/{models.py → auth/models.py} +0 -0
  49. {pyxecm-2.0.0.dist-info → pyxecm-2.0.2.dist-info}/licenses/LICENSE +0 -0
  50. {pyxecm-2.0.0.dist-info → pyxecm-2.0.2.dist-info}/top_level.txt +0 -0
pyxecm/customizer/k8s.py CHANGED
@@ -202,7 +202,7 @@ class K8s:
  )
  except ApiException as e:
  if e.status == 404:
- self.logger.info("Pod -> '%s' not found (may be deleted).", pod_name)
+ self.logger.debug("Pod -> '%s' not found (may be deleted).", pod_name)
  return None
  else:
  self.logger.error("Failed to get Pod -> '%s'!", pod_name)
@@ -246,7 +246,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to list Pods with field_selector -> '%s' and label_selector -> '%s'",
+ "Failed to list pods with field selector -> '%s' and label selector -> '%s'",
  field_selector,
  label_selector,
  )
@@ -273,7 +273,7 @@ class K8s:
  The number of seconds to wait between repetitive status checks.

  Returns:
- True once the pod reaches the condition - otherwise wait forever.
+ None

  """

@@ -319,6 +319,7 @@ class K8s:
  max_retry: int = 3,
  time_retry: int = 10,
  container: str | None = None,
+ timeout: int = 60,
  ) -> str:
  """Execute a command inside a Kubernetes Pod (similar to kubectl exec on command line).

@@ -336,6 +337,9 @@ class K8s:
  Wait time in seconds between retries.
  container (str):
  The container name if the pod runs multiple containers inside.
+ timeout (int):
+ Timeout duration that is waited for any response in seconds.
+ Each time a response is found in stdout or stderr we wait another timeout duration [60]

  Returns:
  str:
@@ -363,6 +367,7 @@ class K8s:
  stdin=False,
  stdout=True,
  tty=False,
+ _request_timeout=timeout,
  )
  except ApiException as exc:
  self.logger.warning(
@@ -382,7 +387,7 @@ class K8s:
  time.sleep(time_retry)
  continue
  else:
- self.logger.debug(response)
+ self.logger.debug("Command execution response -> %s", response if response else "<empty>")
  return response

  self.logger.error(
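
The hunks above add a new timeout parameter to exec_pod_command() and pass it through to the Kubernetes exec stream as _request_timeout; the wait restarts whenever output arrives on stdout or stderr. A minimal call-site sketch (not part of the diff), assuming an already constructed pyxecm K8s instance named k8s; the pod name and command are hypothetical, and the leading positional parameters (pod name and command) are assumed, since they are not shown in this hunk:

    # Hedged sketch: k8s is an existing pyxecm K8s instance; pod name and command are hypothetical.
    output = k8s.exec_pod_command(
        "otcs-admin-0",                    # hypothetical pod name
        ["/bin/sh", "-c", "ls -l /opt"],   # hypothetical command
        timeout=120,                       # new in 2.0.2: per-response timeout in seconds (default 60)
    )
    print(output)
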
@@ -437,7 +442,7 @@ class K8s:
  self.logger.error("Pod -> '%s' does not exist", pod_name)

  if not commands:
- self.logger.error("No commands to execute on Pod ->'%s'!", pod_name)
+ self.logger.error("No commands to execute on pod ->'%s'!", pod_name)
  return None

  # Get first command - this should be the shell:
@@ -454,6 +459,7 @@ class K8s:
  stdout=True,
  tty=False,
  _preload_content=False, # This is important!
+ _request_timeout=timeout,
  )
  except ApiException:
  self.logger.error(
@@ -526,7 +532,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to delete Pod -> '%s'",
+ "Failed to delete pod -> '%s'",
  pod_name,
  )
  return None
@@ -571,7 +577,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to get Config Map -> '%s'",
+ "Failed to get config map -> '%s'",
  config_map_name,
  )
  return None
@@ -582,8 +588,8 @@ class K8s:

  def list_config_maps(
  self,
- field_selector: str = "",
- label_selector: str = "",
+ field_selector: str | None = None,
+ label_selector: str | None = None,
  ) -> V1ConfigMapList:
  """List all Kubernetes Config Maps in the current namespace.

@@ -616,7 +622,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to list Config Maps with field_selector -> '%s' and label_selector -> '%s'",
+ "Failed to list config maps with field selector -> '%s' and label selector -> '%s'",
  field_selector,
  label_selector,
  )
@@ -648,7 +654,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to find Config Map -> '%s'",
+ "Failed to find config map -> '%s'",
  config_map_name,
  )
  return None
@@ -692,7 +698,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to replace Config Map -> '%s'",
+ "Failed to replace config map -> '%s'",
  config_map_name,
  )
  return None
@@ -724,7 +730,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to get Stateful Set -> '%s'",
+ "Failed to get stateful set -> '%s'",
  sts_name,
  )
  return None
@@ -756,7 +762,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to get scaling (replicas) of Stateful Set -> '%s'",
+ "Failed to get scaling (replicas) of stateful set -> '%s'",
  sts_name,
  )
  return None
@@ -791,9 +797,9 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to patch Stateful Set -> '%s' with -> %s",
+ "Failed to patch stateful set -> '%s' with -> %s",
  sts_name,
- sts_body,
+ str(sts_body),
  )
  return None

@@ -826,7 +832,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to scale Stateful Set -> '%s' to -> %s replicas",
+ "Failed to scale stateful set -> '%s' to -> %s replicas",
  sts_name,
  scale,
  )
@@ -858,7 +864,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to get Service -> '%s'",
+ "Failed to get service -> '%s'",
  service_name,
  )
  return None
@@ -901,7 +907,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to list Services with field_selector -> '%s' and label_selector -> '%s'",
+ "Failed to list services with field selector -> '%s' and label selector -> '%s'",
  field_selector,
  label_selector,
  )
@@ -938,7 +944,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to patch Service -> '%s' with -> %s",
+ "Failed to patch service -> '%s' with -> %s",
  service_name,
  service_body,
  )
@@ -970,7 +976,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to get Ingress -> '%s'!",
+ "Failed to get ingress -> '%s'!",
  ingress_name,
  )
  return None
@@ -1008,7 +1014,7 @@ class K8s:
  )
  except ApiException:
  self.logger.error(
- "Failed to patch Ingress -> '%s' with -> %s",
+ "Failed to patch ingress -> '%s' with -> %s",
  ingress_name,
  ingress_body,
  )
@@ -1275,9 +1281,21 @@ class K8s:
  self.logger.info("Pod -> '%s' has been deleted successfully.", pod_name)
  return True

- self.logger.debug(
- "Pod -> '%s' still exists. Waiting %s seconds before next check.",
+ pod_status = self.get_core_v1_api().read_namespaced_pod_status(
  pod_name,
+ self.get_namespace(),
+ )
+
+ self.logger.info(
+ "Pod -> '%s' still exists with conditions -> %s. Waiting %s seconds before next check.",
+ pod_name,
+ str(
+ [
+ pod_condition.type
+ for pod_condition in pod_status.status.conditions
+ if pod_condition.status == "True"
+ ]
+ ),
  retry_interval,
  )
  time.sleep(retry_interval)
@@ -1289,12 +1307,20 @@ class K8s:

  # end method definition

- def restart_deployment(self, deployment_name: str) -> bool:
+ def restart_deployment(
+ self, deployment_name: str, force: bool = False, wait: bool = False, wait_timeout: int = 1800
+ ) -> bool:
  """Restart a Kubernetes deployment using rolling restart.

  Args:
  deployment_name (str):
  Name of the Kubernetes deployment.
+ force (bool):
+ If True, all pod instances will be forcefully deleted. [False]
+ wait (bool):
+ If True, wait for the stateful set to be ready again. [False]
+ wait_timeout (int):
+ Maximum time to wait for the stateful set to be ready again (in seconds). [1800]

  Returns:
  bool:
@@ -1302,45 +1328,137 @@ class K8s:

  """

- now = datetime.now(timezone.utc).isoformat(timespec="seconds") + "Z"
+ success = True

- body = {
- "spec": {
- "template": {
- "metadata": {
- "annotations": {
- "kubectl.kubernetes.io/restartedAt": now,
+ if not force:
+ now = datetime.now(timezone.utc).isoformat(timespec="seconds") + "Z"
+
+ body = {
+ "spec": {
+ "template": {
+ "metadata": {
+ "annotations": {
+ "kubectl.kubernetes.io/restartedAt": now,
+ },
  },
  },
  },
- },
- }
- try:
- self.get_apps_v1_api().patch_namespaced_deployment(
- deployment_name,
- self.get_namespace(),
- body,
- pretty="true",
- )
+ }
+ try:
+ self.get_apps_v1_api().patch_namespaced_deployment(
+ deployment_name,
+ self.get_namespace(),
+ body,
+ pretty="true",
+ )
+ self.logger.info("Triggered restart of deployment -> '%s'.", deployment_name)

- except ApiException:
- self.logger.exception(
- "Failed to restart deployment -> '%s'!",
- deployment_name,
+ except ApiException as api_exception:
+ self.logger.error(
+ "Failed to restart deployment -> '%s'; error -> %s!", deployment_name, str(api_exception)
+ )
+ return False
+
+ # If force is set, all pod instances will be forcefully deleted.
+ elif force:
+ self.logger.info("Force deleting all pods of deployment -> '%s'.", deployment_name)
+
+ try:
+ # Get the Deployment to retrieve its pod labels
+ deployment = self.get_apps_v1_api().read_namespaced_deployment(
+ name=deployment_name, namespace=self.get_namespace()
+ )
+
+ # Get the label selector for the Deployment
+ label_selector = deployment.spec.selector.match_labels
+
+ # List pods matching the label selector
+ pods = (
+ self.get_core_v1_api()
+ .list_namespaced_pod(
+ namespace=self.get_namespace(),
+ label_selector=",".join([f"{k}={v}" for k, v in label_selector.items()]),
+ )
+ .items
+ )
+
+ # Loop through the pods and delete each one
+ for pod in pods:
+ pod_name = pod.metadata.name
+ try:
+ # Define the delete options with force and grace period set to 0
+ body = client.V1DeleteOptions(grace_period_seconds=0, propagation_policy="Foreground")
+
+ # Call the delete_namespaced_pod method
+ self.get_core_v1_api().delete_namespaced_pod(
+ name=pod_name, namespace=self.get_namespace(), body=body
+ )
+ self.logger.info(
+ "Pod '%s' in namespace '%s' has been deleted forcefully.", pod_name, self.get_namespace()
+ )
+ except Exception as e:
+ self.logger.error("Error occurred while deleting pod '%s': %s", pod_name, e)
+ success = False
+
+ except Exception as e:
+ self.logger.error("Error occurred while getting Deployment '%s': %s", deployment_name, e)
+ success = False
+
+ start_time = time.time()
+ while wait:
+ self.logger.info("Waiting for restart of deployment -> '%s' to complete.", deployment_name)
+ # Get the deployment
+ deployment = self.get_apps_v1_api().read_namespaced_deployment_status(deployment_name, self.get_namespace())
+
+ # Check the availability status
+ ready_replicas = deployment.status.ready_replicas or 0
+ updated_replicas = deployment.status.updated_replicas or 0
+ unavailable_replicas = deployment.status.unavailable_replicas or 0
+ total_replicas = deployment.status.replicas or 0
+ desired_replicas = deployment.spec.replicas or 0
+
+ self.logger.debug(
+ "Deployment status -> updated pods: %s/%s -> ready replicas: %s/%s",
+ updated_replicas,
+ desired_replicas,
+ ready_replicas,
+ total_replicas,
  )
- return False

- else:
- return True
+ if (
+ updated_replicas == desired_replicas
+ and unavailable_replicas == 0
+ and total_replicas == desired_replicas
+ ):
+ self.logger.info("Restart of deployment -> '%s' completed successfully", deployment_name)
+ break
+
+ if (time.time() - start_time) > wait_timeout:
+ self.logger.error("Timed out waiting for restart of deployment -> '%s' to complete.", deployment_name)
+ success = False
+ break
+
+ # Sleep for a while before checking again
+ time.sleep(20)
+
+ return success

  # end method definition

- def restart_stateful_set(self, sts_name: str) -> bool:
+ def restart_stateful_set(
+ self, sts_name: str, force: bool = False, wait: bool = False, wait_timeout: int = 1800
+ ) -> bool:
  """Restart a Kubernetes stateful set using rolling restart.

  Args:
  sts_name (str):
  Name of the Kubernetes statefulset.
+ force (bool, optional):
+ If True, all pod instances will be forcefully deleted. [False]
+ wait (bool, optional):
+ If True, wait for the stateful set to be ready again. [False]
+ wait_timeout (int, optional):
+ Maximum time to wait for the stateful set to be ready again (in seconds). [1800]

  Returns:
  bool:
@@ -1348,6 +1466,8 @@ class K8s:

  """

+ success = True
+
  now = datetime.now(timezone.utc).isoformat(timespec="seconds") + "Z"

  body = {
@@ -1361,17 +1481,84 @@ class K8s:
  },
  },
  }
+
  try:
  self.get_apps_v1_api().patch_namespaced_stateful_set(sts_name, self.get_namespace(), body, pretty="true")
+ self.logger.info("Triggered restart of stateful set -> '%s'.", sts_name)

- except ApiException:
- self.logger.exception(
- "Failed to restart stateful set -> '%s'!",
- sts_name,
- )
+ except ApiException as api_exception:
+ self.logger.error("Failed to restart stateful set -> '%s'; error -> %s!", sts_name, str(api_exception))
  return False

- else:
- return True
+ # If force is set, all pod instances will be forcefully deleted.
+ if force:
+ self.logger.info("Force deleting all pods of stateful set -> '%s'.", sts_name)
+
+ try:
+ # Get the StatefulSet
+ statefulset = self.get_apps_v1_api().read_namespaced_stateful_set(
+ name=sts_name, namespace=self.get_namespace()
+ )
+
+ # Loop through the replicas of the StatefulSet
+ for i in range(statefulset.spec.replicas):
+ pod_name = f"{statefulset.metadata.name}-{i}"
+ try:
+ # Define the delete options with force and grace period set to 0
+ body = client.V1DeleteOptions(grace_period_seconds=0, propagation_policy="Foreground")
+
+ # Call the delete_namespaced_pod method
+ self.get_core_v1_api().delete_namespaced_pod(
+ name=pod_name, namespace=self.get_namespace(), body=body
+ )
+ self.logger.info(
+ "Pod -> '%s' in namespace -> '%s' has been deleted forcefully.",
+ pod_name,
+ self.get_namespace(),
+ )
+
+ except Exception as e:
+ self.logger.error("Error occurred while deleting pod -> '%s': %s", pod_name, str(e))
+ success = False
+
+ except Exception as e:
+ self.logger.error("Error occurred while getting stateful set -> '%s': %s", sts_name, str(e))
+ success = False
+
+ start_time = time.time()
+
+ while wait:
+ time.sleep(10) # Add delay before checking that the stateful set is ready again.
+ self.logger.info("Waiting for restart of stateful set -> '%s' to complete...", sts_name)
+ # Get the deployment
+ statefulset = self.get_apps_v1_api().read_namespaced_stateful_set_status(sts_name, self.get_namespace())
+
+ # Check the availability status
+ available_replicas = statefulset.status.available_replicas or 0
+ desired_replicas = statefulset.spec.replicas or 0
+
+ current_revision = statefulset.status.current_revision or ""
+ update_revision = statefulset.status.update_revision or ""
+
+ self.logger.debug(
+ "Stateful set status -> available pods: %s/%s, revision updated: %s",
+ available_replicas,
+ desired_replicas,
+ current_revision == update_revision,
+ )
+
+ if available_replicas == desired_replicas and update_revision == current_revision:
+ self.logger.info("Stateful set -> '%s' completed restart successfully", sts_name)
+ break
+
+ if (time.time() - start_time) > wait_timeout:
+ self.logger.error("Timed out waiting for restart of stateful set -> '%s' to complete.", sts_name)
+ success = False
+ break
+
+ # Sleep for a while before checking again
+ time.sleep(10)
+
+ return success

  # end method definition
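
Taken together, the restart hunks turn restart_deployment() and restart_stateful_set() from simple rolling-restart annotation patches into calls that can optionally force-delete the pods and block until the rollout has completed. A minimal usage sketch (not part of the diff), assuming an already constructed pyxecm K8s instance named k8s; the deployment and stateful set names are hypothetical:

    # Hedged sketch: k8s is an existing pyxecm K8s instance; workload names are hypothetical.
    # Default behaviour (force=False, wait=False) is still a plain rolling-restart annotation patch.
    k8s.restart_deployment("otawp-frontend")

    # Force-delete all pods of the stateful set and wait (up to 30 minutes) until the
    # available replicas match the desired count and the update revision is current.
    ok = k8s.restart_stateful_set(
        sts_name="otcs-backend",
        force=True,
        wait=True,
        wait_timeout=1800,
    )
    if not ok:
        k8s.logger.error("Restart of stateful set did not complete successfully.")

Note that with wait=True the methods poll the workload status every 10-20 seconds and return False on timeout or failed pod deletion rather than raising an exception.
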