pyxecm-1.5-py3-none-any.whl → pyxecm-1.6-py3-none-any.whl

This diff shows the contents of two publicly released versions of the package, exactly as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of pyxecm might be problematic.

@@ -52,6 +52,10 @@ import sys
 import time
 from dataclasses import dataclass, field
 from datetime import datetime
+import uuid
+import xml.etree.ElementTree as ET
+import json
+import re

 # from packaging.version import Version

@@ -59,7 +63,8 @@ import requests

 # OpenText specific modules:
 import yaml
-from pyxecm import OTAC, OTCS, OTDS, OTIV, OTPD, OTMM, CoreShare
+from pyxecm import OTAC, OTCS, OTDS, OTIV, OTPD, OTMM, CoreShare, OTAWP
+from pyxecm.avts import AVTS
 from pyxecm.customizer.k8s import K8s
 from pyxecm.customizer.m365 import M365
 from pyxecm.customizer.payload import Payload
@@ -105,6 +110,7 @@ class CustomizerSettingsOTDS:
     admin_partition: str = "otds.admin"
     public_url: str = os.environ.get("OTDS_PUBLIC_URL")
     password: str = os.environ.get("OTDS_PASSWORD")
+    bindPassword: str = os.environ.get("BINB_PASSWORD")
     disable_password_policy: bool = True
     enable_audit: bool = True

@@ -124,6 +130,7 @@ class CustomizerSettingsOTCS:
     port_backend: int = os.environ.get("OTCS_SERVICE_PORT_OTCS", 8080)
     port_frontend: int = 80
     base_path: str = "/cs/cs"
+    feme_uri: str = os.environ.get("FEME_URI", "ws://feme:4242")
     admin: str = os.environ.get("OTCS_ADMIN", "admin")
     password: str = os.environ.get("OTCS_PASSWORD")
     partition: str = os.environ.get("OTCS_PARTITION", "Content Server Members")
@@ -217,11 +224,13 @@ class CustomizerSettingsOTAWP:
     resource_name: str = "awp"
     access_role_name: str = "Access to " + resource_name
     admin: str = os.environ.get("OTAWP_ADMIN", "sysadmin")
-    password: str = os.environ.get("OTAWP_PASSWORD")
+    password: str = os.environ.get("OTCS_PASSWORD")
     public_protocol: str = os.environ.get("OTAWP_PROTOCOL", "https")
     public_url: str = os.environ.get("OTAWP_PUBLIC_URL")
     k8s_statefulset: str = "appworks"
     k8s_configmap: str = "appworks-config-ymls"
+    port: int = os.environ.get("OTAWP_SERVICE_PORT", 8080)
+    protocol: str = os.environ.get("OTPD_PROTOCOL", "http")


 @dataclass
@@ -262,6 +271,19 @@ class CustomizerSettingsAviator:
     enabled: bool = os.environ.get("AVIATOR_ENABLED", "false").lower() == "true"


+@dataclass
+class CustomizerSettingsAVTS:
+    """Class for Aviator Search (AVTS) related settings"""
+
+    enabled: bool = os.environ.get("AVTS_ENABLED", "false").lower() == "true"
+    otds_url = os.environ.get("AVTS_OTDS_URL", "")
+    client_id = os.environ.get("AVTS_CLIENT_ID", "")
+    client_secret = os.environ.get("AVTS_CLIENT_SECRET", "")
+    base_url = os.environ.get("AVTS_BASE_URL", "")
+    username = os.environ.get("AVTS_USERNAME", "")
+    password = os.environ.get("AVTS_PASSWORD", "")
+
+
 class Customizer:
     """Customizer Class to control the cusomization automation

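Two details of this new settings class are worth noting: every default is read from the environment, and only enabled carries a type annotation, so the remaining names become plain class attributes rather than dataclass fields. A minimal standalone sketch of the same pattern (all values here are hypothetical):

import os
from dataclasses import dataclass

# Hypothetical environment; in a real deployment these are injected into the pod.
os.environ["AVTS_ENABLED"] = "true"
os.environ["AVTS_BASE_URL"] = "https://avts.example.org"

# os.environ.get() runs at class-definition time, so the environment must be
# populated before the class (or the module containing it) is defined.
@dataclass
class SearchSettings:
    enabled: bool = os.environ.get("AVTS_ENABLED", "false").lower() == "true"
    base_url: str = os.environ.get("AVTS_BASE_URL", "")

print(SearchSettings())  # SearchSettings(enabled=True, base_url='https://avts.example.org')
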
@@ -281,6 +303,7 @@ class Customizer:
         m365: CustomizerSettingsM365 = CustomizerSettingsM365(),
         core_share: CustomizerSettingsCoreShare = CustomizerSettingsCoreShare(),
         aviator: CustomizerSettingsAviator = CustomizerSettingsAviator(),
+        avts: CustomizerSettingsAVTS = CustomizerSettingsAVTS(),
     ):
         self.settings = settings

@@ -314,6 +337,9 @@ class Customizer:
         # Aviator variables:
         self.aviator_settings = aviator

+        # Aviator Search variables:
+        self.avts_settings = avts
+
         # Initialize Objects for later assignment
         self.otds_object: OTDS | None = None
         self.otcs_object: OTCS | None = None
@@ -326,6 +352,7 @@ class Customizer:
         self.m365_object: M365 | None = None
         self.core_share_object: CoreShare | None = None
         self.browser_automation_object: BrowserAutomation | None = None
+        self.otawp_object: OTAWP | None = None

         # end initializer

@@ -613,7 +640,51 @@ class Customizer:

     # end method definition

-    def init_coreshare(self) -> M365:
+    def init_avts(self) -> AVTS:
+        """Initialize the Core Share object we use to talk to the Core Share API.
+
+        Args:
+            None
+        Returns:
+            object: CoreShare object or None if the object couldn't be created or
+                    the authentication fails.
+        """
+
+        logger.info(
+            "Aviator Search Base URL = %s", self.avts_settings.base_url
+        )
+        logger.info(
+            "Aviator Search OTDS URL = %s", self.avts_settings.otds_url
+        )
+        logger.info(
+            "Aviator Search Client ID = %s", self.avts_settings.client_id
+        )
+        logger.debug(
+            "Aviator Search Client Secret = %s",
+            self.avts_settings.client_secret,
+        )
+        logger.info(
+            "Aviator Search User ID = %s", self.avts_settings.username
+        )
+        logger.debug(
+            "Aviator Search User Password = %s",
+            self.avts_settings.password,
+        )
+
+        avts_object = AVTS(
+            otds_url=self.avts_settings.otds_url,
+            base_url=self.avts_settings.base_url,
+            client_id=self.avts_settings.client_id,
+            client_secret=self.avts_settings.client_secret,
+            username=self.avts_settings.username,
+            password=self.avts_settings.password,
+        )
+
+        return avts_object
+
+    # end method definition
+
+    def init_coreshare(self) -> CoreShare:
         """Initialize the Core Share object we use to talk to the Core Share API.

         Args:
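For reference, a standalone sketch of the same constructor call with literal values (all hypothetical). Nothing in the diff suggests the constructor itself authenticates, so that presumably happens on later API calls:

from pyxecm.avts import AVTS

# Hypothetical endpoints and credentials, mirroring the keyword arguments
# that init_avts() passes in the diff:
avts = AVTS(
    otds_url="https://otds.example.org",
    base_url="https://avts.example.org",
    client_id="avts-client",
    client_secret="********",
    username="admin",
    password="********",
)
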
@@ -772,6 +843,7 @@ class Customizer:
             username=self.otds_settings.username,
             password=self.otds_settings.password,
             otds_ticket=self.otds_settings.otds_ticket,
+            bindPassword=self.otds_settings.bindPassword
         )

         logger.info("Authenticating to OTDS...")
@@ -921,6 +993,10 @@ class Customizer:
             "OTCS K8s Backend Pods = %s",
             self.otcs_settings.k8s_statefulset_backend,
         )
+        logger.info(
+            "FEME URI = %s",
+            self.otcs_settings.feme_uri,
+        )

         logger.debug("Checking if OTCS object has already been initialized")

@@ -938,6 +1014,7 @@ class Customizer:
             resource_name,
             otds_ticket=otds_ticket,
             base_path=self.otcs_settings.base_path,
+            feme_uri=self.otcs_settings.feme_uri,
         )

         # It is important to wait for OTCS to be configured - otherwise we
@@ -1164,7 +1241,7 @@ class Customizer:
         awp_resource = self.otds_object.get_resource(self.otawp_settings.resource_name)
         if not awp_resource:
             logger.info(
-                "OTDS resource -> %s for AppWorks Platform does not yet exist. Creating...",
+                "OTDS resource -> '%s' for AppWorks Platform does not yet exist. Creating...",
                 self.otawp_settings.resource_name,
             )
             # Create a Python dict with the special payload we need for AppWorks:
@@ -1377,10 +1454,10 @@ class Customizer:
             ]

             awp_resource = self.otds_object.add_resource(
-                self.otawp_settings.resource_name,
-                "AppWorks Platform",
-                "AppWorks Platform",
-                additional_payload,
+                name=self.otawp_settings.resource_name,
+                description="AppWorks Platform",
+                display_name="AppWorks Platform",
+                additional_payload=additional_payload,
             )
         else:
             logger.info(
@@ -1559,6 +1636,15 @@ class Customizer:
             self.otawp_settings.product_name,
             "USERS",
         )
+        otawp_object = OTAWP(
+            self.otawp_settings.protocol,
+            self.otawp_settings.k8s_statefulset,
+            str(self.otawp_settings.port),
+            "sysadmin",
+            self.otawp_settings.password,
+            "",
+        )
+        return otawp_object

     # end method definition

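Unlike the AVTS call above, the OTAWP client is constructed positionally. Read against the new settings fields, the arguments appear to be protocol, host (the StatefulSet name doubling as the in-cluster service hostname), port, user, password, and a trailing empty string; a sketch with literal values, where the meaning of the last argument is an assumption:

from pyxecm import OTAWP

otawp = OTAWP(
    "http",      # protocol, the OTPD_PROTOCOL default from the new settings
    "appworks",  # k8s_statefulset, apparently used as the in-cluster hostname
    "8080",      # OTAWP_SERVICE_PORT, passed as a string
    "sysadmin",
    "********",  # password (now defaulting to OTCS_PASSWORD per the diff)
    "",          # assumed to be an (empty) OTDS ticket
)
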
@@ -1585,14 +1671,20 @@ class Customizer:

             logger.info("Deactivate Liveness probe for pod -> '%s'", pod_name)
             self.k8s_object.exec_pod_command(
-                pod_name, ["/bin/sh", "-c", "touch /tmp/keepalive"]
+                pod_name,
+                ["/bin/sh", "-c", "touch /tmp/keepalive"],
+                container="otcs-frontend-container",
             )
             logger.info("Restarting pod -> '%s'", pod_name)
             self.k8s_object.exec_pod_command(
-                pod_name, ["/bin/sh", "-c", "/opt/opentext/cs/stop_csserver"]
+                pod_name,
+                ["/bin/sh", "-c", "/opt/opentext/cs/stop_csserver"],
+                container="otcs-frontend-container",
             )
             self.k8s_object.exec_pod_command(
-                pod_name, ["/bin/sh", "-c", "/opt/opentext/cs/start_csserver"]
+                pod_name,
+                ["/bin/sh", "-c", "/opt/opentext/cs/start_csserver"],
+                container="otcs-frontend-container",
             )

         # Restart all backends:
@@ -1601,14 +1693,20 @@ class Customizer:

             logger.info("Deactivate Liveness probe for pod -> '%s'", pod_name)
             self.k8s_object.exec_pod_command(
-                pod_name, ["/bin/sh", "-c", "touch /tmp/keepalive"]
+                pod_name,
+                ["/bin/sh", "-c", "touch /tmp/keepalive"],
+                container="otcs-admin-container",
             )
             logger.info("Restarting pod -> '%s'", pod_name)
             self.k8s_object.exec_pod_command(
-                pod_name, ["/bin/sh", "-c", "/opt/opentext/cs/stop_csserver"]
+                pod_name,
+                ["/bin/sh", "-c", "/opt/opentext/cs/stop_csserver"],
+                container="otcs-admin-container",
             )
             self.k8s_object.exec_pod_command(
-                pod_name, ["/bin/sh", "-c", "/opt/opentext/cs/start_csserver"]
+                pod_name,
+                ["/bin/sh", "-c", "/opt/opentext/cs/start_csserver"],
+                container="otcs-admin-container",
             )

         logger.info("Re-Authenticating to OTCS after restart of pods...")
@@ -1625,7 +1723,9 @@ class Customizer:

             logger.info("Reactivate Liveness probe for pod -> '%s'", pod_name)
             self.k8s_object.exec_pod_command(
-                pod_name, ["/bin/sh", "-c", "rm /tmp/keepalive"]
+                pod_name,
+                ["/bin/sh", "-c", "rm /tmp/keepalive"],
+                container="otcs-frontend-container",
             )

         for x in range(0, self.otcs_settings.replicas_backend):
@@ -1633,7 +1733,9 @@ class Customizer:

             logger.info("Reactivate Liveness probe for pod -> '%s'", pod_name)
             self.k8s_object.exec_pod_command(
-                pod_name, ["/bin/sh", "-c", "rm /tmp/keepalive"]
+                pod_name,
+                ["/bin/sh", "-c", "rm /tmp/keepalive"],
+                container="otcs-admin-container",
             )

         logger.info("Restart OTCS frontend and backend pods has been completed.")
@@ -1822,7 +1924,7 @@ class Customizer:
             self.log_header("Initialize OTAWP")

             # Configure required OTDS resources as AppWorks doesn't do this on its own:
-            self.init_otawp()
+            self.otawp_object = self.init_otawp()
         else:
             self.settings.placeholder_values["OTAWP_RESOURCE_ID"] = ""

@@ -1898,6 +2000,15 @@ class Customizer:
             logger.error("Failed to initialize Microsoft 365!")
             sys.exit()

+        if self.avts_settings.enabled:
+            self.log_header("Initialize Aviator Search")
+            self.avts_object = self.init_avts()
+            if not self.avts_object:
+                logger.error("Failed to initialize Aviator Search")
+                sys.exit()
+        else:
+            self.avts_object = None
+
         self.log_header("Processing Payload")

         cust_payload_list = []
@@ -1914,7 +2025,9 @@ class Customizer:

         # do we have additional payload as an external file?
         if os.path.exists(self.settings.cust_payload_external):
-            for filename in os.scandir(self.settings.cust_payload_external):
+            for filename in sorted(
+                os.scandir(self.settings.cust_payload_external), key=lambda e: e.name
+            ):
                 if filename.is_file() and os.path.getsize(filename) > 0:
                     logger.info("Found external payload file -> '%s'", filename.path)
                     cust_payload_list.append(filename.path)
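os.scandir() yields directory entries in arbitrary, filesystem-dependent order, so wrapping it in sorted() makes external payload files apply in a stable, name-based order (e.g. 01-users.yaml before 02-groups.yaml). A standalone sketch of the pattern, with a hypothetical directory:

import os

payload_dir = "/payload-external"  # hypothetical directory

if os.path.exists(payload_dir):
    # sorted() materializes the scandir iterator and orders entries by name,
    # so the processing order no longer depends on the filesystem.
    for entry in sorted(os.scandir(payload_dir), key=lambda e: e.name):
        if entry.is_file() and os.path.getsize(entry) > 0:
            print("Found external payload file ->", entry.path)
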
@@ -1948,6 +2061,8 @@ class Customizer:
             stop_on_error=self.settings.stop_on_error,
             aviator_enabled=self.aviator_settings.enabled,
             upload_status_files=self.otcs_settings.upload_status_files,
+            otawp_object=self.otawp_object,
+            avts_object=self.avts_object,
         )
         # Load the payload file and initialize the payload sections:
         if not payload_object.init_payload():
@@ -2101,4 +2216,30 @@ class Customizer:
             )
         )

-    # end method definition
+
+if __name__ == "__main__":
+    logging.basicConfig(
+        format="%(asctime)s %(levelname)s [%(name)s] %(message)s",
+        datefmt="%d-%b-%Y %H:%M:%S",
+        level=logging.INFO,
+        handlers=[
+            logging.StreamHandler(sys.stdout),
+        ],
+    )
+
+    my_customizer = Customizer(
+        otcs=CustomizerSettingsOTCS(
+            hostname="otcs.local.xecm.cloud",
+            hostname_backend="otcs-admin-0",
+            hostname_frontend="otcs-frontend",
+            protocol="http",
+            port_backend=8080,
+        ),
+        otds=CustomizerSettingsOTDS(hostname="otds"),
+        otpd=CustomizerSettingsOTPD(enabled=False),
+        otac=CustomizerSettingsOTAC(enabled=False),
+        k8s=CustomizerSettingsK8S(enabled=True),
+        otiv=CustomizerSettingsOTIV(enabled=False),
+    )
+
+    my_customizer.customization_run()
pyxecm/customizer/k8s.py CHANGED
@@ -154,7 +154,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to get Pod -> %s; error -> %s", pod_name, str(exception)
+                "Failed to get Pod -> '%s'; error -> %s", pod_name, str(exception)
             )
             return None

@@ -189,7 +189,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to list Pods with field_selector -> %s and label_selector -> %s; error -> %s",
+                "Failed to list Pods with field_selector -> '%s' and label_selector -> '%s'; error -> %s",
                 field_selector,
                 label_selector,
                 str(exception),
@@ -223,13 +223,13 @@ class K8s:
             for cond in pod_status.status.conditions:
                 if cond.type == condition_name and cond.status == "True":
                     logger.info(
-                        "Pod -> %s is in state -> %s!", pod_name, condition_name
+                        "Pod -> '%s' is in state -> '%s'!", pod_name, condition_name
                     )
                     ready = True
                     break
                 else:
                     logger.info(
-                        "Pod -> %s is not yet in state -> %s. Waiting...",
+                        "Pod -> '%s' is not yet in state -> '%s'. Waiting...",
                         pod_name,
                         condition_name,
                     )
@@ -238,7 +238,7 @@ class K8s:

         except ApiException as exception:
             logger.error(
-                "Failed to wait for pod -> %s; error -> %s",
+                "Failed to wait for pod -> '%s'; error -> %s",
                 pod_name,
                 str(exception),
             )
@@ -246,7 +246,12 @@ class K8s:

     # end method definition

     def exec_pod_command(
-        self, pod_name: str, command: list, max_retry: int = 3, time_retry: int = 10
+        self,
+        pod_name: str,
+        command: list,
+        max_retry: int = 3,
+        time_retry: int = 10,
+        container: str | None = None,
     ):
         """Execute a command inside a Kubernetes Pod (similar to kubectl exec on command line).
         See: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/CoreV1Api.md#connect_get_namespaced_pod_exec
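The new container parameter lets callers target one container of a multi-container pod, which is what the restart logic above now does for the OTCS frontend and backend pods; left at None, the Kubernetes API falls back to the pod's default container. A sketch of a call, with hypothetical pod and container names:

from pyxecm.customizer.k8s import K8s

k8s = K8s()  # assumes kubeconfig / in-cluster configuration is already in place

output = k8s.exec_pod_command(
    "otcs-frontend-0",                          # hypothetical pod name
    ["/bin/sh", "-c", "touch /tmp/keepalive"],
    container="otcs-frontend-container",
)
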
@@ -261,9 +266,9 @@ class K8s:

         pod = self.get_pod(pod_name)
         if not pod:
-            logger.error("Pod -> %s does not exist", pod_name)
+            logger.error("Pod -> '%s' does not exist", pod_name)

-        logger.debug("Execute command -> %s in pod -> %s", command, pod_name)
+        logger.debug("Execute command -> %s in pod -> '%s'", command, pod_name)

         retry_counter = 1

@@ -274,6 +279,7 @@ class K8s:
                     pod_name,
                     self.get_namespace(),
                     command=command,
+                    container=container,
                     stderr=True,
                     stdin=False,
                     stdout=True,
@@ -283,7 +289,7 @@ class K8s:
                 return response
             except ApiException as exc:
                 logger.warning(
-                    "Failed to execute command, retry (%s/%s) -> %s in pod -> %s; error -> %s",
+                    "Failed to execute command, retry (%s/%s) -> %s in pod -> '%s'; error -> %s",
                     retry_counter,
                     max_retry,
                     command,
@@ -297,7 +303,7 @@ class K8s:
                 continue

         logger.error(
-            "Failed to execute command with %s retries -> %s in pod -> %s; error -> %s",
+            "Failed to execute command with %s retries -> %s in pod -> '%s'; error -> %s",
             max_retry,
             command,
             pod_name,
@@ -360,7 +366,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to execute command -> %s in pod -> %s; error -> %s",
+                "Failed to execute command -> %s in pod -> '%s'; error -> %s",
                 command,
                 pod_name,
                 str(exception),
@@ -381,7 +387,7 @@ class K8s:
                 got_response = True
                 if commands:
                     command = commands.pop(0)
-                    logger.debug("Execute command -> %s in pod -> %s", command, pod_name)
+                    logger.debug("Execute command -> %s in pod -> '%s'", command, pod_name)
                     response.write_stdin(command + "\n")
                 else:
                     # We continue as long as we get some response during timeout period
@@ -422,7 +428,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to delete Pod -> %s; error -> %s", pod_name, str(exception)
+                "Failed to delete Pod -> '%s'; error -> %s", pod_name, str(exception)
             )
             return None

@@ -456,7 +462,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to get Config Map -> %s; error -> %s",
+                "Failed to get Config Map -> '%s'; error -> %s",
                 config_map_name,
                 str(exception),
             )
@@ -493,7 +499,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to list Config Maps with field_selector -> %s and label_selector -> %s; error -> %s",
+                "Failed to list Config Maps with field_selector -> '%s' and label_selector -> '%s'; error -> %s",
                 field_selector,
                 label_selector,
                 str(exception),
@@ -521,7 +527,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to find Config Map -> %s; error -> %s",
+                "Failed to find Config Map -> '%s'; error -> %s",
                 config_map_name,
                 str(exception),
             )
@@ -556,7 +562,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to replace Config Map -> %s; error -> %s",
+                "Failed to replace Config Map -> '%s'; error -> %s",
                 config_map_name,
                 str(exception),
             )
@@ -583,7 +589,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to get Stateful Set -> %s; error -> %s",
+                "Failed to get Stateful Set -> '%s'; error -> %s",
                 sts_name,
                 str(exception),
             )
@@ -610,7 +616,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to get scaling (replicas) of Stateful Set -> %s; error -> %s",
+                "Failed to get scaling (replicas) of Stateful Set -> '%s'; error -> %s",
                 sts_name,
                 str(exception),
             )
@@ -638,7 +644,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to patch Stateful Set -> %s with -> %s; error -> %s",
+                "Failed to patch Stateful Set -> '%s' with -> %s; error -> %s",
                 sts_name,
                 sts_body,
                 str(exception),
@@ -667,7 +673,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to scale Stateful Set -> %s to -> %s replicas; error -> %s",
+                "Failed to scale Stateful Set -> '%s' to -> %s replicas; error -> %s",
                 sts_name,
                 scale,
                 str(exception),
@@ -695,7 +701,9 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to get Service -> %s; error -> %s", service_name, str(exception)
+                "Failed to get Service -> '%s'; error -> %s",
+                service_name,
+                str(exception),
             )
             return None

@@ -731,7 +739,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to list Services with field_selector -> %s and label_selector -> %s; error -> %s",
+                "Failed to list Services with field_selector -> '%s' and label_selector -> '%s'; error -> %s",
                 field_selector,
                 label_selector,
                 str(exception),
@@ -762,7 +770,7 @@ class K8s:
             )
         except ApiException as exception:
             logger.error(
-                "Failed to patch Service -> %s with -> %s; error -> %s",
+                "Failed to patch Service -> '%s' with -> %s; error -> %s",
                 service_name,
                 service_body,
                 str(exception),
@@ -925,4 +933,110 @@ class K8s:

         return self.patch_ingress(ingress_name, body)

-    # end method definition
+    def verify_pod_status(
+        self,
+        pod_name: str,
+        timeout: int = 1200,
+        total_containers: int = 1,
+        ready_containers: int = 1,
+        retry_interval: int = 30,
+    ) -> bool:
+        """
+        Verifies if a pod is in a 'Ready' state by checking the status of its containers.
+
+        This function waits for a Kubernetes pod to reach the 'Ready' state, where a specified number
+        of containers are ready. It checks the pod status at regular intervals and reports the status
+        using logs. If the pod does not reach the 'Ready' state within the specified timeout,
+        it returns `False`.
+
+        Args:
+            pod_name (str): The name of the pod to check the status for.
+            timeout (int, optional): The maximum time (in seconds) to wait for the pod to become ready. Defaults to 1200.
+            total_containers (int, optional): The total number of containers expected to be running in the pod. Defaults to 1.
+            ready_containers (int, optional): The minimum number of containers that need to be in a ready state. Defaults to 1.
+            retry_interval (int, optional): Time interval (in seconds) between each retry to check pod readiness. Defaults to 30.
+
+        Returns:
+            bool: Returns `True` if the pod reaches the 'Ready' state with the specified number of containers ready
+                  within the timeout. Otherwise, returns `False`.
+        """
+
+        def wait_for_pod_ready(pod_name: str, timeout: int) -> bool:
+            """
+            Waits until the pod is in the 'Ready' state with the specified number of containers ready.
+
+            This internal function repeatedly checks the readiness of the pod, logging the
+            status of the containers. If the pod does not exist, it retries after waiting
+            and logs detailed information at each step.
+
+            Args:
+                pod_name (str): The name of the pod to check the status for.
+                timeout (int): The maximum time (in seconds) to wait for the pod to become ready.
+
+            Returns:
+                bool: Returns `True` if the pod is ready with the specified number of containers in a 'Ready' state.
+                      Otherwise, returns `False`.
+            """
+            elapsed_time = 0  # Initialize elapsed time
+
+            while elapsed_time < timeout:
+                pod = self.get_pod(pod_name)
+
+                if not pod:
+                    logger.error(
+                        "Pod -> %s does not exist, waiting 300 seconds to retry.",
+                        pod_name,
+                    )
+                    time.sleep(300)
+                    pod = self.get_pod(pod_name)
+
+                    if not pod:
+                        logger.error(
+                            "Pod -> %s still does not exist after retry!", pod_name
+                        )
+                        return False
+
+                # Get the ready status of containers
+                container_statuses = pod.status.container_statuses
+                if container_statuses and all(
+                    container.ready for container in container_statuses
+                ):
+                    current_ready_containers = sum(
+                        1 for c in container_statuses if c.ready
+                    )
+                    total_containers_in_pod = len(container_statuses)
+
+                    if (
+                        current_ready_containers >= ready_containers
+                        and total_containers_in_pod == total_containers
+                    ):
+                        logger.info(
+                            "Pod -> %s is ready with %d/%d containers.",
+                            pod_name,
+                            current_ready_containers,
+                            total_containers_in_pod,
+                        )
+                        return True
+                    else:
+                        logger.debug(
+                            "Pod -> %s is not yet ready (%d/%d).",
+                            pod_name,
+                            current_ready_containers,
+                            total_containers_in_pod,
+                        )
+                else:
+                    logger.debug("Pod -> %s is not yet ready.", pod_name)
+
+                logger.info(
+                    f"Waiting {retry_interval} seconds before next pod status check."
+                )
+                time.sleep(
+                    retry_interval
+                )  # Sleep for the retry interval before checking again
+                elapsed_time += retry_interval
+
+            logger.error("Pod -> %s is not ready after %d seconds.", pod_name, timeout)
+            return False
+
+        # Wait until the pod is ready
+        return wait_for_pod_ready(pod_name, timeout)
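Finally, a sketch of how the new helper might be called; it blocks until the pod reports the requested number of ready containers or the timeout expires (names and counts are hypothetical):

from pyxecm.customizer.k8s import K8s

k8s = K8s()  # assumes kubeconfig / in-cluster configuration is already in place

# Poll every 30 seconds, for up to 10 minutes, until both containers are ready:
if not k8s.verify_pod_status(
    "otcs-frontend-0",   # hypothetical pod name
    timeout=600,
    total_containers=2,
    ready_containers=2,
):
    raise RuntimeError("Pod did not become ready in time")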