mas-cli 10.1.1-py3-none-any.whl → 10.2.0-py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mas-cli might be problematic.
- mas/cli/__init__.py +1 -1
- mas/cli/cli.py +8 -4
- mas/cli/displayMixins.py +9 -0
- mas/cli/install/app.py +26 -15
- mas/cli/install/argParser.py +95 -89
- mas/cli/install/settings/additionalConfigs.py +65 -3
- mas/cli/install/summarizer.py +7 -0
- mas/cli/templates/ibm-mas-tekton.yaml +105 -105
- mas/cli/update/__init__.py +11 -0
- mas/cli/update/app.py +612 -0
- mas/cli/update/argParser.py +120 -0
- {mas_cli-10.1.1.data → mas_cli-10.2.0.data}/scripts/mas-cli +9 -3
- {mas_cli-10.1.1.dist-info → mas_cli-10.2.0.dist-info}/METADATA +1 -1
- {mas_cli-10.1.1.dist-info → mas_cli-10.2.0.dist-info}/RECORD +16 -13
- {mas_cli-10.1.1.dist-info → mas_cli-10.2.0.dist-info}/WHEEL +0 -0
- {mas_cli-10.1.1.dist-info → mas_cli-10.2.0.dist-info}/top_level.txt +0 -0
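
Before the file-by-file detail: the headline change in 10.2.0 is a new update flow under mas/cli/update/, exposed through UpdateApp.update(argv) in the app.py listing below. As a rough sketch only, a non-interactive run might look like the following; the flag spellings are inferred from the error messages inside app.py (--no-confirm, --dro-migration, --dro-storage-class, --mongodb-v6-upgrade), the storage class name is purely illustrative, and it assumes UpdateApp can be constructed with no arguments, since mas/cli/update/argParser.py and the scripts/mas-cli wiring are not reproduced here.

    # Hypothetical non-interactive run of the new update flow.
    # Flag names are inferred from error messages in mas/cli/update/app.py;
    # the authoritative definitions live in mas/cli/update/argParser.py.
    from mas.cli.update.app import UpdateApp

    UpdateApp().update([
        "--mas-catalog-version", "v9-240625-amd64",   # required when running non-interactively
        "--mongodb-v6-upgrade",                       # pre-approve the MongoDB CE 5.x -> 6.x major upgrade
        "--dro-migration",                            # pre-approve the UDS -> DRO migration
        "--dro-storage-class", "ibmc-file-gold",      # illustrative storage class name only
        "--no-confirm",
    ])

Run interactively (without --mas-catalog-version), the same method instead connects to a cluster, reviews the installed catalog and MAS instances, and prompts through each dependency check shown in the listing below.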
mas/cli/update/app.py
ADDED
@@ -0,0 +1,612 @@
#!/usr/bin/env python
# *****************************************************************************
# Copyright (c) 2024 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# *****************************************************************************

import re
import logging
import logging.handlers
from halo import Halo
from prompt_toolkit import print_formatted_text, HTML
from prompt_toolkit.completion import WordCompleter

from openshift.dynamic.exceptions import NotFoundError, ResourceNotFoundError

from ..cli import BaseApp
from ..validators import StorageClassValidator
from .argParser import updateArgParser

from mas.devops.ocp import createNamespace, getStorageClasses, getConsoleURL
from mas.devops.mas import listMasInstances
from mas.devops.tekton import preparePipelinesNamespace, installOpenShiftPipelines, updateTektonDefinitions, launchUpdatePipeline


logger = logging.getLogger(__name__)

class UpdateApp(BaseApp):

    def update(self, argv):
        """
        Update MAS instance
        """
        self.args = updateArgParser.parse_args(args=argv)
        self.noConfirm = self.args.no_confirm

        if self.args.mas_catalog_version:
            # Non-interactive mode
            logger.debug("Maximo Operator Catalog version is set, so we assume already connected to the desired OCP")
            requiredParams = ["mas_catalog_version"]
            optionalParams = [
                "db2_namespace",
                "mongodb_namespace",
                "mongodb_v5_upgrade",
                "mongodb_v6_upgrade",
                "kafka_namespace",
                "kafka_provider",
                "dro_migration",
                "dro_storage_class",
                "dro_namespace"
            ]
            for key, value in vars(self.args).items():
                # These fields we just pass straight through to the parameters and fail if they are not set
                if key in requiredParams:
                    if value is None:
                        self.fatalError(f"{key} must be set")
                    self.setParam(key, value)

                # These fields we just pass straight through to the parameters
                elif key in optionalParams:
                    if value is not None:
                        self.setParam(key, value)

                # Arguments that we don't need to do anything with
                elif key in ["skip_pre_check", "no_confirm", "help"]:
                    pass

                # Fail if there are any arguments we don't know how to handle
                else:
                    print(f"Unknown option: {key} {value}")
                    self.fatalError(f"Unknown option: {key} {value}")
        else:
            # Interactive mode
            self.printH1("Set Target OpenShift Cluster")
            # Connect to the target cluster
            self.connect()

        if self.dynamicClient is None:
            self.fatalError("The Kubernetes dynamic Client is not available. See log file for details")

        self.reviewCurrentCatalog()
        self.reviewMASInstance()

        if self.args.mas_catalog_version is None:
            # Interactive mode
            self.chooseCatalog()

        # Validations
        self.validateCatalog()

        self.printH1("Dependency Update Checks")
        with Halo(text='Checking for IBM Watson Discovery', spinner=self.spinner) as h:
            if self.isWatsonDiscoveryInstalled():
                h.stop_and_persist(symbol=self.failureIcon, text=f"IBM Watson Discovery is installed")
                self.fatalError("Watson Discovery is currently installed in the instance of Cloud Pak for Data that is managed by the MAS CLI (in the ibm-cpd namespace), this is no longer supported and the update can not proceed as a result. Please contact IBM support for assistance")
            else:
                h.stop_and_persist(symbol=self.successIcon, text=f"IBM Watson Discovery is not installed")

        with Halo(text='Checking for IBM Certificate-Manager', spinner=self.spinner) as h:
            if self.isIBMCertManagerInstalled():
                h.stop_and_persist(symbol=self.successIcon, text=f"IBM Certificate-Manager will be replaced by Red Hat Certificate-Manager")
                self.setParam("cert_manager_action", "install")
                self.setParam("cert_manager_provider", "redhat")
                self.printHighlight([
                    "<u>Migration Notice</u>",
                    "IBM Certificate-Manager is currently running in the ${CERT_MANAGER_NAMESPACE} namespace",
                    "This will be uninstalled and replaced by Red Hat Certificate-Manager as part of this update",
                    ""
                ])
            else:
                h.stop_and_persist(symbol=self.successIcon, text=f"IBM Certificate-Manager is not installed")

        self.detectUDS()
        self.detectGrafana4()
        self.detectMongoDb()
        self.detectDb2uOrKafka("db2")
        self.detectDb2uOrKafka("kafka")
        self.detectCP4D()

        print()

        self.printH1("Review Settings")
        self.printDescription([
            "Connected to:",
            f" - <u>{getConsoleURL(self.dynamicClient)}</u>"
        ])

        self.printH2("IBM Maximo Operator Catalog")
        self.printSummary("Installed Catalog", self.installedCatalogId)
        self.printSummary("Updated Catalog", self.getParam("mas_catalog_version"))

        self.printH2("Supported Dependency Updates")
        if self.getParam("db2_namespace") != "":
            self.printSummary("IBM Db2", f"All Db2uCluster instances in {self.getParam('db2_namespace')}")
        else:
            self.printSummary("IBM Db2", "No action required")

        if self.getParam("mongodb_namespace") != "":
            self.printSummary("MongoDb CE", f"All MongoDbCommunity instances in {self.getParam('mongodb_namespace')}")
        else:
            self.printSummary("MongoDb CE", "No action required")

        if self.getParam("kafka_namespace") != "":
            self.printSummary("Apache Kafka", f"All Kafka instances in {self.getParam('kafka_namespace')}")
        else:
            self.printSummary("Apache Kafka", "No action required")

        if self.getParam("cp4d_update") != "":
            self.printSummary("IBM Cloud Pak for Data", f"Platform and services in ibm-cpd")
        else:
            self.printSummary("IBM Cloud Pak for Data", "No action required")

        self.printH2("Required Migrations")
        self.printSummary("IBM Certificate-Manager", "Migrate to Red Hat Certificate-Manager" if self.getParam("cert_manager_action") != "" else "No action required")
        self.printSummary("IBM User Data Services", "Migrate to IBM Data Reporter Operator" if self.getParam("dro_migration") != "" else "No action required")
        self.printSummary("Grafana v4 Operator", "Migrate to Grafana v5 Operator" if self.getParam("grafana_v5_upgrade") != "" else "No action required")

        if not self.noConfirm:
            print()
            self.printDescription([
                "Please carefully review your choices above, correcting mistakes now is much easier than after the update has begun"
            ])
            continueWithUpdate = self.yesOrNo("Proceed with these settings")

        # Prepare the namespace and launch the installation pipeline
        if self.noConfirm or continueWithUpdate:
            self.printH1("Launch Update")
            pipelinesNamespace = f"mas-pipelines"

            with Halo(text='Validating OpenShift Pipelines installation', spinner=self.spinner) as h:
                installOpenShiftPipelines(self.dynamicClient)
                h.stop_and_persist(symbol=self.successIcon, text=f"OpenShift Pipelines Operator is installed and ready to use")

            with Halo(text=f'Preparing namespace ({pipelinesNamespace})', spinner=self.spinner) as h:
                createNamespace(self.dynamicClient, pipelinesNamespace)
                preparePipelinesNamespace(dynClient=self.dynamicClient)
                h.stop_and_persist(symbol=self.successIcon, text=f"Namespace is ready ({pipelinesNamespace})")

            with Halo(text=f'Installing latest Tekton definitions (v{self.version})', spinner=self.spinner) as h:
                updateTektonDefinitions(pipelinesNamespace, self.tektonDefsPath)
                h.stop_and_persist(symbol=self.successIcon, text=f"Latest Tekton definitions are installed (v{self.version})")

            with Halo(text=f"Submitting PipelineRun for MAS update", spinner=self.spinner) as h:
                pipelineURL = launchUpdatePipeline(dynClient=self.dynamicClient, params=self.params)
                if pipelineURL is not None:
                    h.stop_and_persist(symbol=self.successIcon, text=f"PipelineRun for MAS update submitted")
                    print_formatted_text(HTML(f"\nView progress:\n <Cyan><u>{pipelineURL}</u></Cyan>\n"))
                else:
                    h.stop_and_persist(symbol=self.failureIcon, text=f"Failed to submit PipelineRun for MAS update, see log file for details")
        print()

    def reviewCurrentCatalog(self) -> None:
        catalogsAPI = self.dynamicClient.resources.get(api_version="operators.coreos.com/v1alpha1", kind="CatalogSource")
        try:
            catalog = catalogsAPI.get(name="ibm-operator-catalog", namespace="openshift-marketplace")
            catalogDisplayName = catalog.spec.displayName
            catalogImage = catalog.spec.image

            m = re.match(r".+(?P<catalogId>v[89]-(?P<catalogVersion>[0-9]+)-amd64)", catalogDisplayName)
            if m:
                # catalogId = v8-yymmdd-amd64
                # catalogVersion = yymmdd
                self.installedCatalogId = m.group("catalogId")
            elif re.match(r".+v8-amd64", catalogDisplayName):
                self.installedCatalogId = "v8-amd64"
            else:
                self.installedCatalogId = None
                self.printWarning(f"Unable to determine identity & version of currently installed ibm-maximo-operator-catalog")

            self.printH1("Review Installed Catalog")
            self.printDescription([
                f"The currently installed Maximo Operator Catalog is <u>{catalogDisplayName}</u>",
                f" <u>{catalogImage}</u>"
            ])
        except NotFoundError as e:
            self.fatalError("Unable to locate existing install of the IBM Maximo Operator Catalog", e)

    def reviewMASInstance(self) -> None:
        self.printH1("Review MAS Instances")
        self.printDescription(["The following MAS instances are installed on the target cluster and will be affected by the catalog update:"])
        try:
            suites = listMasInstances(self.dynamicClient)
            for suite in suites:
                self.printDescription([f"- <u>{suite['metadata']['name']}</u> v{suite['status']['versions']['reconciled']}"])
        except ResourceNotFoundError as e:
            self.fatalError("No MAS instances were detected on the cluster (Suite.core.mas.ibm.com/v1 API is not available). See log file for details")

    def chooseCatalog(self) -> None:
        self.printH1("Select IBM Maximo Operator Catalog Version")
        self.printDescription([
            "Select MAS Catalog",
            " 1) June 25 2024 Update (MAS 9.0.0, 8.11.12, & 8.10.15)",
            " 2) May 28 2024 Update (MAS 8.11.11 & 8.10.14)"
        ])

        catalogOptions = [
            "v9-240625-amd64", "v8-240528-amd64"
        ]
        self.promptForListSelect("Select catalog version", catalogOptions, "mas_catalog_version", default=1)

    def validateCatalog(self) -> None:
        if self.installedCatalogId is not None and self.installedCatalogId > self.getParam("mas_catalog_version"):
            self.fatalError(f"Selected catalog is older than the currently installed catalog. Unable to update catalog from {self.installedCatalogId} to {self.getParam('mas_catalog_version')}")

    def isWatsonDiscoveryInstalled(self) -> bool:
        try:
            wdAPI = self.dynamicClient.resources.get(api_version="discovery.watson.ibm.com/v1", kind="WatsonDiscovery")
            wds = wdAPI.get(namespace="ibm-cpd").to_dict()['items']
            if len(wds) > 0:
                return True
            return False
        except (ResourceNotFoundError, NotFoundError) as e:
            # Watson Discovery has never been installed on this cluster
            return False

    def isIBMCertManagerInstalled(self) -> bool:
        """
        Check whether the deprecated IBM Certificate-Manager is installed, if it is then we will
        automatically migrate to Red Hat Certificate-Manager
        """

        try:
            # Check if the 'ibm-common-services' namespace exists, this will throw a NotFoundError exception when not found
            namespaceAPI = self.dynamicClient.resources.get(api_version="v1", kind="Namespace")
            namespaceAPI.get(name="ibm-common-services")

            podsAPI = self.dynamicClient.resources.get(api_version="v1", kind="Pod")
            podsList = podsAPI.get(namespace="ibm-common-services")
            for pod in podsList.items:
                if pod is not None and "cert-manager-cainjector" in pod.metadata.name:
                    logger.debug("Found IBM Certificate-Manager in ibm-common-services namespace")
                    return True
            logger.debug("There is an ibm-common-services namespace, but we did not find the IBM Certificate-Manager installation")
            return False
        except NotFoundError:
            logger.debug("There is no ibm-common-services namespace")
            return False

    def detectGrafana4(self) -> bool:
        with Halo(text='Checking for Grafana Operator v4', spinner=self.spinner) as h:
            try:
                grafanaAPI = self.dynamicClient.resources.get(api_version="integreatly.org/v1alpha1", kind="Grafana")
                grafanaVersion4s = grafanaAPI.get().to_dict()["items"]

                # For testing, comment out the lines above and set grafanaVersion4s to a simple list
                # grafanaVersion4s = ["hello"]
                if len(grafanaVersion4s) > 0:
                    h.stop_and_persist(symbol=self.successIcon, text=f"Grafana Operator v4 instance will be updated to v5")
                    self.printDescription([
                        "<u>Dependency Upgrade Notice</u>",
                        "Grafana Operator v4 is currently installed and will be updated to v5",
                        "- Grafana v5 instance will have a new URL and admin password",
                        "- User accounts set up in the v4 instance will not be migrated"
                    ])
                    self.setParam("grafana_v5_upgrade", "true")
                else:
                    h.stop_and_persist(symbol=self.successIcon, text=f"Grafana Operator v4 is not installed")
                    return
            except (ResourceNotFoundError, NotFoundError) as e:
                h.stop_and_persist(symbol=self.successIcon, text=f"Grafana Operator v4 is not installed")

    def detectMongoDb(self) -> None:
        with Halo(text='Checking for MongoDb CE', spinner=self.spinner) as h:
            # TODO: Replace this with a lookup to just use whatever is already set up
            # because we should not be changing the scale of the mongodb cluster during
            # an update
            if self.isSNO():
                self.setParam("mongodb_replicas", "1")
            else:
                self.setParam("mongodb_replicas", "3")

            # Determine the namespace
            try:
                mongoDbAPI = self.dynamicClient.resources.get(api_version="mongodbcommunity.mongodb.com/v1", kind="MongoDBCommunity")
                mongoClusters = mongoDbAPI.get().to_dict()["items"]

                if len(mongoClusters) > 0:
                    mongoNamespace = mongoClusters[0]["metadata"]["namespace"]
                    currentMongoVersion = mongoClusters[0]["status"]["version"]

                    self.setParam("mongodb_namespace", mongoNamespace)

                    # Important:
                    # This CLI can run independent of the ibm.mas_devops collection, so we cannot reference
                    # the case bundles in there anymore
                    # Longer term we will centralise this information inside the mas-devops python collection,
                    # where it can be made available to both the ansible collection and this python package.
                    mongoVersions = {
                        "v8-240528-amd64": "5.0.23",
                        "v9-240625-amd64": "6.0.12"
                    }

                    targetMongoVersion = mongoVersions[self.getParam('mas_catalog_version')]
                    self.setParam("mongodb_version", targetMongoVersion)

                    targetMongoVersionMajor = targetMongoVersion.split(".")[0]
                    currentMongoVersionMajor = currentMongoVersion.split(".")[0]

                    if targetMongoVersionMajor > currentMongoVersionMajor:
                        # Let users know that Mongo will be upgraded if existing MongoDb major.minor version
                        # is lower than the target major version
                        # We don't show this message for normal updates, e.g. 5.0.1 to 5.0.2
                        if self.noConfirm and self.getParam(f"mongodb_v{targetMongoVersionMajor}_upgrade") != "true":
                            # The user has chosen not to provide confirmation but has not provided the flag to pre-approve the mongo major version update
                            h.stop_and_persist(symbol=self.failureIcon, text=f"MongoDb CE {currentMongoVersion} needs to be updated to {targetMongoVersion}")
                            self.showMongoDependencyUpdateNotice(currentMongoVersion, targetMongoVersion)
                            self.fatalError(f"By choosing {self.getParam('mas_catalog_version')} you must confirm MongoDb update to version {targetMongoVersionMajor} using '--mongodb-v{targetMongoVersionMajor}-upgrade' when using '--no-confirm'")
                        elif self.getParam(f"mongodb_v{targetMongoVersionMajor}_upgrade") != "true":
                            # The user has not pre-approved the major version update
                            h.stop_and_persist(symbol=self.successIcon, text=f"MongoDb CE {currentMongoVersion} needs to be updated to {targetMongoVersion}")
                            self.showMongoDependencyUpdateNotice(currentMongoVersion, targetMongoVersion)
                            if not self.yesOrNo(f"Confirm update from MongoDb {currentMongoVersion} to {targetMongoVersion}", f"mongodb_v{targetMongoVersionMajor}_upgrade"):
                                # If the user did not approve the update, abort
                                exit(1)
                            print()
                        else:
                            h.stop_and_persist(symbol=self.successIcon, text=f"MongoDb CE will be updated from {currentMongoVersion} to {targetMongoVersion}")
                            self.showMongoDependencyUpdateNotice(currentMongoVersion, targetMongoVersion)
                    elif targetMongoVersion < currentMongoVersion:
                        h.stop_and_persist(symbol=self.failureIcon, text=f"MongoDb CE {currentMongoVersion} cannot be downgraded to {targetMongoVersion}")
                        self.showMongoDependencyUpdateNotice(currentMongoVersion, targetMongoVersion)
                        self.fatalError(f"Existing MongoDB Community Edition installation at version {currentMongoVersion} cannot be downgraded to version {targetMongoVersion}")
                    else:
                        h.stop_and_persist(symbol=self.successIcon, text=f"MongoDb CE is already installed at version {targetMongoVersion}")
                else:
                    # There's no MongoDb instance installed in the cluster, so nothing to do
                    h.stop_and_persist(symbol=self.successIcon, text=f"No MongoDb CE instances found")
            except (ResourceNotFoundError, NotFoundError) as e:
                # There's no MongoDb instance installed in the cluster, so nothing to do
                h.stop_and_persist(symbol=self.successIcon, text=f"MongoDb CE is not installed")

    def showMongoDependencyUpdateNotice(self, currentMongoVersion, targetMongoVersion) -> None:
        self.printHighlight([
            "",
            "<u>Dependency Update Notice</u>",
            f"MongoDB Community Edition is currently running version {currentMongoVersion} and will be updated to {targetMongoVersion}",
            "It is recommended that you backup your MongoDB instance before proceeding:",
            " <u>https://www.ibm.com/docs/en/mas-cd/continuous-delivery?topic=suite-backing-up-mongodb-maximo-application</u>",
            ""
        ])

    def showUDSUpdateNotice(self) -> None:
        self.printHighlight([
            "",
            "<u>Dependency Update Notice</u>",
            "IBM User Data Services (UDS) is currently installed and will be replaced by IBM Data Reporter Operator (DRO)",
            "UDS will be uninstalled and <u>all MAS instances</u> will be re-configured to use DRO",
            ""
        ])

    def detectUDS(self) -> None:
        with Halo(text='Checking for IBM User Data Services', spinner=self.spinner) as h:
            try:
                analyticsProxyAPI = self.dynamicClient.resources.get(api_version="uds.ibm.com/v1", kind="AnalyticsProxy")
                analyticsProxies = analyticsProxyAPI.get(namespace="ibm-common-services").to_dict()['items']

                # Useful for testing: comment out the two lines above and set analyticsProxies to a
                # simple list to trigger the UDS migration logic.
                # analyticsProxies = ["foo"]
                if len(analyticsProxies) == 0:
                    logger.debug("UDS is not currently installed on this cluster")
                    h.stop_and_persist(symbol=self.successIcon, text=f"IBM User Data Services is not installed")
                else:
                    h.stop_and_persist(symbol=self.successIcon, text=f"IBM User Data Services must be migrated to IBM Data Reporter Operator")

                    if self.noConfirm and self.getParam("dro_migration") != "true":
                        # The user has chosen not to provide confirmation but has not provided the flag to pre-approve the migration
                        h.stop_and_persist(symbol=self.failureIcon, text=f"IBM User Data Services needs to be migrated to IBM Data Reporter Operator")
                        self.showUDSUpdateNotice()
                        self.fatalError(f"By choosing {self.getParam('mas_catalog_version')} you must confirm the migration to DRO using '--dro-migration' when using '--no-confirm'")
                    elif self.noConfirm and self.getParam("dro_storage_class") is None:
                        # The user has not provided the storage class to use for DRO, but has disabled confirmations/interactive prompts
                        h.stop_and_persist(symbol=self.failureIcon, text=f"IBM User Data Services needs to be migrated to IBM Data Reporter Operator")
                        self.showUDSUpdateNotice()
                        self.fatalError(f"By choosing {self.getParam('mas_catalog_version')} you must provide the storage class to use for the migration to DRO using '--dro-storage-class' when using '--no-confirm'")
                    else:
                        h.stop_and_persist(symbol=self.successIcon, text="IBM User Data Services needs to be migrated to IBM Data Reporter Operator")
                        self.showUDSUpdateNotice()
                        if not self.yesOrNo("Confirm migration from UDS to DRO", "dro_migration"):
                            # If the user did not approve the update, abort
                            exit(1)
                        self.printDescription([
                            "",
                            "Select the storage class for DRO to use from the list below:"
                        ])
                        for storageClass in getStorageClasses(self.dynamicClient):
                            print_formatted_text(HTML(f"<LightSlateGrey> - {storageClass.metadata.name}</LightSlateGrey>"))
                        self.promptForString("DRO storage class", "dro_storage_class", validator=StorageClassValidator())

            except (ResourceNotFoundError, NotFoundError) as e:
                # UDS has never been installed on this cluster
                logger.debug("UDS has not been installed on this cluster before")
                h.stop_and_persist(symbol=self.successIcon, text=f"IBM User Data Services is not installed")

    def detectCP4D(self) -> bool:
        # Important:
        # This CLI can run independent of the ibm.mas_devops collection, so we cannot reference
        # the case bundles in there anymore
        # Longer term we will centralise this information inside the mas-devops python collection,
        # where it can be made available to both the ansible collection and this python package.
        cp4dVersions = {
            "v8-240528-amd64": "4.6.6",
            "v9-240625-amd64": "4.8.0"
        }

        with Halo(text='Checking for IBM Cloud Pak for Data', spinner=self.spinner) as h:
            try:
                cpdAPI = self.dynamicClient.resources.get(api_version="cpd.ibm.com/v1", kind="Ibmcpd")
                cpds = cpdAPI.get().to_dict()["items"]

                # For testing, comment out the lines above and set cpds to a simple list
                # cpds = [{
                #     "metadata": {"namespace": "ibm-cpd" },
                #     "spec": {
                #         "version": "4.6.6",
                #         "storageClass": "default",
                #         "zenCoreMetadbStorageClass": "default"
                #     }
                # }]

                if len(cpds) > 0:
                    cpdInstanceNamespace = cpds[0]["metadata"]["namespace"]
                    cpdInstanceVersion = cpds[0]["spec"]["version"]
                    cpdTargetVersion = cp4dVersions[self.getParam("mas_catalog_version")]

                    currentCpdVersionMajorMinor = f"{cpdInstanceVersion.split('.')[0]}.{cpdInstanceVersion.split('.')[1]}"
                    targetCpdVersionMajorMinor = f"{cpdTargetVersion.split('.')[0]}.{cpdTargetVersion.split('.')[1]}"

                    if cpdInstanceVersion < cpdTargetVersion:
                        # We have to update CP4D
                        h.stop_and_persist(symbol=self.successIcon, text=f"IBM Cloud Pak for Data {cpdInstanceVersion} needs to be updated to {cpdTargetVersion}")

                        if currentCpdVersionMajorMinor < targetCpdVersionMajorMinor:
                            # We only show the "backup first" notice for minor CP4D updates
                            self.printHighlight([
                                "",
                                "<u>Dependency Update Notice</u>",
                                f"Cloud Pak For Data is currently running version {cpdInstanceVersion} and will be updated to version {cpdTargetVersion}",
                                "It is recommended that you backup your Cloud Pak for Data instance before proceeding:",
                                " <u>https://www.ibm.com/docs/en/cloud-paks/cp-data/4.8.x?topic=administering-backing-up-restoring-cloud-pak-data</u>"
                            ])

                        # Lookup the storage classes already used by CP4D
                        # Note: this should be done by the Ansible role, but isn't
                        if "storageClass" in cpds[0]["spec"]:
                            cpdFileStorage = cpds[0]["spec"]["storageClass"]
                        elif "fileStorageClass" in cpds[0]["spec"]:
                            cpdFileStorage = cpds[0]["spec"]["fileStorageClass"]
                        else:
                            self.fatalError("Unable to determine the file storage class used in IBM Cloud Pak for Data")

                        if "zenCoreMetadbStorageClass" in cpds[0]["spec"]:
                            cpdBlockStorage = cpds[0]["spec"]["zenCoreMetadbStorageClass"]
                        elif "blockStorageClass" in cpds[0]["spec"]:
                            cpdBlockStorage = cpds[0]["spec"]["blockStorageClass"]
                        else:
                            self.fatalError("Unable to determine the block storage class used in IBM Cloud Pak for Data")

                        # Set the desired storage classes (the same ones already in use)
                        self.setParam("storage_class_rwx", cpdFileStorage)
                        self.setParam("storage_class_rwo", cpdBlockStorage)

                        # Set the desired target version
                        self.setParam("cpd_product_version", cpdTargetVersion)
                        self.setParam("cp4d_update", "true")
                        self.setParam("skip_entitlement_key_flag", "true")

                        self.detectCpdService('WS', 'ws.cpd.ibm.com/v1beta1', 'Watson Studio', "cp4d_update_ws")
                        self.detectCpdService('WmlBase', 'wml.cpd.ibm.com/v1beta1', 'Watson Machine Learning', "cp4d_update_wml")
                        self.detectCpdService('AnalyticsEngine', 'ae.cpd.ibm.com/v1', 'Analytics Engine', "cp4d_update_spark")
                        self.detectCpdService('WOService', 'wos.cpd.ibm.com/v1', 'Watson Openscale', "cp4d_update_wos")
                        self.detectCpdService('Spss', 'spssmodeler.cpd.ibm.com/v1', 'SPSS Modeler', "cp4d_update_spss")
                        self.detectCpdService('CAService', 'ca.cpd.ibm.com/v1', 'Cognos Analytics', "cp4d_update_cognos")
                    else:
                        h.stop_and_persist(symbol=self.successIcon, text=f"IBM Cloud Pak for Data is already installed at version {cpdTargetVersion}")
                else:
                    h.stop_and_persist(symbol=self.successIcon, text=f"No IBM Cloud Pak for Data instance found")
            except (ResourceNotFoundError, NotFoundError) as e:
                h.stop_and_persist(symbol=self.successIcon, text=f"IBM Cloud Pak for Data is not installed")

    def detectCpdService(self, kind: str, api: str, name: str, param: str) -> None:
        try:
            cpdServiceAPI = self.dynamicClient.resources.get(api_version=api, kind=kind)
            cpdServices = cpdServiceAPI.get().to_dict()["items"]

            if len(cpdServices) > 0:
                logger.debug(f"{name} is included in CP4D update")
                self.setParam(param, "true")
            else:
                logger.debug(f"{name} is not included in CP4D update")
                self.setParam(param, "false")

        except (ResourceNotFoundError, NotFoundError) as e:
            # No action required for this service
            logger.debug(f"{name} is not included in CP4D update: {e}")
            self.setParam(param, "false")

    def detectDb2uOrKafka(self, mode: str) -> bool:
        if mode == "db2":
            haloStartingMessage = "Checking for Db2uCluster instances to update"
            apiVersion = "db2u.databases.ibm.com/v1"
            kind = "Db2uCluster"
            paramName = "db2_namespace"
        elif mode == "kafka":
            haloStartingMessage = "Checking for Kafka instances to update"
            apiVersion = "kafka.strimzi.io/v1beta2"
            kind = "Kafka"
            paramName = "kafka_namespace"
        else:
            self.fatalError("Unexpected error")

        with Halo(text=haloStartingMessage, spinner=self.spinner) as h:
            try:
                k8sAPI = self.dynamicClient.resources.get(api_version=apiVersion, kind=kind)
                instances = k8sAPI.get().to_dict()["items"]

                logger.debug(f"Found {len(instances)} {kind} instances on the cluster")
                if len(instances) > 0:
                    # If the user provided the namespace using --db2-namespace then we don't have any work to do here
                    if self.getParam(paramName) == "":
                        namespaces = set()
                        for instance in instances:
                            namespaces.add(instance["metadata"]["namespace"])

                        if len(namespaces) == 1:
                            # If db2u is only in one namespace, we will update that
                            h.stop_and_persist(symbol=self.successIcon, text=f"{len(instances)} {kind}s ({apiVersion}) in namespace '{list(namespaces)[0]}' will be updated")
                            logger.debug(f"There is only one namespace containing {kind}s so we will target that one: {namespaces}")
                            self.setParam(paramName, list(namespaces)[0])
                        elif self.noConfirm:
                            # If db2u is in multiple namespaces and the user has disabled prompts then we must error
                            h.stop_and_persist(symbol=self.failureIcon, text=f"{len(instances)} {kind}s ({apiVersion}) were found in multiple namespaces")
                            logger.warning(f"There are multiple namespaces containing {kind}s and the user has enabled --no-confirm without setting --{mode}-namespace: {namespaces}")
                            self.fatalError(f"{kind}s are installed in multiple namespaces. You must instruct which one to update using the '--{mode}-namespace' argument")
                        else:
                            # Otherwise, provide the user with the list of namespaces we found and ask them to pick one
                            h.stop_and_persist(symbol=self.successIcon, text=f"{len(instances)} {kind}s ({apiVersion}) found in multiple namespaces")
                            logger.debug(f"There are multiple namespaces containing {kind}s, user must choose: {namespaces}")
                            self.printDescription([
                                f"{kind}s were found in multiple namespaces, select the namespace to target from the list below:"
                            ])
                            for ns in sorted(namespaces):
                                self.printDescription([f"1. {ns}"])
                            self.promptForListSelect("Select namespace", sorted(namespaces), paramName)
                else:
                    logger.debug(f"Found no instances of {kind} to update")
                    h.stop_and_persist(symbol=self.successIcon, text=f"Found no {kind} ({apiVersion}) instances to update")
            except (ResourceNotFoundError, NotFoundError) as e:
                logger.debug(f"{kind}.{apiVersion} is not available in the cluster")
                h.stop_and_persist(symbol=self.successIcon, text=f"{kind}.{apiVersion} is not available in the cluster")

        # With Kafka we also have to determine the provider (strimzi or redhat)
        if mode == "kafka" and self.getParam("kafka_namespace") != "" and self.getParam("kafka_provider") == "":
            try:
                subAPI = self.dynamicClient.resources.get(api_version="operators.coreos.com/v1alpha1", kind="Subscription")
                subs = subAPI.get().to_dict()["items"]

                for sub in subs:
                    if sub["spec"]["name"] == "amq-streams":
                        self.setParam("kafka_provider", "redhat")
                    elif sub["spec"]["name"] == "strimzi-kafka-operator":
                        self.setParam("kafka_provider", "strimzi")
            except (ResourceNotFoundError, NotFoundError) as e:
                pass

            # If the param is still undefined then there is a big problem
            if self.getParam("kafka_provider") == "":
                self.fatalError("Unable to determine whether the installed Kafka instance is managed by Strimzi or Red Hat AMQ Streams")