cloudpub 1.5.0__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cloudpub/error.py CHANGED
@@ -21,5 +21,9 @@ class NotFoundError(ValueError):
     """Represent a missing resource."""


+class ConflictError(RuntimeError):
+    """Report a submission conflict error."""
+
+
 class Timeout(Exception):
     """Represent a timeout error."""
cloudpub/models/ms_azure.py CHANGED
@@ -711,11 +711,21 @@ class ProductProperty(AzureProductLinkedResource):
     `Schema definition for ProductProperty <https://schema.mp.microsoft.com/schema/property/2022-03-01-preview2>`_
     """ # noqa E501

+    schema: str = field(
+        validator=instance_of(str),
+        metadata={
+            "alias": MS_SCHEMA,
+            "const": "https://schema.mp.microsoft.com/schema/property/2022-03-01-preview2",
+        },
+    )
+    """
+    The `resource schema`_ for Graph API."""
+
     kind: str
     """Expected to be ``azureVM``"""

     terms_of_use: Optional[str] = field(
-        validator=optional(instance_of(str)), metadata={"alias": "termsOfUse"}
+        validator=optional(instance_of(str)), metadata={"alias": "termsOfUseUrl"}
     )
     """The product terms of use."""

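The new schema attribute follows the validated-field pattern used across these models: a string whose metadata carries the serialization alias and a const value. A standalone sketch of the construct, assuming the attrs library (consistent with the field/instance_of calls shown here); the MS_SCHEMA value below is a stand-in, since its definition is outside this diff:

    from attrs import define, field
    from attrs.validators import instance_of

    MS_SCHEMA = "$schema"  # assumed alias key; defined elsewhere in cloudpub

    @define
    class Example:
        schema: str = field(
            validator=instance_of(str),
            metadata={"alias": MS_SCHEMA},  # name used when (de)serializing
        )

    e = Example(schema="https://schema.mp.microsoft.com/schema/property/2022-03-01-preview2")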
cloudpub/ms_azure/service.py CHANGED
@@ -2,22 +2,24 @@
 import json
 import logging
 import os
+from enum import IntEnum
 from typing import Any, Dict, Iterator, List, Optional, Tuple, Union, cast

 from deepdiff import DeepDiff
 from requests import HTTPError
-from tenacity import retry
-from tenacity.retry import retry_if_result
+from tenacity import RetryError, Retrying, retry
+from tenacity.retry import retry_if_exception_type, retry_if_result
 from tenacity.stop import stop_after_attempt, stop_after_delay
-from tenacity.wait import wait_chain, wait_fixed
+from tenacity.wait import wait_fixed

 from cloudpub.common import BaseService
-from cloudpub.error import InvalidStateError, NotFoundError
+from cloudpub.error import ConflictError, InvalidStateError, NotFoundError, Timeout
 from cloudpub.models.ms_azure import (
     RESOURCE_MAPING,
     AzureResource,
     ConfigureStatus,
     CustomerLeads,
+    DiskVersion,
     Listing,
     ListingAsset,
     ListingTrailer,
@@ -38,6 +40,7 @@ from cloudpub.models.ms_azure import (
 from cloudpub.ms_azure.session import PartnerPortalSession
 from cloudpub.ms_azure.utils import (
     AzurePublishingMetadata,
+    TechnicalConfigLookUpData,
     create_disk_version_from_scratch,
     is_azure_job_not_complete,
     is_sas_present,
@@ -69,6 +72,15 @@ AZURE_PRODUCT_RESOURCES = Union[
 ]


+class SasFoundStatus(IntEnum):
+    """Represent the submission target level of SAS found in a given product."""
+
+    missing = 0
+    draft = 1
+    preview = 2
+    live = 3
+
+
 class AzureService(BaseService[AzurePublishingMetadata]):
     """Service provider for Microsoft Azure using the Product Ingestion API."""

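Since SasFoundStatus derives from IntEnum, its members compare like integers and can be looked up by the submission target's name, which is what the publish gating later in this file relies on. A minimal illustration:

    from enum import IntEnum

    class SasFoundStatus(IntEnum):
        missing = 0
        draft = 1
        preview = 2
        live = 3

    assert SasFoundStatus.draft < SasFoundStatus.preview  # ordered comparison
    assert SasFoundStatus["live"] is SasFoundStatus.live  # lookup by target name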
@@ -80,18 +92,31 @@ class AzureService(BaseService[AzurePublishingMetadata]):
     CONFIGURE_SCHEMA = "https://schema.mp.microsoft.com/schema/configure/{AZURE_API_VERSION}"
     DIFF_EXCLUDES = [r"root\['resources'\]\[[0-9]+\]\['url'\]"]

-    def __init__(self, credentials: Dict[str, str]):
+    def __init__(
+        self,
+        credentials: Dict[str, str],
+        retry_interval: Union[int, float] = 300,
+        retry_timeout: Union[int, float] = 3600 * 24 * 7,
+    ):
         """
         Create a new AzureService object.

         Args:
             credentials (dict)
                 Dictionary with Azure credentials to authenticate on Product Ingestion API.
+            retry_interval (int, float)
+                The wait interval in seconds between job retries.
+                Defaults to 300 (5 minutes).
+            retry_timeout (int, float)
+                The maximum time in seconds to keep retrying.
+                Defaults to 7 days.
         """
         self.session = PartnerPortalSession.make_graph_api_session(
             auth_keys=credentials, schema_version=self.AZURE_SCHEMA_VERSION
         )
         self._products: List[ProductSummary] = []
+        self.retry_interval = retry_interval
+        self.retry_timeout = retry_timeout

     def _configure(self, data: Dict[str, Any]) -> ConfigureStatus:
         """
@@ -103,7 +128,10 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         Returns:
             The job ID to track its status alongside the initial status.
         """
-        log.debug("Received the following data to create/modify: %s" % json.dumps(data, indent=2))
+        if log.isEnabledFor(logging.DEBUG):
+            log.debug(
+                "Received the following data to create/modify: %s", json.dumps(data, indent=2)
+            )
         resp = self.session.post(path="configure", json=data)
         self._raise_for_status(response=resp)
         rsp_data = resp.json()
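
The isEnabledFor guard means the relatively expensive json.dumps call is skipped entirely when debug logging is off; a minimal illustration of the pattern:

    import json
    import logging

    log = logging.getLogger(__name__)
    data = {"resources": []}

    # json.dumps only runs when DEBUG is actually enabled
    if log.isEnabledFor(logging.DEBUG):
        log.debug("Payload: %s", json.dumps(data, indent=2))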
@@ -121,7 +149,7 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         Returns:
             The updated job status.
         """
-        log.debug(f"Query job details for \"{job_id}\"")
+        log.debug("Query job details for \"%s\"", job_id)
         resp = self.session.get(path=f"configure/{job_id}/status")

         # We don't want to fail if there's a server error thus we make a fake
@@ -129,9 +157,11 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         if resp.status_code >= 500:
             log.warning(
                 (
-                    f"Got HTTP {resp.status_code} from server when querying job {job_id} status."
-                    " Considering the job_status as \"pending\"."
-                )
+                    "Got HTTP %s from server when querying job %s status."
+                    " Considering the job_status as \"pending\"."
+                ),
+                resp.status_code,
+                job_id,
             )
             return ConfigureStatus.from_json(
                 {
@@ -145,15 +175,27 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         log.debug("Query Job details response: %s", parsed_resp)
         return parsed_resp

-    @retry(
-        retry=retry_if_result(predicate=is_azure_job_not_complete),
-        wait=wait_chain(
-            *[wait_fixed(wait=60)]  # First wait for 1 minute  # noqa: W503
-            + [wait_fixed(wait=10 * 60)]  # Then wait for 10 minutes  # noqa: W503
-            + [wait_fixed(wait=30 * 60)]  # Finally wait each 30 minutes  # noqa: W503
-        ),
-        stop=stop_after_delay(max_delay=60 * 60 * 24 * 7),  # Give up after retrying for 7 days
-    )
+    def query_job_status(self, job_id: str) -> ConfigureStatus:
+        """Query the job status for a given Job ID.
+
+        It raises an error if an invalid state is detected.
+
+        Args:
+            job_id (str): The job ID to query details from.
+
+        Returns:
+            ConfigureStatus: The job status for the given job ID.
+
+        Raises:
+            InvalidStateError: If the job has failed.
+        """
+        job_details = self._query_job_details(job_id=job_id)
+        if job_details.job_result == "failed":
+            error_message = f"Job {job_id} failed: \n{job_details.errors}"
+            self._raise_error(InvalidStateError, error_message)
+        elif job_details.job_result == "succeeded":
+            log.debug("Job %s succeeded", job_id)
+        return job_details
+
     def _wait_for_job_completion(self, job_id: str) -> ConfigureStatus:
         """
         Wait until the specified job ID is complete.
@@ -172,29 +214,32 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         Raises:
             InvalidStateError if the job failed
         """
-        job_details = self._query_job_details(job_id=job_id)
-        if job_details.job_result == "failed":
-            error_message = f"Job {job_id} failed: \n{job_details.errors}"
-            self._raise_error(InvalidStateError, error_message)
-        elif job_details.job_result == "succeeded":
-            log.debug(f"Job {job_id} succeeded")
-        return job_details
+        r = Retrying(
+            retry=retry_if_result(predicate=is_azure_job_not_complete),
+            wait=wait_fixed(self.retry_interval),
+            stop=stop_after_delay(max_delay=self.retry_timeout),
+        )
+        try:
+            return r(self.query_job_status, job_id)
+        except RetryError:
+            self._raise_error(Timeout, f"Time out waiting for job {job_id}")

-    def configure(self, resource: AzureResource) -> ConfigureStatus:
+    def configure(self, resources: List[AzureResource]) -> ConfigureStatus:
         """
         Create or update a resource and wait until it's done.

         Args:
-            resource (AzureResource):
-                The resource to create/modify in Azure.
+            resources (List[AzureResource]):
+                The list of resources to create/modify in Azure.
         Returns:
             dict: The result of job execution
         """
         data = {
             "$schema": self.CONFIGURE_SCHEMA.format(AZURE_API_VERSION=self.AZURE_API_VERSION),
-            "resources": [resource.to_json()],
+            "resources": [x.to_json() for x in resources],
         }
-        log.info("Data to configure: %s", json.dumps(data, indent=2))
+        if log.isEnabledFor(logging.DEBUG):
+            log.debug("Data to configure: %s", json.dumps(data, indent=2))
         res = self._configure(data=data)
         return self._wait_for_job_completion(job_id=res.job_id)

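The decorator-based retry was replaced by an inline tenacity Retrying object so the wait and stop values can come from the instance attributes set in __init__. A standalone sketch of the same mechanism, with a made-up probe function and much shorter timings:

    import random

    from tenacity import RetryError, Retrying
    from tenacity.retry import retry_if_result
    from tenacity.stop import stop_after_delay
    from tenacity.wait import wait_fixed

    def check_ready() -> bool:
        # hypothetical stand-in for a real status probe
        return random.random() > 0.7

    # retry while the callable returns a falsy result, polling every
    # 5 seconds and giving up after 60 seconds in total
    r = Retrying(
        retry=retry_if_result(lambda result: not result),
        wait=wait_fixed(5),
        stop=stop_after_delay(max_delay=60),
    )
    try:
        r(check_ready)
    except RetryError:
        print("gave up waiting")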
@@ -205,7 +250,7 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         params: Dict[str, str] = {}

         while has_next:
-            log.debug("Requesting the products list.")
+            log.info("Requesting the products list.")
             resp = self.session.get(path="/product", params=params)
             data = self._assert_dict(resp)

@@ -230,11 +275,26 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         Returns:
             list: A list with ProductSummary for all products in Azure.
         """
+        log.info("Listing the products on Azure server.")
         if not self._products:
             self._products = [p for p in self.products]
         return self._products

-    def get_product(self, product_id: str, first_target: str = "preview") -> Product:
+    def get_productid(self, product_name: str) -> str:
+        """Retrieve the desired product ID for the requested product name.
+
+        Args:
+            product_name (str): the product's name to retrieve its product ID.
+        Returns:
+            The requested product ID when found.
+        Raises:
+            NotFoundError: When the product was not found.
+        """
+        for product in self.list_products():
+            if product.identity.name == product_name:
+                return product.id
+        raise NotFoundError(f"No such product with name {product_name}")
+
+    def get_product(self, product_id: str, target: str) -> Product:
         """
         Return the requested Product by its ID.

@@ -246,37 +306,31 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         Args:
             product_durable_id (str)
                 The product UUID
-            first_target (str, optional)
-                The first target to lookup into. Defaults to ``preview``.
+            target (str)
+                The submission target to retrieve the product from.
         Returns:
             Product: the requested product
         """
-        targets = [first_target]
-        for tgt in ["preview", "draft", "live"]:
-            if tgt not in targets:
-                targets.append(tgt)
-
-        for t in targets:
-            log.debug("Requesting the product ID \"%s\" with state \"%s\".", product_id, t)
-            try:
-                resp = self.session.get(
-                    path=f"/resource-tree/product/{product_id}", params={"targetType": t}
-                )
-                data = self._assert_dict(resp)
-                return Product.from_json(data)
-            except (ValueError, HTTPError):
-                log.debug("Couldn't find the product \"%s\" with state \"%s\"", product_id, t)
+        log.info("Requesting the product ID \"%s\" with state \"%s\".", product_id, target)
+        try:
+            resp = self.session.get(
+                path=f"/resource-tree/product/{product_id}", params={"targetType": target}
+            )
+            data = self._assert_dict(resp)
+            return Product.from_json(data)
+        except (ValueError, HTTPError):
+            log.debug("Couldn't find the product \"%s\" with state \"%s\"", product_id, target)
         self._raise_error(NotFoundError, f"No such product with id \"{product_id}\"")

-    def get_product_by_name(self, product_name: str, first_target: str = "preview") -> Product:
+    def get_product_by_name(self, product_name: str, target: str) -> Product:
         """
         Return the requested Product by its name from Legacy CPP API.

         Args:
             product_name (str)
                 The product name according to Legacy CPP API.
-            first_target (str, optional)
-                The first target to lookup into. Defaults to ``preview``.
+            target (str)
+                The submission target to retrieve the product from.
         Returns:
             Product: the requested product when found
         Raises:
@@ -285,7 +339,7 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         for product in self.products:
             if product.identity.name == product_name:
                 log.debug("Product alias \"%s\" has the ID \"%s\"", product_name, product.id)
-                return self.get_product(product.id, first_target=first_target)
+                return self.get_product(product.id, target=target)
         self._raise_error(NotFoundError, f"No such product with name \"{product_name}\"")

     def get_submissions(self, product_id: str) -> List[ProductSubmission]:
@@ -314,6 +368,7 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         Returns:
             Optional[ProductSubmission]: The requested submission when found.
         """
+        log.info("Looking up the submission in state \"%s\" for \"%s\"", state, product_id)
         submissions = self.get_submissions(product_id)
         for sub in submissions:
             if sub.target.targetType == state:
@@ -369,47 +424,64 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         self._raise_error(NotFoundError, f"No such plan with name \"{plan_name}\"")

     def get_product_plan_by_name(
-        self, product_name: str, plan_name: str
+        self,
+        product_name: str,
+        plan_name: str,
+        target: str,
     ) -> Tuple[Product, PlanSummary]:
-        """Return a tuple with the desired Product and Plan after iterating over all targets.
+        """Return a tuple with the desired Product and Plan for the given target.

         Args:
             product_name (str): The name of the product to search for
             plan_name (str): The name of the plan to search for
-
+            target (str)
+                The submission target to retrieve the product/plan from.
         Returns:
             Tuple[Product, PlanSummary]: The Product and PlanSummary when found
         Raises:
-            NotFoundError whenever all targets are exhausted and no information was found
-        """
-        targets = ["preview", "draft", "live"]
-
-        for tgt in targets:
-            try:
-                product = self.get_product_by_name(product_name, first_target=tgt)
-                plan = self.get_plan_by_name(product, plan_name)
-                return product, plan
-            except NotFoundError:
-                continue
-        self._raise_error(
-            NotFoundError, f"No such plan with name \"{plan_name}\" for {product_name}"
-        )
+            NotFoundError whenever no information was found in the respective submission target.
+        """
+        try:
+            product = self.get_product_by_name(product_name, target=target)
+            plan = self.get_plan_by_name(product, plan_name)
+            return product, plan
+        except NotFoundError:
+            self._raise_error(
+                NotFoundError, f"No such plan with name \"{plan_name}\" for {product_name}"
+            )

-    def diff_offer(self, product: Product, first_target="preview") -> DeepDiff:
+    def diff_offer(self, product: Product, target: str) -> DeepDiff:
         """Compute the difference between the provided product and the one in the remote.

         Args:
             product (Product)
                 The local product to diff with the remote one.
-            first_target (str)
-                The first target to lookup into. Defaults to ``preview``.
+            target (str)
+                The submission target to retrieve the product from.
         Returns:
             DeepDiff: The diff data.
         """
-        remote = self.get_product(product.id, first_target=first_target)
+        remote = self.get_product(product.id, target=target)
         return DeepDiff(remote.to_json(), product.to_json(), exclude_regex_paths=self.DIFF_EXCLUDES)

-    def submit_to_status(self, product_id: str, status: str) -> ConfigureStatus:
+    def diff_two_offers(self, last_offer: Product, prev_offer: Product) -> DeepDiff:
+        """Compute the difference between two provided products.
+
+        Args:
+            last_offer (Product)
+                The latest offer state to diff
+            prev_offer (Product)
+                The previous offer state to diff
+        Returns:
+            DeepDiff: The diff data.
+        """
+        return DeepDiff(
+            prev_offer.to_json(), last_offer.to_json(), exclude_regex_paths=self.DIFF_EXCLUDES
+        )
+
+    def submit_to_status(
+        self, product_id: str, status: str, resources: Optional[List[AzureResource]] = None
+    ) -> ConfigureStatus:
         """
         Send a submission request to Microsoft with a new Product status.

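diff_two_offers compares two in-memory offer snapshots with deepdiff, reusing the same DIFF_EXCLUDES that diff_offer applies so the volatile resource URLs are ignored. A minimal standalone sketch with made-up payloads:

    from deepdiff import DeepDiff

    prev = {"resources": [{"url": "https://example.test/1", "kind": "azureVM"}]}
    last = {"resources": [{"url": "https://example.test/2", "kind": "azureVM"}]}

    # only the excluded 'url' field differs, so the diff comes back empty
    diff = DeepDiff(prev, last, exclude_regex_paths=[r"root\['resources'\]\[[0-9]+\]\['url'\]"])
    assert not diff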
@@ -418,9 +490,12 @@ class AzureService(BaseService[AzurePublishingMetadata]):
                 The product ID to submit the new status.
             status (str)
                 The new status: 'preview' or 'live'
+            resources (optional(list(AzureResource)))
+                Additional resources for modular push.
         Returns:
             The response from configure request.
         """
+        log.info("Submitting the status of \"%s\" to \"%s\"", product_id, status)
         # We need to get the previous state of the given one to request the submission
         prev_state_mapping = {
             "preview": "draft",
@@ -437,34 +512,55 @@ class AzureService(BaseService[AzurePublishingMetadata]):

         # Update the status with the expected one
         submission.target.targetType = status
+        cfg_res: List[AzureResource] = [submission]
+        if resources:
+            log.info("Performing a modular push to \"%s\" for \"%s\"", status, product_id)
+            cfg_res = resources + cfg_res
         log.debug("Set the status \"%s\" to submission.", status)
+        return self.configure(resources=cfg_res)

-        return self.configure(resource=submission)
-
-    @retry(
-        wait=wait_fixed(300),
-        stop=stop_after_delay(max_delay=60 * 60 * 24 * 7),  # Give up after retrying for 7 days,
-        reraise=True,
-    )
     def ensure_can_publish(self, product_id: str) -> None:
         """
         Ensure the offer is not already being published.

-        It will wait for up to 7 days retrying to make sure it's possible to publish before
-        giving up and raising.
+        It will raise ConflictError if a publish is already in progress in any submission target.

         Args:
             product_id (str)
                 The product ID to check the offer's publishing status
         Raises:
-            RuntimeError: whenever a publishing is already in progress.
+            ConflictError: whenever a publishing is already in progress for any submission target.
         """
-        submission_targets = ["preview", "live"]
+        log.info("Ensuring no other publishing jobs are in progress for \"%s\"", product_id)

-        for target in submission_targets:
-            sub = self.get_submission_state(product_id, state=target)
-            if sub and sub.status and sub.status == "running":
-                raise RuntimeError(f"The offer {product_id} is already being published to {target}")
+        for sub in self.get_submissions(product_id):
+            if sub and sub.status and sub.status != "completed":
+                msg = (
+                    f"The offer {product_id} is already being published to "
+                    f"{sub.target.targetType}: {sub.status}/{sub.result}"
+                )
+                log.error(msg)
+                raise ConflictError(msg)
+
+    def wait_active_publishing(self, product_id: str) -> None:
+        """
+        Wait while there's an existing submission in progress.
+
+        Args:
+            product_id (str)
+                The product ID to verify the submissions state for.
+        """
+        r = Retrying(
+            retry=retry_if_exception_type(ConflictError),
+            wait=wait_fixed(self.retry_interval),
+            stop=stop_after_delay(max_delay=self.retry_timeout),
+        )
+        log.info("Checking for active changes on %s.", product_id)
+
+        try:
+            r(self.ensure_can_publish, product_id)
+        except RetryError:
+            self._raise_error(Timeout, f"Timed out waiting for {product_id} to be unlocked")

     def get_plan_tech_config(self, product: Product, plan: PlanSummary) -> VMIPlanTechConfig:
         """
@@ -491,6 +587,86 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         )
         return tconfigs[0]  # It should have only one VMIPlanTechConfig per plan.

+    def get_modular_resources_to_publish(
+        self, product: Product, tech_config: VMIPlanTechConfig
+    ) -> List[AzureResource]:
+        """Return the required resources for a modular publishing.
+
+        According to Microsoft docs:
+        "For a modular publish, all resources are required except for the product level details
+        (for example, listing, availability, packages, reseller) as applicable to your
+        product type."
+
+        Args:
+            product (Product): The original product to filter the resources from
+            tech_config (VMIPlanTechConfig): The updated tech config to publish
+
+        Returns:
+            List[AzureResource]: The resources required for the modular publish.
+        """
+        # The following resources shouldn't be required:
+        # -> customer-leads
+        # -> test-drive
+        # -> *listing*
+        # -> reseller
+        # -> price-and-availability-*
+        # NOTE: The "submission" resource will be already added by the "submit_to_status" method
+        #
+        # With that it needs only the related "product", "property" and "plan" resources alongside
+        # the updated tech_config
+        product_id = tech_config.product_id
+        plan_id = tech_config.plan_id
+        prod_res = cast(
+            List[ProductSummary],
+            [
+                prd
+                for prd in self.filter_product_resources(product=product, resource="product")
+                if prd.id == product_id
+            ],
+        )[0]
+        property = cast(
+            List[ProductProperty],
+            [
+                prop
+                for prop in self.filter_product_resources(product=product, resource="property")
+                if prop.product_id == product_id  # type: ignore [union-attr]
+            ],
+        )[0]
+        plan_res = cast(
+            List[PlanSummary],
+            [
+                pln
+                for pln in self.filter_product_resources(product=product, resource="plan")
+                if pln.id == plan_id
+            ],
+        )[0]
+        return [prod_res, property, plan_res, tech_config]
+
+    def compute_targets(self, product_id: str) -> List[str]:
+        """List all the possible publishing targets to seek data from Azure.
+
+        It returns the ordered list of targets with the following precedence:
+        ``live`` -> ``preview`` -> ``draft``
+
+        Args:
+            product_id (str)
+                The product_id to retrieve all existing submission targets.
+
+        Returns:
+            List[str]: The ordered list with targets to lookup.
+        """
+        all_targets = ["live", "preview", "draft"]
+        computed_targets = []
+
+        # We cannot simply return all targets above because the existing product might
+        # lack one of them. So now we need to filter out nonexistent targets.
+        product_submissions = self.get_submissions(product_id)
+        product_targets = [s.target.targetType for s in product_submissions]
+        for t in all_targets:
+            if t in product_targets:
+                computed_targets.append(t)
+        return computed_targets
+
     def _is_submission_in_preview(self, current: ProductSubmission) -> bool:
         """Return True if the latest submission state is "preview", False otherwise.

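compute_targets keeps the live -> preview -> draft precedence while dropping any target the product has no submission for. A quick illustration of the filtering with a hypothetical submission list:

    all_targets = ["live", "preview", "draft"]
    product_targets = ["draft", "live"]  # hypothetical: no 'preview' submission exists

    computed = [t for t in all_targets if t in product_targets]
    assert computed == ["live", "draft"]  # precedence kept, missing target dropped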
@@ -518,42 +694,33 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         stop=stop_after_attempt(3),
         reraise=True,
     )
-    def _publish_preview(self, product: Product, product_name: str) -> None:
+    def _publish_preview(
+        self, product: Product, product_name: str, resources: Optional[List[AzureResource]] = None
+    ) -> None:
         """
-        Submit the product to 'preview' if it's not already in this state.
+        Submit the product to 'preview' after going through Azure Marketplace Validation.

         This is required to execute the validation pipeline on Azure side.

         Args:
             product
-                The product with changes to publish live
+                The product with changes to publish to preview
             product_name
                 The product name to display in logs.
+            resources:
+                Additional resources for modular push.
         """
-        # We just want to set the ProductSubmission to 'preview' if it's not in this status.
-        #
-        # The `preview` stage runs the Azure pipeline which takes up to 4 days.
-        # Meanwhile the `submit_for_status` will be blocked querying the `job_status` until
-        # all the Azure verification pipeline finishes.
-        submission: ProductSubmission = cast(
-            List[ProductSubmission],
-            self.filter_product_resources(product=product, resource="submission"),
-        )[0]
-        if not self._is_submission_in_preview(submission):
-            log.info(
-                "Submitting the product \"%s (%s)\" to \"preview\"." % (product_name, product.id)
+        res = self.submit_to_status(product_id=product.id, status='preview', resources=resources)
+
+        if res.job_result != 'succeeded' or not self.get_submission_state(
+            product.id, state="preview"
+        ):
+            errors = "\n".join(res.errors)
+            failure_msg = (
+                f"Failed to submit the product {product_name} ({product.id}) to preview. "
+                f"Status: {res.job_result} Errors: {errors}"
             )
-            res = self.submit_to_status(product_id=product.id, status='preview')
-
-            if res.job_result != 'succeeded' or not self.get_submission_state(
-                product.id, state="preview"
-            ):
-                errors = "\n".join(res.errors)
-                failure_msg = (
-                    f"Failed to submit the product {product.id} to preview. "
-                    f"Status: {res.job_result} Errors: {errors}"
-                )
-                raise RuntimeError(failure_msg)
+            raise RuntimeError(failure_msg)

     @retry(
         wait=wait_fixed(wait=60),
@@ -572,17 +739,133 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         """
         # Note: the offer can only go `live` after successfully being changed to `preview`
         # which takes up to 4 days.
-        log.info("Submitting the product \"%s (%s)\" to \"live\"." % (product_name, product.id))
         res = self.submit_to_status(product_id=product.id, status='live')

         if res.job_result != 'succeeded' or not self.get_submission_state(product.id, state="live"):
             errors = "\n".join(res.errors)
             failure_msg = (
-                f"Failed to submit the product {product.id} to live. "
+                f"Failed to submit the product {product_name} ({product.id}) to live. "
                 f"Status: {res.job_result} Errors: {errors}"
             )
             raise RuntimeError(failure_msg)

+    def _overwrite_disk_version(
+        self,
+        metadata: AzurePublishingMetadata,
+        product_name: str,
+        plan_name: str,
+        source: VMImageSource,
+        target: str,
+    ) -> TechnicalConfigLookUpData:
+        """Private method to overwrite the technical config with a new DiskVersion.
+
+        Args:
+            metadata (AzurePublishingMetadata): the incoming publishing metadata
+            product_name (str): the product (offer) name
+            plan_name (str): the plan name
+            source (VMImageSource): the source VMI to create and overwrite the new DiskVersion
+            target (str): the submission target.
+
+        Returns:
+            TechnicalConfigLookUpData: The overwritten tech_config for the product/plan
+        """
+        product, plan = self.get_product_plan_by_name(product_name, plan_name, target)
+        log.warning(
+            "Overwriting the plan \"%s\" on \"%s\" with the given image: \"%s\".",
+            plan_name,
+            target,
+            metadata.image_path,
+        )
+        tech_config = self.get_plan_tech_config(product, plan)
+        disk_version = create_disk_version_from_scratch(metadata, source)
+        tech_config.disk_versions = [disk_version]
+        return {
+            "metadata": metadata,
+            "tech_config": tech_config,
+            "sas_found": False,
+            "product": product,
+            "plan": plan,
+            "target": target,
+        }
+
+    def _look_up_sas_on_technical_config(
+        self, metadata: AzurePublishingMetadata, product_name: str, plan_name: str, target: str
+    ) -> TechnicalConfigLookUpData:
+        """Private method to look up the TechnicalConfig of a given target.
+
+        Args:
+            metadata (AzurePublishingMetadata): the incoming publishing metadata.
+            product_name (str): the product (offer) name
+            plan_name (str): the plan name
+            target (str): the submission target to look up the TechnicalConfig object
+
+        Returns:
+            TechnicalConfigLookUpData: The data retrieved for the given submission target.
+        """
+        product, plan = self.get_product_plan_by_name(product_name, plan_name, target)
+        log.info(
+            "Retrieving the technical config for \"%s\" on \"%s\".",
+            metadata.destination,
+            target,
+        )
+        tech_config = self.get_plan_tech_config(product, plan)
+        sas_found = False
+
+        if is_sas_present(tech_config, metadata.image_path, metadata.check_base_sas_only):
+            log.info(
+                "The destination \"%s\" on \"%s\" already contains the SAS URI: \"%s\".",
+                metadata.destination,
+                target,
+                metadata.image_path,
+            )
+            sas_found = True
+        return {
+            "metadata": metadata,
+            "tech_config": tech_config,
+            "sas_found": sas_found,
+            "product": product,
+            "plan": plan,
+            "target": target,
+        }
+
+    def _create_or_update_disk_version(
+        self,
+        tech_config_lookup: TechnicalConfigLookUpData,
+        source: VMImageSource,
+        disk_version: Optional[DiskVersion],
+    ) -> DiskVersion:
+        """Private method to create/update the DiskVersion of a given TechnicalConfig object.
+
+        Args:
+            tech_config_lookup (TechnicalConfigLookUpData): the incoming data to process
+            source (VMImageSource): the new VMI source to attach
+            disk_version (Optional[DiskVersion]): the disk version if it exists (for updates).
+
+        Returns:
+            DiskVersion: The updated DiskVersion
+        """
+        metadata = tech_config_lookup["metadata"]
+        target = tech_config_lookup["target"]
+        tech_config = tech_config_lookup["tech_config"]
+
+        # Check the images of the selected DiskVersion if it exists
+        if disk_version:
+            log.info(
+                "DiskVersion \"%s\" exists in \"%s\" on \"%s\" for the image \"%s\".",
+                disk_version.version_number,
+                metadata.destination,
+                target,
+                metadata.image_path,
+            )
+            # Update the disk version with the new SAS
+            disk_version = set_new_sas_disk_version(disk_version, metadata, source)
+            return disk_version
+        # The disk version doesn't exist, we need to create one from scratch
+        log.info("The DiskVersion doesn't exist, creating one from scratch.")
+        disk_version = create_disk_version_from_scratch(metadata, source)
+        tech_config.disk_versions.append(disk_version)
+        return disk_version
+
     def publish(self, metadata: AzurePublishingMetadata) -> None:
         """
         Associate a VM image with a given product listing (destination) and publish it if required.
@@ -596,71 +879,103 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         # "product-name/plan-name"
         product_name = metadata.destination.split("/")[0]
         plan_name = metadata.destination.split("/")[-1]
-        product, plan = self.get_product_plan_by_name(product_name, plan_name)
+        product_id = self.get_productid(product_name)
+        sas_in_target = SasFoundStatus.missing
+        self.wait_active_publishing(product_id=product_id)
         log.info(
-            "Preparing to associate the image with the plan \"%s\" from product \"%s\""
-            % (plan_name, product_name)
+            "Preparing to associate the image \"%s\" with the plan \"%s\" from product \"%s\"",
+            metadata.image_path,
+            plan_name,
+            product_name,
         )

-        # 2. Retrieve the VM Technical configuration for the given plan
-        log.debug("Retrieving the technical config for \"%s\"." % metadata.destination)
-        tech_config = self.get_plan_tech_config(product, plan)
-
-        # 3. Prepare the Disk Version
-        log.debug("Creating the VMImageResource with SAS: \"%s\"" % metadata.image_path)
+        # 2. Prepare the Disk Version
+        log.info("Creating the VMImageResource with SAS for image: \"%s\"", metadata.image_path)
         sas = OSDiskURI(uri=metadata.image_path)
         source = VMImageSource(source_type="sasUri", os_disk=sas.to_json(), data_disks=[])

+        # 3. Set the new Disk Version into the product/plan if required
+        #
         # Note: If `overwrite` is True it means we can set this VM image as the only one in the
         # plan's technical config and discard all other VM images which may've been present.
-        disk_version = None  # just to make mypy happy
         if metadata.overwrite is True:
-            log.warning("Overwriting the plan %s with the given image.", plan_name)
-            disk_version = create_disk_version_from_scratch(metadata, source)
-            tech_config.disk_versions = [disk_version]
-
-        # We just want to append a new image if the SAS is not already present.
-        elif not is_sas_present(tech_config, metadata.image_path, metadata.check_base_sas_only):
-            # Here we can have the metadata.disk_version set or empty.
-            # When set we want to get the existing disk_version which matches its value.
-            log.debug("Scanning the disk versions from %s" % metadata.destination)
-            disk_version = seek_disk_version(tech_config, metadata.disk_version)
-
-            # Check the images of the selected DiskVersion if it exists
-            if disk_version:
-                log.debug(
-                    "DiskVersion \"%s\" exists in \"%s\"."
-                    % (disk_version.version_number, metadata.destination)
-                )
-                disk_version = set_new_sas_disk_version(disk_version, metadata, source)
-
-            else:  # The disk version doesn't exist, we need to create one from scratch
-                log.debug("The DiskVersion doesn't exist, creating one from scratch.")
-                disk_version = create_disk_version_from_scratch(metadata, source)
-                tech_config.disk_versions.append(disk_version)
+            target = "draft"  # It's expected to exist for every product.
+            res = self._overwrite_disk_version(metadata, product_name, plan_name, source, target)
+            tech_config = res["tech_config"]
         else:
-            log.info(
-                "The destination \"%s\" already contains the SAS URI: \"%s\"."
-                % (metadata.destination, metadata.image_path)
-            )
+            # Otherwise we need to check whether SAS isn't already present
+            # in any of the targets "preview", "live" or "draft" and if not attach and publish it.
+            for target in self.compute_targets(product_id):
+                res = self._look_up_sas_on_technical_config(
+                    metadata, product_name, plan_name, target
+                )
+                tech_config = res["tech_config"]
+                # We don't want to seek for SAS anymore as it was already found
+                if res["sas_found"]:
+                    sas_in_target = SasFoundStatus[target]
+                    break
+            else:
+                # At this point there's no SAS URI in any target so we can safely add it
+
+                # Here we can have the metadata.disk_version set or empty.
+                # When set we want to get the existing disk_version which matches its value.
+                log.info(
+                    "Scanning the disk versions from \"%s\" on \"%s\" for the image \"%s\"",
+                    metadata.destination,
+                    target,
+                    metadata.image_path,
+                )
+                dv = seek_disk_version(tech_config, metadata.disk_version)
+                self._create_or_update_disk_version(res, source, dv)

         # 4. With the updated disk_version we should adjust the SKUs and submit the changes
-        if disk_version:
-            log.debug("Updating SKUs for \"%s\"." % metadata.destination)
+        if sas_in_target == SasFoundStatus.missing:
+            log.info("Updating SKUs for \"%s\" on \"%s\".", metadata.destination, target)
             tech_config.skus = update_skus(
                 disk_versions=tech_config.disk_versions,
                 generation=metadata.generation,
                 plan_name=plan_name,
                 old_skus=tech_config.skus,
             )
-            log.debug("Updating the technical configuration for \"%s\"." % metadata.destination)
-            self.configure(resource=tech_config)
+            log.info(
+                "Updating the technical configuration for \"%s\" on \"%s\".",
+                metadata.destination,
+                target,
+            )
+            self.configure(resources=[tech_config])

         # 5. Proceed to publishing if it was requested.
         # Note: The publishing will only occur if it made changes in disk_version.
-        if disk_version and not metadata.keepdraft:
-            logdiff(self.diff_offer(product))
-            self.ensure_can_publish(product.id)
+        if not metadata.keepdraft:
+            product = res["product"]
+            # Get the submission state
+            submission: ProductSubmission = cast(
+                List[ProductSubmission],
+                self.filter_product_resources(product=product, resource="submission"),
+            )[0]
+
+            # We should only publish if there are new changes OR
+            # the existing offer was already in preview
+            if sas_in_target <= SasFoundStatus.draft or self._is_submission_in_preview(submission):
+                log.info(
+                    "Publishing the new changes for \"%s\" on plan \"%s\"", product_name, plan_name
+                )
+                logdiff(self.diff_offer(product, target))
+                self.ensure_can_publish(product.id)

-            self._publish_preview(product, product_name)
-            self._publish_live(product, product_name)
+                # According to the documentation we only need to pass the
+                # required resources for modular publish on "preview"
+                # https://learn.microsoft.com/en-us/partner-center/marketplace-offers/product-ingestion-api#method-2-publish-specific-draft-resources-also-known-as-modular-publish  # noqa: E501
+                modular_resources = None
+                if metadata.modular_push:
+                    modular_resources = self.get_modular_resources_to_publish(product, tech_config)
+                if sas_in_target < SasFoundStatus.preview:
+                    self._publish_preview(product, product_name, resources=modular_resources)
+                if sas_in_target < SasFoundStatus.live:
+                    self._publish_live(product, product_name)
+
+        log.info(
+            "Finished publishing the image \"%s\" to \"%s\"",
+            metadata.image_path,
+            metadata.destination,
+        )
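
The ordered enum reduces the publish gating above to two comparisons: a SAS found no higher than 'draft' still triggers both publish steps, one found in 'preview' skips straight to going live, and one found in 'live' republishes nothing. A small sketch:

    from cloudpub.ms_azure.service import SasFoundStatus  # import path assumed from the file layout

    sas_in_target = SasFoundStatus.preview  # hypothetical: SAS was found in 'preview'

    assert not (sas_in_target < SasFoundStatus.preview)  # _publish_preview is skipped
    assert sas_in_target < SasFoundStatus.live           # _publish_live still runs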
cloudpub/ms_azure/session.py CHANGED
@@ -24,7 +24,7 @@ class AccessToken:
         """
         self.expires_on = datetime.fromtimestamp(int(json["expires_on"]))
         self.access_token = json["access_token"]
-        log.debug(f"Obtained token with expiration date on {self.expires_on}")
+        log.debug("Obtained token with expiration date on %s", self.expires_on)

     def is_expired(self) -> bool:
         """Return True if the token is expired and False otherwise."""
@@ -108,7 +108,7 @@ class PartnerPortalSession:
             "AZURE_API_SECRET",
         ]
         for key in mandatory_keys:
-            log.debug(f"Validating mandatory key \"{key}\"")
+            log.debug("Validating mandatory key \"%s\"", key)
             if key not in auth_keys.keys() or not auth_keys.get(key):
                 err_msg = f'The key/value for "{key}" must be set.'
                 log.error(err_msg)
@@ -117,7 +117,7 @@ class PartnerPortalSession:

     def _login(self) -> AccessToken:
         """Retrieve the authentication token from Microsoft."""
-        log.info("Retrieving the bearer token from Microsoft")
+        log.debug("Retrieving the bearer token from Microsoft")
         url = self.LOGIN_URL_TMPL.format(**self.auth_keys)

         headers = {
@@ -156,7 +156,7 @@ class PartnerPortalSession:
         params = {}
         params.update(self._mandatory_params)

-        log.info(f"Sending a {method} request to {path}")
+        log.debug("Sending a %s request to %s", method, path)
         formatted_url = self._prefix_url.format(**self.auth_keys)
         url = join_url(formatted_url, path)
         return self.session.request(method, url=url, params=params, headers=headers, **kwargs)
cloudpub/ms_azure/utils.py CHANGED
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 import logging
 from operator import attrgetter
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, TypedDict

 from deepdiff import DeepDiff

@@ -9,6 +9,8 @@ from cloudpub.common import PublishingMetadata # Cannot circular import AzurePu
 from cloudpub.models.ms_azure import (
     ConfigureStatus,
     DiskVersion,
+    PlanSummary,
+    Product,
     VMImageDefinition,
     VMImageSource,
     VMIPlanTechConfig,
@@ -54,6 +56,11 @@ class AzurePublishingMetadata(PublishingMetadata):
         check_base_sas_only (bool, optional):
             Indicates to skip checking SAS parameters when set as ``True``.
             Defaults to ``False``
+        modular_push (bool, optional):
+            Indicate whether to perform a modular push or not.
+            A modular push publishes only the changed plan
+            instead of the whole offer to preview/live.
+            Defaults to ``False``.
         **kwargs
             Arguments for :class:`~cloudpub.common.PublishingMetadata`.
         """
@@ -64,6 +71,7 @@ class AzurePublishingMetadata(PublishingMetadata):
         self.recommended_sizes = recommended_sizes or []
         self.legacy_sku_id = kwargs.pop("legacy_sku_id", None)
         self.check_base_sas_only = kwargs.pop("check_base_sas_only", False)
+        self.modular_push = kwargs.pop("modular_push", None) or False

         if generation == "V1" or not support_legacy:
             self.legacy_sku_id = None
@@ -107,6 +115,17 @@ class AzurePublishingMetadata(PublishingMetadata):
             raise ValueError(f"Invalid SAS URI \"{self.image_path}\". Expected: http/https URL.")


+class TechnicalConfigLookUpData(TypedDict):
+    """A typed dict used for data exchange between private methods."""
+
+    metadata: AzurePublishingMetadata
+    tech_config: VMIPlanTechConfig
+    sas_found: bool
+    product: Product
+    plan: PlanSummary
+    target: str
+
+
 def get_image_type_mapping(architecture: str, generation: str) -> str:
     """Return the image type required by VMImageDefinition."""
     gen_map = {
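
TypedDict gives this inter-method payload a statically checked shape while staying a plain dict at runtime. A minimal sketch of building and consuming such a dict (a simplified stand-in, not the full TechnicalConfigLookUpData):

    from typing import TypedDict

    class LookUpData(TypedDict):
        sas_found: bool
        target: str

    data: LookUpData = {"sas_found": True, "target": "draft"}
    print(data["target"])  # type checkers verify the keys; runtime is an ordinary dict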
@@ -150,22 +169,23 @@ def is_sas_eq(sas1: str, sas2: str, base_only=False) -> bool:

     # Base URL differs
     if base_sas1 != base_sas2:
-        log.debug("Got different base SAS: %s - Expected: %s" % (base_sas1, base_sas2))
+        log.debug("Got different base SAS: %s - Expected: %s", base_sas1, base_sas2)
         return False

     if not base_only:
         # Parameters length differs
         if len(params_sas1) != len(params_sas2):
             log.debug(
-                "Got different length of SAS parameters: len(%s) - Expected len(%s)"
-                % (params_sas1, params_sas2)
+                "Got different length of SAS parameters: len(%s) - Expected len(%s)",
+                params_sas1,
+                params_sas2,
             )
             return False

         # Parameter values differ
         for k, v in params_sas1.items():
             if v != params_sas2.get(k, None):
-                log.debug("The SAS parameter %s doesn't match %s." % (v, params_sas2.get(k, None)))
+                log.debug("The SAS parameter %s doesn't match %s.", v, params_sas2.get(k, None))
                 return False

     # Equivalent SAS
@@ -203,8 +223,8 @@ def is_azure_job_not_complete(job_details: ConfigureStatus) -> bool:
     Returns:
         bool: False if job completed, True otherwise
     """
-    log.debug(f"Checking if the job \"{job_details.job_id}\" is still running")
-    log.debug(f"job {job_details.job_id} is in {job_details.job_status} state")
+    log.debug("Checking if the job \"%s\" is still running", job_details.job_id)
+    log.debug("job %s is in %s state", job_details.job_id, job_details.job_status)
     if job_details.job_status != "completed":
         return True
     return False
@@ -221,56 +241,6 @@ def is_legacy_gen_supported(metadata: AzurePublishingMetadata) -> bool:
     return metadata.architecture == "x64" and metadata.support_legacy


-def prepare_vm_images(
-    metadata: AzurePublishingMetadata,
-    gen1: Optional[VMImageDefinition],
-    gen2: Optional[VMImageDefinition],
-    source: VMImageSource,
-) -> List[VMImageDefinition]:
-    """
-    Update the vm_images list with the proper SAS based in existing generation(s).
-
-    Args:
-        metadata (AzurePublishingMetadata)
-            The VHD publishing metadata.
-        gen1 (VMImageDefinition, optional)
-            The VMImageDefinition for Gen1 VHD.
-            If not set the argument `gen2` must be set.
-        gen2 (VMImageDefinition, optional)
-            The VMImageDefinition for Gen2 VHD.
-            If not set the argument `gen1` must be set.
-        source (VMImageSource):
-            The VMImageSource with the updated SAS URI.
-    Returns:
-        list: A new list containing the expected VMImageDefinition(s)
-    """
-    if not gen1 and not gen2:
-        msg = "At least one argument of \"gen1\" or \"gen2\" must be set."
-        log.error(msg)
-        raise ValueError(msg)
-
-    raw_source = source.to_json()
-    json_gen1 = {
-        "imageType": get_image_type_mapping(metadata.architecture, "V1"),
-        "source": raw_source,
-    }
-    json_gen2 = {
-        "imageType": get_image_type_mapping(metadata.architecture, "V2"),
-        "source": raw_source,
-    }
-
-    if metadata.generation == "V2":
-        # In this case we need to set a V2 SAS URI
-        gen2_new = VMImageDefinition.from_json(json_gen2)
-        if is_legacy_gen_supported(metadata):  # and in this case a V1 as well
-            gen1_new = VMImageDefinition.from_json(json_gen1)
-            return [gen2_new, gen1_new]
-        return [gen2_new]
-    else:
-        # It's expected to be a Gen1 only, let's get rid of Gen2
-        return [VMImageDefinition.from_json(json_gen1)]
-
-
 def _all_skus_present(old_skus: List[VMISku], disk_versions: List[DiskVersion]) -> bool:
     image_types = set()
     for sku in old_skus:
@@ -465,47 +435,6 @@ def seek_disk_version(
     return None


-def vm_images_by_generation(
-    disk_version: DiskVersion, architecture: str
-) -> Tuple[Optional[VMImageDefinition], ...]:
-    """
-    Return a tuple containing the Gen1 and Gen2 VHD images in this order.
-
-    If one of the images doesn't exist it will return None in the expected tuple position.
-
-    Args:
-        disk_version
-            The disk version to retrieve the VMImageDefinitions from
-        architecture
-            The expected architecture for the VMImageDefinition.
-    Returns:
-        Gen1 and Gen2 VMImageDefinitions when they exist.
-    """
-    log.debug("Sorting the VMImageDefinition by generation.")
-    # Here we have 3 possibilities:
-    # 1. vm_images => "Gen1" only
-    # 2. vm_images => "Gen2" only
-    # 3. vm_images => "Gen1" and "Gen2"
-
-    # So let's get the first image whatever it is
-    img = disk_version.vm_images.pop(0)
-
-    # If first `img` is Gen2 we set the other one as `img_legacy`
-    if img.image_type == get_image_type_mapping(architecture, "V2"):
-        img_legacy = disk_version.vm_images.pop(0) if len(disk_version.vm_images) > 0 else None
-
-    else:  # Otherwise we set it as `img_legacy` and get the gen2
-        img_legacy = img
-        img = (
-            disk_version.vm_images.pop(0)  # type: ignore
-            if len(disk_version.vm_images) > 0
-            else None
-        )
-    log.debug("Image for current generation: %s", img)
-    log.debug("Image for legacy generation: %s", img_legacy)
-    return img, img_legacy
-
-
 def create_vm_image_definitions(
     metadata: AzurePublishingMetadata, source: VMImageSource
 ) -> List[VMImageDefinition]:
@@ -556,32 +485,55 @@ def set_new_sas_disk_version(
     Returns:
         The changed disk version with the given source.
     """
+    log.info("Setting up a new SAS disk version for \"%s\"", metadata.image_path)
     # If we already have a VMImageDefinition let's use it
     if disk_version.vm_images:
-        log.debug("The DiskVersion \"%s\" contains inner images." % disk_version.version_number)
-        img, img_legacy = vm_images_by_generation(disk_version, metadata.architecture)
-
-        # Now we replace the SAS URI for the vm_images
-        log.debug(
+        log.debug("The DiskVersion \"%s\" contains inner images.", disk_version.version_number)
+        log.info(
             "Adjusting the VMImages from existing DiskVersion \"%s\""
-            "to fit the new image with SAS \"%s\"."
-            % (disk_version.version_number, metadata.image_path)
-        )
-        disk_version.vm_images = prepare_vm_images(
-            metadata=metadata,
-            gen1=img_legacy,
-            gen2=img,
-            source=source,
+            " to fit the new image with SAS \"%s\".",
+            disk_version.version_number,
+            metadata.image_path,
         )
+        # Verify whether the arch is present for the new image
+        is_arch_present = False
+        # If the arch is present, update the SAS URI
+        for img in disk_version.vm_images:
+            if (
+                img.image_type == get_image_type_mapping(metadata.architecture, metadata.generation)
+            ) or (
+                metadata.support_legacy
+                and img.image_type == get_image_type_mapping(metadata.architecture, "V1")  # noqa
+            ):
+                is_arch_present = True
+                img.source.os_disk.uri = source.os_disk.uri
+
+        # If the arch is not present, add it to the DiskVersion
+        if not is_arch_present:
+            if metadata.support_legacy:
+                disk_version.vm_images.append(
+                    VMImageDefinition(
+                        image_type=get_image_type_mapping(metadata.architecture, "V1"),
+                        source=source.to_json(),
+                    )
+                )
+            disk_version.vm_images.append(
+                VMImageDefinition(
+                    image_type=get_image_type_mapping(metadata.architecture, metadata.generation),
+                    source=source.to_json(),
+                )
+            )
+        return disk_version

     # If no VMImages, we need to create them from scratch
     else:
         log.debug(
-            "The DiskVersion \"%s\" does not contain inner images." % disk_version.version_number
+            "The DiskVersion \"%s\" does not contain inner images.", disk_version.version_number
         )
-        log.debug(
-            "Setting the new image \"%s\" on DiskVersion \"%s\"."
-            % (metadata.image_path, disk_version.version_number)
+        log.info(
+            "Setting the new image \"%s\" on DiskVersion \"%s\".",
+            metadata.image_path,
+            disk_version.version_number,
         )
         disk_version.vm_images = create_vm_image_definitions(metadata, source)

@@ -591,4 +543,4 @@ def set_new_sas_disk_version(
 def logdiff(diff: DeepDiff) -> None:
     """Log the offer diff if it exists."""
     if diff:
-        log.warning(f"Found the following offer diff before publishing:\n{diff.pretty()}")
+        log.warning("Found the following offer diff before publishing:\n%s", diff.pretty())
cloudpub-1.5.0.dist-info/METADATA → cloudpub-1.7.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.4
 Name: cloudpub
-Version: 1.5.0
+Version: 1.7.0
 Summary: Services for publishing products in cloud environments
 Home-page: https://github.com/release-engineering/cloudpub
 Author: Jonathan Gangi
@@ -19,4 +19,12 @@ Requires-Dist: requests
 Requires-Dist: tenacity
 Requires-Dist: packaging
 Requires-Dist: boto3>=1.34.117
-
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: license-file
+Dynamic: requires-dist
+Dynamic: summary
cloudpub-1.5.0.dist-info/RECORD → cloudpub-1.7.0.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
 cloudpub/__init__.py,sha256=OcXNPwjDybWtcxAL0IhFtKiMErHP4xugUgpMKPbpYGM,44
 cloudpub/common.py,sha256=fnMEVyf4M4hV2MGFKv59wy4C591jp6kji_sq0T6MRlA,3990
-cloudpub/error.py,sha256=5rMBJ3pPcbG92swnDlMtIKDVw60FeW37vFHcu58trcI,578
+cloudpub/error.py,sha256=Tof439r4Ei2mM-BnP1zI1sC2IBzUpbhFd2xIZdyg_k8,661
 cloudpub/utils.py,sha256=HTFVlYOb7nTw6BTTv_migod7cQMXzpSlBzsC8aCfQdQ,1097
 cloudpub/aws/__init__.py,sha256=bQbgPTq-S3IrUkZpBNAZ6-2FQ3KiRg87Al5muXh8pmc,117
 cloudpub/aws/service.py,sha256=h-nsTE1mldjQD1nqlUCX0OQiAwnDTVJlI92X13fK8GU,19337
@@ -8,13 +8,13 @@ cloudpub/aws/utils.py,sha256=qlZ9R7J8AArR3tFYW47cJ0dgdqYc7kPJUEn24HtzeGI,6453
 cloudpub/models/__init__.py,sha256=OcXNPwjDybWtcxAL0IhFtKiMErHP4xugUgpMKPbpYGM,44
 cloudpub/models/aws.py,sha256=arzFqLmFw8O9Otk_VatLR5dmQ9FsdWT3f0Ibap7EW0o,42850
 cloudpub/models/common.py,sha256=iZ503VVFL9y0P_wXiK0f3flXV32VWBs9i-9NoYfJZUg,4970
-cloudpub/models/ms_azure.py,sha256=nzTp9IvAW-WEJuN20IAc93yY6YPHCTE0j116EfQUsPg,55974
+cloudpub/models/ms_azure.py,sha256=f-r6Fic5zFyOGIZHUe1BK2iK7dzjXK1rqsZjn3Xvr5M,56252
 cloudpub/ms_azure/__init__.py,sha256=eeYXPd_wzDBmh0Hmzd5o4yzocFzM6n4r8qpCDy00kYk,117
-cloudpub/ms_azure/service.py,sha256=cMgKcOfgW0UwPPSGGH6Iiyqz2JidrEeowiY-SHq1mSU,26589
-cloudpub/ms_azure/session.py,sha256=7ZjBLBX4XSzx60Bxhn96kh64RJ3oQs734Tw3ZVSnFrU,6349
-cloudpub/ms_azure/utils.py,sha256=h6bEtMrlPsbayR-SlVEzlzxEC1i4fdSqH8Fn-m_xaMQ,20730
-cloudpub-1.5.0.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-cloudpub-1.5.0.dist-info/METADATA,sha256=06r2pdV_DeEMDPue5D5iQ3rrmQe5fsEHF21zvlEFdFo,754
-cloudpub-1.5.0.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-cloudpub-1.5.0.dist-info/top_level.txt,sha256=YnnJuTiWBpRI9zMkYUVcZNuvjzzJYblASj-7Q8m3Gzg,9
-cloudpub-1.5.0.dist-info/RECORD,,
+cloudpub/ms_azure/service.py,sha256=izO6vrC-xEYpnUrxMDmMo9ZKlUoAGtK4EmN6o_2dPWc,38634
+cloudpub/ms_azure/session.py,sha256=PXCSJ1dFkx43lQV0WFPnRxbpyOBccdtrMiWGPORT3Ro,6356
+cloudpub/ms_azure/utils.py,sha256=pYDcpSuL-FDLbXANakTY7M70hARGAjQ3rVPLZbCRjo4,19180
+cloudpub-1.7.0.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+cloudpub-1.7.0.dist-info/METADATA,sha256=ObxvkEd-CxzYbntbKDR2vW9hfgLntBxNtZWj3uHAJZY,927
+cloudpub-1.7.0.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+cloudpub-1.7.0.dist-info/top_level.txt,sha256=YnnJuTiWBpRI9zMkYUVcZNuvjzzJYblASj-7Q8m3Gzg,9
+cloudpub-1.7.0.dist-info/RECORD,,
cloudpub-1.5.0.dist-info/WHEEL → cloudpub-1.7.0.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.45.1)
+Generator: setuptools (79.0.1)
 Root-Is-Purelib: true
 Tag: py3-none-any
