cloudpub 1.6.0__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cloudpub/error.py CHANGED
@@ -21,5 +21,9 @@ class NotFoundError(ValueError):
     """Represent a missing resource."""


+class ConflictError(RuntimeError):
+    """Report a submission conflict error."""
+
+
 class Timeout(Exception):
     """Represent a missing resource."""
cloudpub/models/ms_azure.py CHANGED
@@ -711,11 +711,21 @@ class ProductProperty(AzureProductLinkedResource):
     `Schema definition for ProductProperty <https://schema.mp.microsoft.com/schema/property/2022-03-01-preview2>`_
     """  # noqa E501

+    schema: str = field(
+        validator=instance_of(str),
+        metadata={
+            "alias": MS_SCHEMA,
+            "const": "https://schema.mp.microsoft.com/schema/property/2022-03-01-preview2",
+        },
+    )
+    """
+    The `resource schema`_ for Graph API."""
+
     kind: str
     """Expected to be ``azureVM``"""

     terms_of_use: Optional[str] = field(
-        validator=optional(instance_of(str)), metadata={"alias": "termsOfUse"}
+        validator=optional(instance_of(str)), metadata={"alias": "termsOfUseUrl"}
     )
     """The product terms of use."""

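The new schema field follows the attrs pattern used throughout these models: a validated string plus a metadata dict (alias/const) that the project's own (de)serialization layer reads. Below is a generic attrs sketch of the validator part only; the alias and URL values are invented for the example, and how the metadata is consumed is project-specific and not shown here.

from attrs import define, field
from attrs.validators import instance_of

@define
class Example:
    schema: str = field(
        validator=instance_of(str),
        metadata={"alias": "exampleAlias", "const": "https://example.com/schema"},
    )

Example(schema="https://example.com/schema")  # accepted
Example(schema=123)  # raises TypeError from the instance_of validator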
cloudpub/ms_azure/service.py CHANGED
@@ -7,13 +7,13 @@ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union, cast

 from deepdiff import DeepDiff
 from requests import HTTPError
-from tenacity import retry
-from tenacity.retry import retry_if_result
+from tenacity import RetryError, Retrying, retry
+from tenacity.retry import retry_if_exception_type, retry_if_result
 from tenacity.stop import stop_after_attempt, stop_after_delay
-from tenacity.wait import wait_chain, wait_fixed
+from tenacity.wait import wait_fixed

 from cloudpub.common import BaseService
-from cloudpub.error import InvalidStateError, NotFoundError
+from cloudpub.error import ConflictError, InvalidStateError, NotFoundError, Timeout
 from cloudpub.models.ms_azure import (
     RESOURCE_MAPING,
     AzureResource,
@@ -92,18 +92,31 @@ class AzureService(BaseService[AzurePublishingMetadata]):
     CONFIGURE_SCHEMA = "https://schema.mp.microsoft.com/schema/configure/{AZURE_API_VERSION}"
     DIFF_EXCLUDES = [r"root\['resources'\]\[[0-9]+\]\['url'\]"]

-    def __init__(self, credentials: Dict[str, str]):
+    def __init__(
+        self,
+        credentials: Dict[str, str],
+        retry_interval: Union[int, float] = 300,
+        retry_timeout: Union[int, float] = 3600 * 24 * 7,
+    ):
         """
         Create a new AuzureService object.

         Args:
             credentials (dict)
                 Dictionary with Azure credentials to authenticate on Product Ingestion API.
+            retry_interval (int, float)
+                The wait time interval in seconds for retrying jobs.
+                Defaults to 300
+            retry_timeout (int, float)
+                The max time in seconds to attempt retries.
+                Defaults to 7 days.
         """
         self.session = PartnerPortalSession.make_graph_api_session(
             auth_keys=credentials, schema_version=self.AZURE_SCHEMA_VERSION
         )
         self._products: List[ProductSummary] = []
+        self.retry_interval = retry_interval
+        self.retry_timeout = retry_timeout

     def _configure(self, data: Dict[str, Any]) -> ConfigureStatus:
         """
@@ -162,15 +175,27 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         log.debug("Query Job details response: %s", parsed_resp)
         return parsed_resp

-    @retry(
-        retry=retry_if_result(predicate=is_azure_job_not_complete),
-        wait=wait_chain(
-            *[wait_fixed(wait=60)]  # First wait for 1 minute  # noqa: W503
-            + [wait_fixed(wait=10 * 60)]  # Then wait for 10 minutes  # noqa: W503
-            + [wait_fixed(wait=30 * 60)]  # Finally wait each 30 minutes  # noqa: W503
-        ),
-        stop=stop_after_delay(max_delay=60 * 60 * 24 * 7),  # Give up after retrying for 7 days
-    )
+    def query_job_status(self, job_id: str) -> ConfigureStatus:
+        """Query the job status for a given Job ID.
+
+        It will raise error if any invalid state is detected.
+
+        Args:
+            job_id (str): The job ID to query details from.
+
+        Returns:
+            ConfigureStatus: The ConfigureStatus from JobID
+        Raises:
+            InvalidStateError: If the job has failed.
+        """
+        job_details = self._query_job_details(job_id=job_id)
+        if job_details.job_result == "failed":
+            error_message = f"Job {job_id} failed: \n{job_details.errors}"
+            self._raise_error(InvalidStateError, error_message)
+        elif job_details.job_result == "succeeded":
+            log.debug("Job %s succeeded", job_id)
+        return job_details
+
     def _wait_for_job_completion(self, job_id: str) -> ConfigureStatus:
         """
         Wait until the specified job ID is complete.
@@ -189,13 +214,15 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         Raises:
             InvalidStateError if the job failed
         """
-        job_details = self._query_job_details(job_id=job_id)
-        if job_details.job_result == "failed":
-            error_message = f"Job {job_id} failed: \n{job_details.errors}"
-            self._raise_error(InvalidStateError, error_message)
-        elif job_details.job_result == "succeeded":
-            log.debug("Job %s succeeded", job_id)
-        return job_details
+        r = Retrying(
+            retry=retry_if_result(predicate=is_azure_job_not_complete),
+            wait=wait_fixed(self.retry_interval),
+            stop=stop_after_delay(max_delay=self.retry_timeout),
+        )
+        try:
+            return r(self.query_job_status, job_id)
+        except RetryError:
+            self._raise_error(Timeout, f"Time out waiting for job {job_id}")

     def configure(self, resources: List[AzureResource]) -> ConfigureStatus:
         """
@@ -437,6 +464,21 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         remote = self.get_product(product.id, target=target)
         return DeepDiff(remote.to_json(), product.to_json(), exclude_regex_paths=self.DIFF_EXCLUDES)

+    def diff_two_offers(self, last_offer: Product, prev_offer: Product) -> DeepDiff:
+        """Compute the difference between two provided products.
+
+        Args:
+            last_offer (Product)
+                The lastest offer state to diff
+            prev_offer (Product)
+                The previous offer state to diff
+        Returns:
+            DeepDiff: The diff data.
+        """
+        return DeepDiff(
+            prev_offer.to_json(), last_offer.to_json(), exclude_regex_paths=self.DIFF_EXCLUDES
+        )
+
     def submit_to_status(
         self, product_id: str, status: str, resources: Optional[List[AzureResource]] = None
     ) -> ConfigureStatus:
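A small DeepDiff sketch of what the DIFF_EXCLUDES regex filters out when two offers are compared; the dictionaries below are illustrative, not real Product payloads:

from deepdiff import DeepDiff

prev = {"resources": [{"name": "plan-1", "url": "https://example.com/old"}]}
last = {"resources": [{"name": "plan-1", "url": "https://example.com/new"}]}

# Only the per-resource "url" differs, and it is excluded, so the diff is empty.
diff = DeepDiff(prev, last, exclude_regex_paths=[r"root\['resources'\]\[[0-9]+\]\['url'\]"])
assert not diff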
@@ -477,31 +519,48 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         log.debug("Set the status \"%s\" to submission.", status)
         return self.configure(resources=cfg_res)

-    @retry(
-        wait=wait_fixed(300),
-        stop=stop_after_delay(max_delay=60 * 60 * 24 * 7),  # Give up after retrying for 7 days,
-        reraise=True,
-    )
     def ensure_can_publish(self, product_id: str) -> None:
         """
         Ensure the offer is not already being published.

-        It will wait for up to 7 days retrying to make sure it's possible to publish before
-        giving up and raising.
+        It will raise ConflictError if a publish is already in progress in any submission target.

         Args:
             product_id (str)
                 The product ID to check the offer's publishing status
         Raises:
-            RuntimeError: whenever a publishing is already in progress.
+            ConflictError: whenever a publishing is already in progress for any submission target.
         """
         log.info("Ensuring no other publishing jobs are in progress for \"%s\"", product_id)
-        submission_targets = ["preview", "live"]

-        for target in submission_targets:
-            sub = self.get_submission_state(product_id, state=target)
-            if sub and sub.status and sub.status == "running":
-                raise RuntimeError(f"The offer {product_id} is already being published to {target}")
+        for sub in self.get_submissions(product_id):
+            if sub and sub.status and sub.status != "completed":
+                msg = (
+                    f"The offer {product_id} is already being published to "
+                    f"{sub.target.targetType}: {sub.status}/{sub.result}"
+                )
+                log.error(msg)
+                raise ConflictError(msg)
+
+    def wait_active_publishing(self, product_id: str) -> None:
+        """
+        Wait when there's an existing submission in progress.
+
+        Args:
+            product_id (str)
+                The product ID of to verify the submissions state.
+        """
+        r = Retrying(
+            retry=retry_if_exception_type(ConflictError),
+            wait=wait_fixed(self.retry_interval),
+            stop=stop_after_delay(max_delay=self.retry_timeout),
+        )
+        log.info("Checking for active changes on %s.", product_id)
+
+        try:
+            r(self.ensure_can_publish, product_id)
+        except RetryError:
+            self._raise_error(Timeout, f"Timed out waiting for {product_id} to be unlocked")

     def get_plan_tech_config(self, product: Product, plan: PlanSummary) -> VMIPlanTechConfig:
         """
@@ -548,14 +607,13 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         # The following resources shouldn't be required:
         # -> customer-leads
         # -> test-drive
-        # -> property
         # -> *listing*
         # -> reseller
         # -> price-and-availability-*
         # NOTE: The "submission" resource will be already added by the "submit_to_status" method
         #
-        # With that it needs only the related "product" and "plan" resources alongisde the
-        # updated tech_config
+        # With that it needs only the related "product", "property" and "plan" resources alongisde
+        # the updated tech_config
         product_id = tech_config.product_id
         plan_id = tech_config.plan_id
         prod_res = cast(
@@ -566,6 +624,14 @@ class AzureService(BaseService[AzurePublishingMetadata]):
                 if prd.id == product_id
             ],
         )[0]
+        property = cast(
+            List[ProductProperty],
+            [
+                prop
+                for prop in self.filter_product_resources(product=product, resource="property")
+                if prop.product_id == product_id  # type: ignore [union-attr]
+            ],
+        )[0]
         plan_res = cast(
             List[PlanSummary],
             [
@@ -574,7 +640,7 @@ class AzureService(BaseService[AzurePublishingMetadata]):
                 if pln.id == plan_id
             ],
         )[0]
-        return [prod_res, plan_res, tech_config]
+        return [prod_res, property, plan_res, tech_config]

     def compute_targets(self, product_id: str) -> List[str]:
         """List all the possible publishing targets order to seek data from Azure.
@@ -815,6 +881,7 @@ class AzureService(BaseService[AzurePublishingMetadata]):
         plan_name = metadata.destination.split("/")[-1]
         product_id = self.get_productid(product_name)
         sas_in_target = SasFoundStatus.missing
+        self.wait_active_publishing(product_id=product_id)
         log.info(
             "Preparing to associate the image \"%s\" with the plan \"%s\" from product \"%s\"",
             metadata.image_path,
cloudpub/ms_azure/utils.py CHANGED
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 import logging
 from operator import attrgetter
-from typing import Any, Dict, List, Optional, Tuple, TypedDict
+from typing import Any, Dict, List, Optional, TypedDict

 from deepdiff import DeepDiff

@@ -241,56 +241,6 @@ def is_legacy_gen_supported(metadata: AzurePublishingMetadata) -> bool:
     return metadata.architecture == "x64" and metadata.support_legacy


-def prepare_vm_images(
-    metadata: AzurePublishingMetadata,
-    gen1: Optional[VMImageDefinition],
-    gen2: Optional[VMImageDefinition],
-    source: VMImageSource,
-) -> List[VMImageDefinition]:
-    """
-    Update the vm_images list with the proper SAS based in existing generation(s).
-
-    Args:
-        metadata (AzurePublishingMetadata)
-            The VHD publishing metadata.
-        gen1 (VMImageDefinition, optional)
-            The VMImageDefinition for Gen1 VHD.
-            If not set the argument `gen2` must be set.
-        gen2 (VMImageDefinition, optional)
-            The VMImageDefinition for Gen2 VHD.
-            If not set the argument `gen1` must be set.
-        source (VMImageSource):
-            The VMImageSource with the updated SAS URI.
-    Returns:
-        list: A new list containing the expected VMImageDefinition(s)
-    """
-    if not gen1 and not gen2:
-        msg = "At least one argument of \"gen1\" or \"gen2\" must be set."
-        log.error(msg)
-        raise ValueError(msg)
-
-    raw_source = source.to_json()
-    json_gen1 = {
-        "imageType": get_image_type_mapping(metadata.architecture, "V1"),
-        "source": raw_source,
-    }
-    json_gen2 = {
-        "imageType": get_image_type_mapping(metadata.architecture, "V2"),
-        "source": raw_source,
-    }
-
-    if metadata.generation == "V2":
-        # In this case we need to set a V2 SAS URI
-        gen2_new = VMImageDefinition.from_json(json_gen2)
-        if is_legacy_gen_supported(metadata):  # and in this case a V1 as well
-            gen1_new = VMImageDefinition.from_json(json_gen1)
-            return [gen2_new, gen1_new]
-        return [gen2_new]
-    else:
-        # It's expected to be a Gen1 only, let's get rid of Gen2
-        return [VMImageDefinition.from_json(json_gen1)]
-
-
 def _all_skus_present(old_skus: List[VMISku], disk_versions: List[DiskVersion]) -> bool:
     image_types = set()
     for sku in old_skus:
@@ -485,47 +435,6 @@ def seek_disk_version(
     return None


-def vm_images_by_generation(
-    disk_version: DiskVersion, architecture: str
-) -> Tuple[Optional[VMImageDefinition], ...]:
-    """
-    Return a tuple containing the Gen1 and Gen2 VHD images in this order.
-
-    If one of the images doesn't exist it will return None in the expected tuple position.
-
-    Args:
-        disk_version
-            The disk version to retrieve the VMImageDefinitions from
-        architecture
-            The expected architecture for the VMImageDefinition.
-    Returns:
-        Gen1 and Gen2 VMImageDefinitions when they exist.
-    """
-    log.debug("Sorting the VMImageDefinition by generation.")
-    # Here we have 3 possibilities:
-    # 1. vm_images => "Gen1" only
-    # 2. vm_images => "Gen2" only
-    # 3. vm_images => "Gen1" and "Gen2"
-
-    # So let's get the first image whatever it is
-    img = disk_version.vm_images.pop(0)
-
-    # If first `img` is Gen2 we set the other one as `img_legacy`
-    if img.image_type == get_image_type_mapping(architecture, "V2"):
-        img_legacy = disk_version.vm_images.pop(0) if len(disk_version.vm_images) > 0 else None
-
-    else:  # Otherwise we set it as `img_legacy` and get the gen2
-        img_legacy = img
-        img = (
-            disk_version.vm_images.pop(0)  # type: ignore
-            if len(disk_version.vm_images) > 0
-            else None
-        )
-    log.debug("Image for current generation: %s", img)
-    log.debug("Image for legacy generation: %s", img_legacy)
-    return img, img_legacy
-
-
 def create_vm_image_definitions(
     metadata: AzurePublishingMetadata, source: VMImageSource
 ) -> List[VMImageDefinition]:
@@ -580,21 +489,41 @@ def set_new_sas_disk_version(
     # If we already have a VMImageDefinition let's use it
     if disk_version.vm_images:
         log.debug("The DiskVersion \"%s\" contains inner images.", disk_version.version_number)
-        img, img_legacy = vm_images_by_generation(disk_version, metadata.architecture)
-
-        # Now we replace the SAS URI for the vm_images
         log.info(
             "Adjusting the VMImages from existing DiskVersion \"%s\""
-            "to fit the new image with SAS \"%s\".",
+            " to fit the new image with SAS \"%s\".",
             disk_version.version_number,
             metadata.image_path,
         )
-        disk_version.vm_images = prepare_vm_images(
-            metadata=metadata,
-            gen1=img_legacy,
-            gen2=img,
-            source=source,
-        )
+        # Verify whether the arch is present for the new image
+        is_arch_present = False
+        # If the arch is present, update the SAS URI
+        for img in disk_version.vm_images:
+            if (
+                img.image_type == get_image_type_mapping(metadata.architecture, metadata.generation)
+            ) or (
+                metadata.support_legacy
+                and img.image_type == get_image_type_mapping(metadata.architecture, "V1")  # noqa
+            ):
+                is_arch_present = True
+                img.source.os_disk.uri = source.os_disk.uri
+
+        # If the arch is not present, add it to the DiskVersion
+        if not is_arch_present:
+            if metadata.support_legacy:
+                disk_version.vm_images.append(
+                    VMImageDefinition(
+                        image_type=get_image_type_mapping(metadata.architecture, "V1"),
+                        source=source.to_json(),
+                    )
+                )
+            disk_version.vm_images.append(
+                VMImageDefinition(
+                    image_type=get_image_type_mapping(metadata.architecture, metadata.generation),
+                    source=source.to_json(),
+                )
+            )
+        return disk_version

     # If no VMImages, we need to create them from scratch
     else:
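In place of the removed prepare_vm_images/vm_images_by_generation pair, the new code updates an existing image entry's SAS URI in place and only appends when that image type is missing. A simplified illustration of that update-or-append pattern with stand-in types; the image-type strings and URIs are invented for the example:

from dataclasses import dataclass
from typing import List

@dataclass
class FakeImage:  # stands in for VMImageDefinition
    image_type: str
    sas_uri: str

def refresh(images: List[FakeImage], image_type: str, new_sas: str) -> List[FakeImage]:
    for img in images:
        if img.image_type == image_type:
            img.sas_uri = new_sas  # generation already listed: just swap the SAS URI
            return images
    images.append(FakeImage(image_type, new_sas))  # otherwise add a new definition
    return images

imgs = [FakeImage("x64Gen2", "https://example.com/old-sas")]
refresh(imgs, "x64Gen2", "https://example.com/new-sas")  # updated in place
refresh(imgs, "x64Gen1", "https://example.com/new-sas")  # appended as a second entry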
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.4
 Name: cloudpub
-Version: 1.6.0
+Version: 1.7.0
 Summary: Services for publishing products in cloud environments
 Home-page: https://github.com/release-engineering/cloudpub
 Author: Jonathan Gangi
@@ -19,4 +19,12 @@ Requires-Dist: requests
 Requires-Dist: tenacity
 Requires-Dist: packaging
 Requires-Dist: boto3>=1.34.117
-
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: license-file
+Dynamic: requires-dist
+Dynamic: summary
@@ -1,6 +1,6 @@
 cloudpub/__init__.py,sha256=OcXNPwjDybWtcxAL0IhFtKiMErHP4xugUgpMKPbpYGM,44
 cloudpub/common.py,sha256=fnMEVyf4M4hV2MGFKv59wy4C591jp6kji_sq0T6MRlA,3990
-cloudpub/error.py,sha256=5rMBJ3pPcbG92swnDlMtIKDVw60FeW37vFHcu58trcI,578
+cloudpub/error.py,sha256=Tof439r4Ei2mM-BnP1zI1sC2IBzUpbhFd2xIZdyg_k8,661
 cloudpub/utils.py,sha256=HTFVlYOb7nTw6BTTv_migod7cQMXzpSlBzsC8aCfQdQ,1097
 cloudpub/aws/__init__.py,sha256=bQbgPTq-S3IrUkZpBNAZ6-2FQ3KiRg87Al5muXh8pmc,117
 cloudpub/aws/service.py,sha256=h-nsTE1mldjQD1nqlUCX0OQiAwnDTVJlI92X13fK8GU,19337
@@ -8,13 +8,13 @@ cloudpub/aws/utils.py,sha256=qlZ9R7J8AArR3tFYW47cJ0dgdqYc7kPJUEn24HtzeGI,6453
 cloudpub/models/__init__.py,sha256=OcXNPwjDybWtcxAL0IhFtKiMErHP4xugUgpMKPbpYGM,44
 cloudpub/models/aws.py,sha256=arzFqLmFw8O9Otk_VatLR5dmQ9FsdWT3f0Ibap7EW0o,42850
 cloudpub/models/common.py,sha256=iZ503VVFL9y0P_wXiK0f3flXV32VWBs9i-9NoYfJZUg,4970
-cloudpub/models/ms_azure.py,sha256=nzTp9IvAW-WEJuN20IAc93yY6YPHCTE0j116EfQUsPg,55974
+cloudpub/models/ms_azure.py,sha256=f-r6Fic5zFyOGIZHUe1BK2iK7dzjXK1rqsZjn3Xvr5M,56252
 cloudpub/ms_azure/__init__.py,sha256=eeYXPd_wzDBmh0Hmzd5o4yzocFzM6n4r8qpCDy00kYk,117
-cloudpub/ms_azure/service.py,sha256=ySc3ktibNhMw-ZvTgTIB1EZjliYu0OYSBi8sRBlpslg,36250
+cloudpub/ms_azure/service.py,sha256=izO6vrC-xEYpnUrxMDmMo9ZKlUoAGtK4EmN6o_2dPWc,38634
 cloudpub/ms_azure/session.py,sha256=PXCSJ1dFkx43lQV0WFPnRxbpyOBccdtrMiWGPORT3Ro,6356
-cloudpub/ms_azure/utils.py,sha256=goADEmIZBFuIDG5sH9dAZed_EWIUWUAtjzDTw9HyNsI,21512
-cloudpub-1.6.0.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-cloudpub-1.6.0.dist-info/METADATA,sha256=bXsSCTG8RdWlsD5SjlqYrWgxsOQKOL5bRwk-fVVVP6Y,754
-cloudpub-1.6.0.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-cloudpub-1.6.0.dist-info/top_level.txt,sha256=YnnJuTiWBpRI9zMkYUVcZNuvjzzJYblASj-7Q8m3Gzg,9
-cloudpub-1.6.0.dist-info/RECORD,,
+cloudpub/ms_azure/utils.py,sha256=pYDcpSuL-FDLbXANakTY7M70hARGAjQ3rVPLZbCRjo4,19180
+cloudpub-1.7.0.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+cloudpub-1.7.0.dist-info/METADATA,sha256=ObxvkEd-CxzYbntbKDR2vW9hfgLntBxNtZWj3uHAJZY,927
+cloudpub-1.7.0.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+cloudpub-1.7.0.dist-info/top_level.txt,sha256=YnnJuTiWBpRI9zMkYUVcZNuvjzzJYblASj-7Q8m3Gzg,9
+cloudpub-1.7.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.45.1)
+Generator: setuptools (79.0.1)
 Root-Is-Purelib: true
 Tag: py3-none-any
