service-capacity-modeling 0.3.60__py3-none-any.whl → 0.3.62__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of service-capacity-modeling might be problematic; consult the advisory details for more information.

Files changed (22)
  1. service_capacity_modeling/capacity_planner.py +0 -1
  2. service_capacity_modeling/hardware/__init__.py +1 -2
  3. service_capacity_modeling/interface.py +1 -1
  4. service_capacity_modeling/models/common.py +2 -3
  5. service_capacity_modeling/models/org/netflix/cassandra.py +5 -6
  6. service_capacity_modeling/models/org/netflix/crdb.py +0 -1
  7. service_capacity_modeling/models/org/netflix/ddb.py +9 -9
  8. service_capacity_modeling/models/org/netflix/elasticsearch.py +0 -1
  9. service_capacity_modeling/models/org/netflix/evcache.py +0 -1
  10. service_capacity_modeling/models/org/netflix/graphkv.py +0 -1
  11. service_capacity_modeling/models/org/netflix/kafka.py +2 -6
  12. service_capacity_modeling/models/org/netflix/stateless_java.py +0 -1
  13. service_capacity_modeling/models/org/netflix/wal.py +0 -1
  14. service_capacity_modeling/models/org/netflix/zookeeper.py +0 -2
  15. service_capacity_modeling/tools/auto_shape.py +3 -3
  16. service_capacity_modeling/tools/fetch_pricing.py +0 -1
  17. {service_capacity_modeling-0.3.60.dist-info → service_capacity_modeling-0.3.62.dist-info}/METADATA +18 -6
  18. {service_capacity_modeling-0.3.60.dist-info → service_capacity_modeling-0.3.62.dist-info}/RECORD +22 -22
  19. {service_capacity_modeling-0.3.60.dist-info → service_capacity_modeling-0.3.62.dist-info}/WHEEL +0 -0
  20. {service_capacity_modeling-0.3.60.dist-info → service_capacity_modeling-0.3.62.dist-info}/entry_points.txt +0 -0
  21. {service_capacity_modeling-0.3.60.dist-info → service_capacity_modeling-0.3.62.dist-info}/licenses/LICENSE +0 -0
  22. {service_capacity_modeling-0.3.60.dist-info → service_capacity_modeling-0.3.62.dist-info}/top_level.txt +0 -0
@@ -586,7 +586,6 @@ class CapacityPlanner:
586
586
 
587
587
  # Calculates the minimum cpu, memory, and network requirements based on desires.
588
588
  def _per_instance_requirements(self, desires) -> Tuple[int, float]:
589
-
590
589
  # Applications often set fixed reservations of heap or OS memory
591
590
  per_instance_mem = (
592
591
  desires.data_shape.reserved_instance_app_mem_gib
@@ -110,8 +110,7 @@ def merge_hardware(existing: Hardware, override: Hardware) -> Hardware:
110
110
  for shape in existing_keys | override_keys:
111
111
  if shape in existing_keys and shape in override_keys:
112
112
  raise ValueError(
113
- f"Duplicate shape {shape}! "
114
- "Only one file should contain a shape"
113
+ f"Duplicate shape {shape}! Only one file should contain a shape"
115
114
  )
116
115
  if shape not in existing_keys:
117
116
  merged_field[shape] = override_field[shape]
@@ -405,7 +405,7 @@ class Instance(ExcludeUnsetModel):
405
405
  self_dict = self.model_dump()
406
406
  other_dict = overrides.model_dump(exclude_unset=True)
407
407
 
408
- for (k, v) in other_dict.items():
408
+ for k, v in other_dict.items():
409
409
  # TODO we need a deep merge on drive (recursive merge)
410
410
  if k in ("platforms",):
411
411
  # Unique merge platforms
@@ -355,9 +355,8 @@ def compute_stateful_zone( # pylint: disable=too-many-positional-arguments
355
355
  # or less IOs for a given data size as well as space
356
356
  # Contract for disk ios is
357
357
  # (per_node_size_gib, node_count) -> (read_ios, write_ios)
358
- required_disk_ios: Callable[
359
- [float, int], Tuple[float, float]
360
- ] = lambda size_gib, count: (0, 0),
358
+ required_disk_ios: Callable[[float, int], Tuple[float, float]] = lambda size_gib,
359
+ count: (0, 0),
361
360
  required_disk_space: Callable[[float], float] = lambda size_gib: size_gib,
362
361
  # The maximum amount of state we can hold per node in the database
363
362
  # typically you don't want stateful systems going much higher than a
@@ -13,7 +13,6 @@ from service_capacity_modeling.interface import AccessConsistency
13
13
  from service_capacity_modeling.interface import AccessPattern
14
14
  from service_capacity_modeling.interface import Buffer
15
15
  from service_capacity_modeling.interface import BufferComponent
16
- from service_capacity_modeling.interface import BufferIntent
17
16
  from service_capacity_modeling.interface import Buffers
18
17
  from service_capacity_modeling.interface import CapacityDesires
19
18
  from service_capacity_modeling.interface import CapacityPlan
@@ -603,7 +602,7 @@ class NflxCassandraCapacityModel(CapacityModel):
603
602
  f"Required cluster size must be at least "
604
603
  f"{CRITICAL_TIER_MIN_CLUSTER_SIZE=} when "
605
604
  f"service tier({tier}) is a "
606
- f"critical tier({CRITICAL_TIERS})."
605
+ f"critical tier({CRITICAL_TIERS}). "
607
606
  f"If it is an existing cluster, horizontally "
608
607
  f"scale the cluster to be >= "
609
608
  f"{CRITICAL_TIER_MIN_CLUSTER_SIZE}"
@@ -629,10 +628,10 @@ class NflxCassandraCapacityModel(CapacityModel):
629
628
  require_attached_disks: bool = extra_model_arguments.get(
630
629
  "require_attached_disks", False
631
630
  )
632
- required_cluster_size: Optional[
633
- int
634
- ] = NflxCassandraCapacityModel.get_required_cluster_size(
635
- desires.service_tier, extra_model_arguments
631
+ required_cluster_size: Optional[int] = (
632
+ NflxCassandraCapacityModel.get_required_cluster_size(
633
+ desires.service_tier, extra_model_arguments
634
+ )
636
635
  )
637
636
 
638
637
  max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 500)
@@ -143,7 +143,6 @@ def _estimate_cockroachdb_cluster_zonal( # noqa=E501 pylint: disable=too-many-p
143
143
  min_vcpu_per_instance: int = 4,
144
144
  license_fee_per_core: float = 0.0,
145
145
  ) -> Optional[CapacityPlan]:
146
-
147
146
  if instance.cpu < min_vcpu_per_instance:
148
147
  return None
149
148
 
@@ -191,9 +191,9 @@ def _get_write_consistency_percentages(
191
191
  transactional_write_percent = 0.0
192
192
  non_transactional_write_percent = 1.0
193
193
  total_percent = transactional_write_percent + non_transactional_write_percent
194
- assert (
195
- total_percent == 1
196
- ), "transactional_write_percent, non_transactional_write_percent should sum to 1"
194
+ assert total_percent == 1, (
195
+ "transactional_write_percent, non_transactional_write_percent should sum to 1"
196
+ )
197
197
  return {
198
198
  "transactional_write_percent": transactional_write_percent,
199
199
  "non_transactional_write_percent": non_transactional_write_percent,
@@ -452,9 +452,9 @@ class NflxDynamoDBCapacityModel(CapacityModel):
452
452
  "data_transfer_gib": data_transfer_plan.total_data_transfer_gib,
453
453
  "target_utilization_percentage": target_util_percentage,
454
454
  }
455
- requirement_context[
456
- "replicated_write_capacity_units"
457
- ] = write_plan.replicated_write_capacity_units
455
+ requirement_context["replicated_write_capacity_units"] = (
456
+ write_plan.replicated_write_capacity_units
457
+ )
458
458
 
459
459
  dynamo_costs = {
460
460
  "dynamo.regional-writes": write_plan.total_annual_write_cost,
@@ -462,9 +462,9 @@ class NflxDynamoDBCapacityModel(CapacityModel):
462
462
  "dynamo.regional-storage": storage_plan.total_annual_data_storage_cost,
463
463
  }
464
464
 
465
- dynamo_costs[
466
- "dynamo.regional-transfer"
467
- ] = data_transfer_plan.total_annual_data_transfer_cost
465
+ dynamo_costs["dynamo.regional-transfer"] = (
466
+ data_transfer_plan.total_annual_data_transfer_cost
467
+ )
468
468
 
469
469
  dynamo_costs["dynamo.data-backup"] = backup_plan.total_annual_backup_cost
470
470
 
@@ -184,7 +184,6 @@ class NflxElasticsearchDataCapacityModel(CapacityModel):
184
184
  desires: CapacityDesires,
185
185
  extra_model_arguments: Dict[str, Any],
186
186
  ) -> Optional[CapacityPlan]:
187
-
188
187
  copies_per_region: int = _target_rf(
189
188
  desires, extra_model_arguments.get("copies_per_region", None)
190
189
  )
@@ -217,7 +217,6 @@ def _estimate_evcache_cluster_zonal( # noqa: C901,E501 pylint: disable=too-many
217
217
  min_instance_memory_gib: int = 12,
218
218
  cross_region_replication: Replication = Replication.none,
219
219
  ) -> Optional[CapacityPlan]:
220
-
221
220
  # EVCache doesn't like to deploy on single CPU instances
222
221
  if instance.cpu < 2:
223
222
  return None
@@ -30,7 +30,6 @@ class NflxGraphKVCapacityModel(CapacityModel):
30
30
  desires: CapacityDesires,
31
31
  extra_model_arguments: Dict[str, Any],
32
32
  ) -> Optional[CapacityPlan]:
33
-
34
33
  graphkv_app = nflx_java_app_capacity_model.capacity_plan(
35
34
  instance=instance,
36
35
  drive=drive,
@@ -110,10 +110,8 @@ def _estimate_kafka_requirement( # pylint: disable=too-many-positional-argument
110
110
  (write_mib_per_second * MIB_IN_BYTES) * copies_per_region
111
111
  ) / MEGABIT_IN_BYTES
112
112
  bw_out = (
113
- (
114
- (read_mib_per_second * MIB_IN_BYTES)
115
- + ((write_mib_per_second * MIB_IN_BYTES) * (copies_per_region - 1))
116
- )
113
+ (read_mib_per_second * MIB_IN_BYTES)
114
+ + ((write_mib_per_second * MIB_IN_BYTES) * (copies_per_region - 1))
117
115
  ) / MEGABIT_IN_BYTES
118
116
  if (
119
117
  current_zonal_capacity
@@ -252,7 +250,6 @@ def _estimate_kafka_cluster_zonal( # noqa: C901
252
250
  min_instance_memory_gib: int = 12,
253
251
  require_same_instance_family: bool = True,
254
252
  ) -> Optional[CapacityPlan]:
255
-
256
253
  # Kafka doesn't like to deploy on single CPU instances or with < 12 GiB of ram
257
254
  if instance.cpu < min_instance_cpu or instance.ram_gib < min_instance_memory_gib:
258
255
  return None
@@ -469,7 +466,6 @@ class NflxKafkaArguments(BaseModel):
469
466
 
470
467
 
471
468
  class NflxKafkaCapacityModel(CapacityModel):
472
-
473
469
  HA_DEFAULT_REPLICATION_FACTOR = 2
474
470
  SC_DEFAULT_REPLICATION_FACTOR = 3
475
471
 
@@ -91,7 +91,6 @@ def _estimate_java_app_region( # pylint: disable=too-many-positional-arguments
91
91
  failover: bool = True,
92
92
  jvm_memory_overhead: float = 2,
93
93
  ) -> Optional[CapacityPlan]:
94
-
95
94
  if drive.name != "gp2":
96
95
  return None
97
96
 
@@ -30,7 +30,6 @@ class NflxWALCapacityModel(CapacityModel):
30
30
  desires: CapacityDesires,
31
31
  extra_model_arguments: Dict[str, Any],
32
32
  ) -> Optional[CapacityPlan]:
33
-
34
33
  wal_app = nflx_java_app_capacity_model.capacity_plan(
35
34
  instance=instance,
36
35
  drive=drive,
@@ -35,7 +35,6 @@ def _zk_requirement(
35
35
  heap_overhead: float,
36
36
  disk_overhead: float,
37
37
  ) -> Optional[CapacityRequirement]:
38
-
39
38
  # We only deploy Zookeeper to fast ephemeral storage
40
39
  # Due to fsync latency to the disk.
41
40
  if instance.drive is None:
@@ -94,7 +93,6 @@ class NflxZookeeperCapacityModel(CapacityModel):
94
93
  desires: CapacityDesires,
95
94
  extra_model_arguments: Dict[str, Any],
96
95
  ) -> Optional[CapacityPlan]:
97
-
98
96
  # We only deploy Zookeeper to 3 zone regions at this time
99
97
  if context.zones_in_region != 3:
100
98
  return None
@@ -178,9 +178,9 @@ def pull_family(
178
178
  # The biggest size drive is the single tenant one
179
179
  disk_max_size = max(disk_max_size, int(disk["SizeInGB"]))
180
180
 
181
- instance_jsons_dict[
182
- instance_type_json["InstanceType"].split(".")[1]
183
- ] = instance_type_json
181
+ instance_jsons_dict[instance_type_json["InstanceType"].split(".")[1]] = (
182
+ instance_type_json
183
+ )
184
184
 
185
185
  if disk_type.name.startswith("local"):
186
186
  if (
@@ -22,7 +22,6 @@ def extract_3yr_upfront_price(price_data):
22
22
  and term_attrs.get("PurchaseOption") == "All Upfront"
23
23
  and term_attrs.get("OfferingClass") == "standard"
24
24
  ):
25
-
26
25
  # Get upfront fee
27
26
  for dim in term["priceDimensions"].values():
28
27
  if dim["unit"] == "Quantity":
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: service-capacity-modeling
3
- Version: 0.3.60
3
+ Version: 0.3.62
4
4
  Summary: Contains utilities for modeling capacity for pluggable workloads
5
5
  Author: Joseph Lynch
6
6
  Author-email: josephl@netflix.com
@@ -251,16 +251,28 @@ tox -e py38 -- -k test_<your_functionality> --pdb --pdbcls=IPython.terminal.debu
251
251
  ```
252
252
 
253
253
  ### Pre-commit / Linting
254
- To install the pre-commit linter
255
- ```
256
- pre-commit install
257
- ```
258
-
259
254
  To run the linting manually:
260
255
  ```
261
256
  tox -e pre-commit
262
257
  ```
263
258
 
259
+ ### Installing Pre-commit Hooks
260
+
261
+ This repository includes a custom pre-commit hook that runs all linting and formatting checks through the tox environment. To install it:
262
+
263
+ ```bash
264
+ # Install the custom pre-commit hook
265
+ tox -e install-hooks
266
+
267
+ # Or manually copy the hook
268
+ cp hooks/pre-commit .git/hooks/pre-commit
269
+ chmod +x .git/hooks/pre-commit
270
+ ```
271
+
272
+ The hook will automatically:
273
+ - Create the tox pre-commit environment if it doesn't exist
274
+ - Run all pre-commit checks (ruff, flake8, etc.)
275
+ - Ensure all code quality standards are met before commits
264
276
 
265
277
  ### PyCharm IDE Setup
266
278
  Use one of the test environments for IDE development, e.g. `tox -e py310` and then
@@ -1,8 +1,8 @@
1
1
  service_capacity_modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- service_capacity_modeling/capacity_planner.py,sha256=B-C6EgZEWLKS6HEL74WxGeoLHrzNN6Cmyfee2jKsdcU,31915
3
- service_capacity_modeling/interface.py,sha256=145YV4Lm2YZhBAKjsdUIjeIOxJ7NxqUPwz9oxekgsPo,36995
2
+ service_capacity_modeling/capacity_planner.py,sha256=B6e0esOAvV6qMkEeLIO9rEveTftRG_Ut_d9gYgSIM0w,31914
3
+ service_capacity_modeling/interface.py,sha256=eJRahrVv-JMbalwd6gqVLbtvCoBhZSLyG-mm8asOBD4,36993
4
4
  service_capacity_modeling/stats.py,sha256=8HIPwVnmvbauBwXhn6vbNYO7-CzWPuymnq0eX7ZA1_w,5849
5
- service_capacity_modeling/hardware/__init__.py,sha256=Lc-DdX2ZJ_sFT3KlImPUUcKmcjVnavH_mMXlyCwpNiw,9001
5
+ service_capacity_modeling/hardware/__init__.py,sha256=kzIHnIymwnf4qQYDpfIChIAxTF8b87XtnBg1TwF_J9E,8974
6
6
  service_capacity_modeling/hardware/profiles/__init__.py,sha256=7-y3JbCBkgzaAjFla2RIymREcImdZ51HTl3yn3vzoGw,1602
7
7
  service_capacity_modeling/hardware/profiles/profiles.txt,sha256=tOfSR3B0E0uAOaXd5SLI3ioq83UYZ3yhK7UHhsK4awQ,49
8
8
  service_capacity_modeling/hardware/profiles/pricing/aws/3yr-reserved_ec2.json,sha256=JNAj4yotSrEzlsMLamg_GmhqOiiTwKNcDPNs44PTLxI,52798
@@ -40,38 +40,38 @@ service_capacity_modeling/hardware/profiles/shapes/aws/manual_drives.json,sha256
40
40
  service_capacity_modeling/hardware/profiles/shapes/aws/manual_instances.json,sha256=i611n6d7hsjd7L8aSEDzfaTMS2jVs-Jc38-vl-NKfs4,18013
41
41
  service_capacity_modeling/hardware/profiles/shapes/aws/manual_services.json,sha256=h63675KKmu5IrI3BORDN8fiAqLjAyYHArErKbC7-T30,776
42
42
  service_capacity_modeling/models/__init__.py,sha256=XK7rTBW8ZXQY5L9Uy2FwjuFN_KBW3hKw7IrhG1piajs,13567
43
- service_capacity_modeling/models/common.py,sha256=ixmNkohvw30R2JBvdpM6xKb19na_g3GNGvQQYLlkV_A,33577
43
+ service_capacity_modeling/models/common.py,sha256=f2M3ZpQopENexwwDeoMyH57oktenz88advR2XQoNTys,33567
44
44
  service_capacity_modeling/models/headroom_strategy.py,sha256=QIkP_K_tK2EGAjloaGfXeAPH5M0UDCN8FlAtwV9xxTA,651
45
45
  service_capacity_modeling/models/utils.py,sha256=0F__wz9KAGhPIQfvNp-FTtTANW6-sO4FsyddnuXqSJc,2161
46
46
  service_capacity_modeling/models/org/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
47
47
  service_capacity_modeling/models/org/netflix/__init__.py,sha256=m7IaQbo85NEbDvfoPJREIznpzg0YHTCrKP5C1GvnOYM,2378
48
48
  service_capacity_modeling/models/org/netflix/aurora.py,sha256=Mi9zd48k64GkKIjAs3J1S2qThguNvyWIy2dUmhwrVhc,12883
49
- service_capacity_modeling/models/org/netflix/cassandra.py,sha256=ppvHfHIwSzljlkSDTbPBXXaX0N1suVdmI3sfEwN7Cek,34682
49
+ service_capacity_modeling/models/org/netflix/cassandra.py,sha256=BsMx9QYSGSh4wKdJm_dR5rBHdbYlE7NMNWiYIQ0oU4w,34632
50
50
  service_capacity_modeling/models/org/netflix/counter.py,sha256=hOVRRCgCPU-A5TdLKQXc_mWTQpkKOWRNjOeECdDP7kA,9205
51
- service_capacity_modeling/models/org/netflix/crdb.py,sha256=2rD4Io0yT7o0NR4lNferXXOSTDe0SkT1LbSChvNgRrQ,19698
52
- service_capacity_modeling/models/org/netflix/ddb.py,sha256=2jxMFz31xckJvymvVlu1yWm0X4dGYlqxDo0bftU1B9M,26307
53
- service_capacity_modeling/models/org/netflix/elasticsearch.py,sha256=Ku7OFz-6BY0FS0lfNh9MCqtDcjufLcuPbug0CO4UPvY,23686
51
+ service_capacity_modeling/models/org/netflix/crdb.py,sha256=AlHdGFpR1RmwQSZsiuiHLR2wTrnmtguT2MMYBDHfdiM,19697
52
+ service_capacity_modeling/models/org/netflix/ddb.py,sha256=GDoXVIpDDY6xDB0dsiaz7RAPPj-qffTrM9N6w5-5ndg,26311
53
+ service_capacity_modeling/models/org/netflix/elasticsearch.py,sha256=746WYY_WSeMgY-Go7wvWGYOxfFV8ryupNThvDZHPbGo,23685
54
54
  service_capacity_modeling/models/org/netflix/entity.py,sha256=M0vzwhf8UAbVxnXspAkN4GEbq3rix6yoky6W2oDG6a0,8648
55
- service_capacity_modeling/models/org/netflix/evcache.py,sha256=l3fzIVQ1PznD2tG00Fmwl728437MZrtHOS4WjXVa3fs,25229
56
- service_capacity_modeling/models/org/netflix/graphkv.py,sha256=EVRo-1OCDvvotqPgIdP2_JXMfZUsG7KZxRMmlgYc3CI,8558
55
+ service_capacity_modeling/models/org/netflix/evcache.py,sha256=G5d8OXp21FslKt2EhYWxhxicKEggA0lQhG3Bid93elw,25228
56
+ service_capacity_modeling/models/org/netflix/graphkv.py,sha256=iS5QDDv9_hNY6nIgdL-umB439qP7-jN-n6_Tl6d-ZSo,8557
57
57
  service_capacity_modeling/models/org/netflix/iso_date_math.py,sha256=CPGHLmbGeNqkcYcmCkLKhPZcAU-yTJ2HjvuXdnNyCYc,996
58
- service_capacity_modeling/models/org/netflix/kafka.py,sha256=cIvO88g7YhEOYsMAVbZ5a4YyJCvbKaTmwQjk73gg_7s,25545
58
+ service_capacity_modeling/models/org/netflix/kafka.py,sha256=FiuBml8uWOVfPFZ37NvZW13nMVtFE4VDf_SZIFdw4sA,25515
59
59
  service_capacity_modeling/models/org/netflix/key_value.py,sha256=yL5moU0SJD4ocBU9zeGhPYE4KY7oSSq5yqfVWd_Ot2g,9336
60
60
  service_capacity_modeling/models/org/netflix/postgres.py,sha256=R3Tog-ZW1Yx6gO3AKqI_wquSm30s01QX9yWR7Jvgk9A,4055
61
61
  service_capacity_modeling/models/org/netflix/rds.py,sha256=z9egFBg4Ltqyuz_WHk-_hw-xL-EQNzl1JopJoWdNli8,10768
62
- service_capacity_modeling/models/org/netflix/stateless_java.py,sha256=WYIDUdy-1BgVTLcAbMW2xg-Xx6RPusITLOKNh2DuA1w,11307
62
+ service_capacity_modeling/models/org/netflix/stateless_java.py,sha256=1la-z5tG5CGr7QM5SHBGguxzkA7BxJPAoUWhthADe8s,11306
63
63
  service_capacity_modeling/models/org/netflix/time_series.py,sha256=XDujp-m7bRQWFDXKY7nto0OQUq3v1z56HZKs3eO8o_I,8167
64
64
  service_capacity_modeling/models/org/netflix/time_series_config.py,sha256=Qrtngn6VQq13NF55h_oRsSNA8lVnOOIb-c-dsQn6TH4,7311
65
- service_capacity_modeling/models/org/netflix/wal.py,sha256=EdhEKTqxdMRobvqp5EHtjnNE4UpWCt1rNXKQUx5OlL8,4410
66
- service_capacity_modeling/models/org/netflix/zookeeper.py,sha256=buYv400R8_KCF4Y02uQ5tzcBCvk5jSGfPnMWSh4A9EI,7603
65
+ service_capacity_modeling/models/org/netflix/wal.py,sha256=9GA7V5pZF1mKu9mM7kI9XN_i6U7Ap1oh6REzXO2ZxOM,4409
66
+ service_capacity_modeling/models/org/netflix/zookeeper.py,sha256=BHLjnVDyx15wMGrc0QFmv9v6M95snU30WR7mhIRoa4Q,7601
67
67
  service_capacity_modeling/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
68
- service_capacity_modeling/tools/auto_shape.py,sha256=Rk5Fjrw2susVL8It_J2KUADoMGBN6N394fcThdl62Ng,14672
69
- service_capacity_modeling/tools/fetch_pricing.py,sha256=SHOtFaPr61op2bnY9i_g_1-d-Nz2rV8c7Jwsye2R49s,3763
68
+ service_capacity_modeling/tools/auto_shape.py,sha256=41pfR40BN-xJS8js4BWSoqT67JhO2_XqzmNeKDoCFBo,14674
69
+ service_capacity_modeling/tools/fetch_pricing.py,sha256=JkgJPTE0SVj8sdGQvo0HN-Hdv3nfA2tu7C_Arad5aX8,3762
70
70
  service_capacity_modeling/tools/generate_missing.py,sha256=uvr9fQanx3bm4KTneH-x7EOQvO7cVV0i9gdQvArPCuY,2947
71
71
  service_capacity_modeling/tools/instance_families.py,sha256=e9JWSIdljSmHI8Nb2MI5Ld9JqQ7WdOtPtV7g3oR7ZiU,7764
72
- service_capacity_modeling-0.3.60.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
73
- service_capacity_modeling-0.3.60.dist-info/METADATA,sha256=tMXT9YuVyoSpvawrjPjzLWQTwIJUXHrr9_XmL2btp7Y,9733
74
- service_capacity_modeling-0.3.60.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
75
- service_capacity_modeling-0.3.60.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
76
- service_capacity_modeling-0.3.60.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
77
- service_capacity_modeling-0.3.60.dist-info/RECORD,,
72
+ service_capacity_modeling-0.3.62.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
73
+ service_capacity_modeling-0.3.62.dist-info/METADATA,sha256=3PzrFsAG7No9MPYyMQ-MVY28MHZLysYPUX4MFcCmzsU,10214
74
+ service_capacity_modeling-0.3.62.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
75
+ service_capacity_modeling-0.3.62.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
76
+ service_capacity_modeling-0.3.62.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
77
+ service_capacity_modeling-0.3.62.dist-info/RECORD,,