service-capacity-modeling 0.3.76__tar.gz → 0.3.77__tar.gz
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/PKG-INFO +1 -1
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/capacity_planner.py +46 -40
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/__init__.py +11 -7
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/interface.py +20 -18
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/__init__.py +21 -2
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/common.py +20 -19
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/headroom_strategy.py +2 -1
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/__init__.py +4 -1
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/aurora.py +12 -7
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/cassandra.py +22 -12
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/counter.py +4 -2
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/crdb.py +7 -4
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/ddb.py +9 -5
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/elasticsearch.py +8 -6
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/entity.py +5 -3
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/evcache.py +13 -9
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/graphkv.py +5 -3
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/iso_date_math.py +12 -9
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/kafka.py +13 -7
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/key_value.py +4 -2
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/postgres.py +4 -2
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/rds.py +10 -5
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/stateless_java.py +4 -2
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/time_series.py +4 -2
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/time_series_config.py +3 -3
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/wal.py +4 -2
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/netflix/zookeeper.py +5 -3
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/stats.py +14 -11
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/tools/auto_shape.py +10 -6
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/tools/fetch_pricing.py +13 -6
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/tools/generate_missing.py +4 -3
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/tools/instance_families.py +4 -1
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling.egg-info/PKG-INFO +1 -1
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/LICENSE +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/README.md +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/__init__.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/__init__.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/pricing/aws/3yr-reserved_ec2.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/pricing/aws/3yr-reserved_zz-overrides.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/profiles.txt +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5a.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5d.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5n.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6a.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6id.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c7a.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c7i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c8i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_i3en.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_i4i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_i7i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m4.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m5.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m5n.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6a.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6id.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6idn.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6in.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m7a.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m7i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m8i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r4.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r5.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r5n.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6a.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6id.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6idn.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6in.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r7a.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r7i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r8i.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_drives.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_instances.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_services.json +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/org/__init__.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/models/utils.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling/tools/__init__.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling.egg-info/SOURCES.txt +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling.egg-info/dependency_links.txt +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling.egg-info/entry_points.txt +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling.egg-info/requires.txt +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/service_capacity_modeling.egg-info/top_level.txt +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/setup.cfg +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/setup.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_arguments.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_buffers.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_common.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_desire_merge.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_generate_scenarios.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_hardware.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_hardware_shapes.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_headroom_strategy.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_io2.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_model_dump.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_reproducible.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_simulation.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_utils.py +0 -0
- {service_capacity_modeling-0.3.76 → service_capacity_modeling-0.3.77}/tests/test_working_set.py +0 -0
service_capacity_modeling/capacity_planner.py

@@ -190,7 +190,7 @@ def model_desires_percentiles(
 def _set_instance_objects(
     desires: CapacityDesires,
     hardware: Hardware,
-):
+) -> None:
     if desires.current_clusters:
         for zonal_cluster_capacity in desires.current_clusters.zonal:
             if zonal_cluster_capacity.cluster_instance_name in hardware.instances:

@@ -290,7 +290,9 @@ def _regret(
     return plans_by_regret


-def _add_requirement(requirement, accum):
+def _add_requirement(
+    requirement: CapacityRequirement, accum: Dict[str, Dict[str, List[Interval]]]
+) -> None:
     if requirement.requirement_type not in accum:
         accum[requirement.requirement_type] = {}

@@ -305,7 +307,11 @@ def _add_requirement(requirement, accum):
                 requirements[field].append(d)


-def _merge_models(
+def _merge_models(
+    plans_by_model: List[List[CapacityPlan]],
+    zonal_requirements: Dict[str, Dict[str, List[Interval]]],
+    regional_requirements: Dict[str, Dict[str, List[Interval]]],
+) -> List[CapacityPlan]:
     capacity_plans = []
     for composed in zip(*filter(lambda x: x, plans_by_model)):
         merged_plans = [functools.reduce(merge_plan, composed)]

@@ -331,10 +337,10 @@ def _in_allowed(inp: str, allowed: Sequence[str]) -> bool:
 class CapacityPlanner:
     def __init__(
         self,
-        default_num_simulations=128,
-        default_num_results=2,
-        default_lifecycles=(Lifecycle.stable, Lifecycle.beta),
-    ):
+        default_num_simulations: int = 128,
+        default_num_results: int = 2,
+        default_lifecycles: Tuple[Lifecycle, ...] = (Lifecycle.stable, Lifecycle.beta),
+    ) -> None:
         self._shapes: HardwareShapes = shapes
         self._models: Dict[str, CapacityModel] = {}

@@ -343,11 +349,11 @@ class CapacityPlanner:
         self._default_regret_params = CapacityRegretParameters()
         self._default_lifecycles = default_lifecycles

-    def register_group(self, group: Callable[[], Dict[str, CapacityModel]]):
+    def register_group(self, group: Callable[[], Dict[str, CapacityModel]]) -> None:
         for name, model in group().items():
             self.register_model(name, model)

-    def register_model(self, name: str, capacity_model: CapacityModel):
+    def register_model(self, name: str, capacity_model: CapacityModel) -> None:
         self._models[name] = capacity_model

     @property

@@ -429,16 +435,16 @@ class CapacityPlanner:

     def _group_plans_by_percentile( # pylint: disable=too-many-positional-arguments
         self,
-        drives,
-        extra_model_arguments,
-        instance_families,
-        lifecycles,
-        num_regions,
-        num_results,
-        region,
-        model_percentile_desires,
-        sorted_percentiles,
-    ):
+        drives: Optional[Sequence[str]],
+        extra_model_arguments: Dict[str, Any],
+        instance_families: Optional[Sequence[str]],
+        lifecycles: Sequence[Lifecycle],
+        num_regions: int,
+        num_results: Optional[int],
+        region: str,
+        model_percentile_desires: Any,
+        sorted_percentiles: List[int],
+    ) -> Dict[int, Sequence[CapacityPlan]]:
        percentile_plans = {}
        for index, percentile in enumerate(sorted_percentiles):
            percentile_plan = []

@@ -470,15 +476,15 @@ class CapacityPlanner:

     def _mean_plan( # pylint: disable=too-many-positional-arguments
         self,
-        drives,
-        extra_model_arguments,
-        instance_families,
-        lifecycles,
-        num_regions,
-        num_results,
-        region,
-        model_mean_desires,
-    ):
+        drives: Optional[Sequence[str]],
+        extra_model_arguments: Dict[str, Any],
+        instance_families: Optional[Sequence[str]],
+        lifecycles: Sequence[Lifecycle],
+        num_regions: int,
+        num_results: Optional[int],
+        region: str,
+        model_mean_desires: Dict[str, CapacityDesires],
+    ) -> Sequence[CapacityPlan]:
        mean_plans = []
        for mean_sub_model, mean_sub_desire in model_mean_desires.items():
            mean_sub_plan = self._plan_certain(

@@ -585,7 +591,7 @@ class CapacityPlanner:
         ]

     # Calculates the minimum cpu, memory, and network requirements based on desires.
-    def _per_instance_requirements(self, desires) -> Tuple[int, float]:
+    def _per_instance_requirements(self, desires: CapacityDesires) -> Tuple[int, float]:
         # Applications often set fixed reservations of heap or OS memory
         per_instance_mem = (
             desires.data_shape.reserved_instance_app_mem_gib

@@ -621,14 +627,14 @@ class CapacityPlanner:

     def generate_scenarios( # pylint: disable=too-many-positional-arguments
         self,
-        model,
-        region,
-        desires,
-        num_regions,
-        lifecycles,
-        instance_families,
-        drives,
-    ):
+        model: CapacityModel,
+        region: str,
+        desires: CapacityDesires,
+        num_regions: int,
+        lifecycles: Optional[Sequence[Lifecycle]],
+        instance_families: Optional[Sequence[str]],
+        drives: Optional[Sequence[str]],
+    ) -> Generator[Tuple[Instance, Drive, RegionContext], None, None]:
        lifecycles = lifecycles or self._default_lifecycles
        instance_families = instance_families or []
        drives = drives or []
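For reference, the new return annotation on generate_scenarios follows the standard typing.Generator[YieldType, SendType, ReturnType] form. A minimal sketch with hypothetical names (not code from this package) showing what the three type parameters mean:

```python
from typing import Generator, Tuple

def scenarios(n: int) -> Generator[Tuple[int, int], None, None]:
    # Yields Tuple[int, int]; nothing is sent into or returned from the
    # generator, hence the two trailing None type parameters.
    for i in range(n):
        yield (i, i * i)

assert list(scenarios(3)) == [(0, 0), (1, 1), (2, 4)]
```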
@@ -718,8 +724,8 @@ class CapacityPlanner:
         lifecycles = lifecycles or self._default_lifecycles

         # requirement types -> values
-        zonal_requirements: Dict[str, Dict] = {}
-        regional_requirements: Dict[str, Dict] = {}
+        zonal_requirements: Dict[str, Dict[str, List[Interval]]] = {}
+        regional_requirements: Dict[str, Dict[str, List[Interval]]] = {}

         regret_clusters_by_model: Dict[
             str, Sequence[Tuple[CapacityPlan, CapacityDesires, float]]

@@ -836,7 +842,7 @@ class CapacityPlanner:
         model_name: str,
         desires: CapacityDesires,
         extra_model_arguments: Dict[str, Any],
-    ):
+    ) -> Generator[Tuple[str, CapacityDesires], None, None]:
         queue: List[Tuple[CapacityDesires, str]] = [(desires, model_name)]
         models_used = []

service_capacity_modeling/hardware/__init__.py

@@ -5,6 +5,8 @@ import logging
 import os
 from functools import reduce
 from pathlib import Path
+from typing import Any
+from typing import cast
 from typing import Dict
 from typing import List
 from typing import Optional

@@ -20,11 +22,11 @@ from service_capacity_modeling.interface import Service
 logger = logging.getLogger(__name__)


-def load_pricing(pricing: Dict) -> Pricing:
+def load_pricing(pricing: Dict[str, Any]) -> Pricing:
     return Pricing(regions=pricing)


-def load_hardware(hardware: Dict) -> Hardware:
+def load_hardware(hardware: Dict[str, Any]) -> Hardware:
     return Hardware(**hardware)


@@ -103,7 +105,9 @@ def merge_hardware(existing: Hardware, override: Hardware) -> Hardware:
             merged[key] = override_obj.get(key)
         elif isinstance(existing_field, Dict):
             override_field = override_obj.get(key)
-
+            if override_field is None:
+                override_field = {}
+            merged_field = cast(Dict[str, Any], merged.setdefault(key, {}))

             existing_keys = existing_field.keys()
             override_keys = override_obj.get(key, {}).keys()
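The added lines above default a missing override section to an empty dict and use typing.cast so the merged value is typed as Dict[str, Any]; cast has no runtime effect. A hedged sketch of the pattern with hypothetical names (not the package's actual merge logic):

```python
from typing import Any, Dict, Optional, cast

def merge_section(
    merged: Dict[str, Any], key: str, override_field: Optional[Dict[str, Any]]
) -> Dict[str, Any]:
    if override_field is None:
        override_field = {}
    # cast() only informs the type checker; at runtime this is just setdefault()
    merged_field = cast(Dict[str, Any], merged.setdefault(key, {}))
    merged_field.update(override_field)
    return merged_field

merged: Dict[str, Any] = {"instances": {"m5.large": {"cpu": 2}}}
merge_section(merged, "instances", {"m5.xlarge": {"cpu": 4}})
assert set(merged["instances"]) == {"m5.large", "m5.xlarge"}
```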
@@ -119,7 +123,7 @@ def merge_hardware(existing: Hardware, override: Hardware) -> Hardware:
     return Hardware(**merged)


-def merge_pricing(existing: Dict, override: Dict) -> Dict:
+def merge_pricing(existing: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
     merged = existing.copy()
     for region, override_pricing in override.items():
         if region not in merged:

@@ -165,7 +169,7 @@ def load_hardware_from_disk(
     if shape_paths is None:
         shape_paths = []

-    combined_pricing: Dict = {}
+    combined_pricing: Dict[str, Any] = {}

     logger.debug("Loading pricing from: %s", price_paths)
     for price_path in price_paths:

@@ -185,7 +189,7 @@ def load_hardware_from_disk(
     return price_hardware(hardware=hardware, pricing=pricing)


-def load_hardware_from_s3(bucket, path) -> GlobalHardware:
+def load_hardware_from_s3(bucket: str, path: str) -> GlobalHardware:
     try:
         # boto is a heavy dependency so we only want to take it if
         # someone will be using it ...

@@ -205,7 +209,7 @@ def load_hardware_from_s3(bucket, path) -> GlobalHardware:


 class HardwareShapes:
-    def __init__(self):
+    def __init__(self) -> None:
         self._hardware: Optional[GlobalHardware] = None

     def load(self, new_hardware: GlobalHardware) -> None:
service_capacity_modeling/interface.py

@@ -27,12 +27,12 @@ MEGABIT_IN_BYTES = (1000 * 1000) / 8


 class ExcludeUnsetModel(BaseModel):
-    def model_dump(self, *args, **kwargs):
+    def model_dump(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
         if "exclude_unset" not in kwargs:
             kwargs["exclude_unset"] = True
         return super().model_dump(*args, **kwargs)

-    def model_dump_json(self, *args, **kwargs):
+    def model_dump_json(self, *args: Any, **kwargs: Any) -> str:
         if "exclude_unset" not in kwargs:
             kwargs["exclude_unset"] = True
         return super().model_dump_json(*args, **kwargs)
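The annotations above match pydantic v2's model_dump (returns a dict) and model_dump_json (returns a str). The class's behavior, defaulting exclude_unset=True, can be sketched as follows (a simplified stand-in, assuming pydantic v2, with a hypothetical Example model):

```python
from typing import Any, Dict
from pydantic import BaseModel

class ExcludeUnsetModel(BaseModel):
    def model_dump(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
        kwargs.setdefault("exclude_unset", True)  # same effect as the "not in kwargs" check
        return super().model_dump(*args, **kwargs)

class Example(ExcludeUnsetModel):
    a: int = 1
    b: int = 2

print(Example(a=5).model_dump())                     # {'a': 5} -- unset fields omitted
print(Example(a=5).model_dump(exclude_unset=False))  # {'a': 5, 'b': 2}
```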
@@ -44,10 +44,10 @@ class ExcludeUnsetModel(BaseModel):


 class IntervalModel(str, Enum):
-    def __str__(self):
+    def __str__(self) -> str:
         return str(self.value)

-    def __repr__(self):
+    def __repr__(self) -> str:
         return f"D({self.value})"

     gamma = "gamma"

@@ -71,11 +71,11 @@ class Interval(ExcludeUnsetModel):
     model_config = ConfigDict(frozen=True, protected_namespaces=())

     @property
-    def can_simulate(self):
+    def can_simulate(self) -> bool:
         return self.confidence <= 0.99 and self.allow_simulate

     @property
-    def minimum(self):
+    def minimum(self) -> float:
         if self.minimum_value is None:
             if self.confidence == 1.0:
                 return self.low * 0.999

@@ -84,17 +84,19 @@ class Interval(ExcludeUnsetModel):
         return self.minimum_value

     @property
-    def maximum(self):
+    def maximum(self) -> float:
         if self.maximum_value is None:
             if self.confidence == 1.0:
                 return self.high * 1.001
             return self.high * 2
         return self.maximum_value

-    def __hash__(self):
+    def __hash__(self) -> int:
         return hash((type(self),) + tuple(self.__dict__.values()))

-    def __eq__(self, other):
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, Interval):
+            return False
         return self.__hash__() == other.__hash__()

     def scale(self, factor: float) -> Interval:
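The added isinstance guard makes equality against anything other than an Interval return False explicitly instead of falling through to a hash comparison with an unrelated type; this is the usual pattern once other is typed as object. A small hedged illustration (hypothetical class, not the library's):

```python
class Version:
    def __init__(self, value: int) -> None:
        self.value = value

    def __hash__(self) -> int:
        return hash((type(self), self.value))

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Version):
            return False  # never claim equality with an unrelated type
        return hash(self) == hash(other)

assert Version(3) == Version(3)
assert Version(3) != 3  # guard short-circuits instead of comparing hashes
```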
@@ -264,7 +266,7 @@ class Drive(ExcludeUnsetModel):
         return max(self.block_size_kib, self.group_size_kib)

     @property
-    def max_size_gib(self) ->
+    def max_size_gib(self) -> float:
         if self.max_scale_size_gib != 0:
             return self.max_scale_size_gib
         else:

@@ -279,7 +281,7 @@ class Drive(ExcludeUnsetModel):

     @computed_field(return_type=float) # type: ignore
     @property
-    def annual_cost(self):
+    def annual_cost(self) -> float:
         size = self.size_gib or 0
         r_ios = self.read_io_per_s or 0
         w_ios = self.write_io_per_s or 0

@@ -382,15 +384,15 @@ class Instance(ExcludeUnsetModel):
     family_separator: str = "."

     @property
-    def family(self):
+    def family(self) -> str:
         return self.name.rsplit(self.family_separator, 1)[0]

     @property
-    def size(self):
+    def size(self) -> str:
         return self.name.rsplit(self.family_separator, 1)[1]

     @property
-    def cores(self):
+    def cores(self) -> int:
         if self.cpu_cores is not None:
             return self.cpu_cores
         return self.cpu // 2

@@ -456,7 +458,7 @@ class Service(ExcludeUnsetModel):
         low=1, mid=10, high=50, confidence=0.9
     )

-    def annual_cost_gib(self, data_gib: float = 0):
+    def annual_cost_gib(self, data_gib: float = 0) -> float:
         if isinstance(self.annual_cost_per_gib, float):
             return self.annual_cost_per_gib * data_gib
         else:

@@ -979,7 +981,7 @@ class CapacityRequirement(ExcludeUnsetModel):
     network_mbps: Interval = certain_int(0)
     disk_gib: Interval = certain_int(0)

-    context: Dict = {}
+    context: Dict[str, Any] = {}


 class ClusterCapacity(ExcludeUnsetModel):

@@ -992,7 +994,7 @@ class ClusterCapacity(ExcludeUnsetModel):
     # When provisioning services we might need to signal they
     # should have certain configuration, for example flags that
     # affect durability shut off
-    cluster_params: Dict = {}
+    cluster_params: Dict[str, Any] = {}


 class ServiceCapacity(ExcludeUnsetModel):

@@ -1003,7 +1005,7 @@ class ServiceCapacity(ExcludeUnsetModel):
     regret_cost: bool = False
     # Often while provisioning cloud services we need to represent
     # parameters to the cloud APIs, use this to inject those from models
-    service_params: Dict = {}
+    service_params: Dict[str, Any] = {}


 # For services that are provisioned by zone (e.g. Cassandra, EVCache)
service_capacity_modeling/models/__init__.py

@@ -20,6 +20,25 @@ from service_capacity_modeling.interface import Platform
 from service_capacity_modeling.interface import QueryPattern
 from service_capacity_modeling.interface import RegionContext

+__all__ = [
+    "AccessConsistency",
+    "AccessPattern",
+    "CapacityDesires",
+    "CapacityPlan",
+    "CapacityRegretParameters",
+    "certain_float",
+    "Consistency",
+    "DataShape",
+    "Drive",
+    "FixedInterval",
+    "GlobalConsistency",
+    "Instance",
+    "Platform",
+    "QueryPattern",
+    "RegionContext",
+    "CapacityModel",
+]
+
 __common_regrets__ = frozenset(("spend", "disk", "mem"))


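The new __all__ list declares which names service_capacity_modeling.models intentionally re-exports; with it defined, a star-import pulls in only the listed names, which is a common way to mark re-exports as deliberate (e.g. for linters). A minimal illustration with hypothetical module and class names:

```python
# pkg.py -- hypothetical stand-in for a package __init__ that re-exports names
__all__ = ["CapacityModel"]

class CapacityModel: ...
class _Internal: ...

# elsewhere:
#   from pkg import *
# binds CapacityModel only; _Internal (and any other name missing from __all__)
# is not brought into the importing namespace.
```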
@@ -85,7 +104,7 @@ class CapacityModel:

     """

-    def __init__(self):
+    def __init__(self) -> None:
         pass

     @staticmethod

@@ -270,7 +289,7 @@ class CapacityModel:
     @staticmethod
     def default_desires(
         user_desires: CapacityDesires, extra_model_arguments: Dict[str, Any]
-    ):
+    ) -> CapacityDesires:
         """Optional defaults to apply given a user desires

         Often users do not know what the on-cpu time of their queries
service_capacity_modeling/models/common.py

@@ -3,6 +3,7 @@ import logging
 import math
 import random
 from decimal import Decimal
+from typing import Any
 from typing import Callable
 from typing import Dict
 from typing import List

@@ -552,7 +553,7 @@ def compute_stateful_zone( # pylint: disable=too-many-positional-arguments
         ratio = ebs_gib / max_size
         count = max(cluster_size(math.ceil(count * ratio)), min_count)
         cost = count * instance.annual_cost
-        ebs_gib = max_size
+        ebs_gib = int(max_size)

     read_io, write_io = required_disk_ios(space_gib, count)
     read_io, write_io = (

@@ -597,27 +598,27 @@ def compute_stateful_zone( # pylint: disable=too-many-positional-arguments


 # AWS GP2 gives 3 IOS / gb stored.
-def gp2_gib_for_io(read_ios) -> int:
+def gp2_gib_for_io(read_ios: float) -> int:
     return int(max(1, read_ios // 3))


-def cloud_gib_for_io(drive, total_ios, space_gib) -> int:
+def cloud_gib_for_io(drive: Drive, total_ios: float, space_gib: float) -> int:
     if drive.name == "gp2":
         return gp2_gib_for_io(total_ios)
     else:
-        return space_gib
+        return int(space_gib)


 class WorkingSetEstimator:
-    def __init__(self):
-        self._cache = {}
+    def __init__(self) -> None:
+        self._cache: Dict[Any, Interval] = {}

     def working_set_percent(
         self,
         # latency distributions of the read SLOs versus the drives
         # expressed as scipy rv_continuous objects
-        drive_read_latency_dist,
-        read_slo_latency_dist,
+        drive_read_latency_dist: Any,
+        read_slo_latency_dist: Any,
         # what percentile of disk latency should we target for keeping in
         # memory. Not as this is _increased_ more memory will be reserved
         target_percentile: float = 0.90,
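As the comment in the hunk above notes, gp2 provides roughly 3 IOPS per GiB stored, so sizing a gp2 volume for IOPS comes down to dividing the needed IOPS by 3. A quick sketch of that arithmetic, mirroring the gp2_gib_for_io function shown in the diff:

```python
# Rough gp2 sizing: ~3 IOPS per provisioned GiB, so GiB needed ~= IOPS / 3.
def gp2_gib_for_io(read_ios: float) -> int:
    return int(max(1, read_ios // 3))

# e.g. ~9000 read IOPS needs on the order of 3000 GiB of gp2,
# even if the dataset itself is much smaller.
assert gp2_gib_for_io(9000) == 3000
assert gp2_gib_for_io(2) == 1  # floor of at least 1 GiB
```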
@@ -655,8 +656,8 @@ _working_set_estimator = WorkingSetEstimator()
 def working_set_from_drive_and_slo(
     # latency distributions of the read SLOs versus the drives
     # expressed as scipy rv_continuous objects
-    drive_read_latency_dist,
-    read_slo_latency_dist,
+    drive_read_latency_dist: Any,
+    read_slo_latency_dist: Any,
     estimated_working_set: Optional[Interval] = None,
     # what percentile of disk latency should we target for keeping in
     # memory. Not as this is _increased_ more memory will be reserved

@@ -797,7 +798,7 @@ class DerivedBuffers(BaseModel):
         buffer: Dict[str, Buffer],
         components: List[str],
         component_fallbacks: Optional[Dict[str, List[str]]] = None,
-    ):
+    ) -> "DerivedBuffers":
         expanded_components = _expand_components(components, component_fallbacks)

         scale = 1.0

@@ -856,7 +857,7 @@ class RequirementFromCurrentCapacity(BaseModel):

     def cpu(self, instance_candidate: Instance) -> int:
         current_cpu_util = self.current_capacity.cpu_utilization.mid / 100
-        current_total_cpu = (
+        current_total_cpu = float(
             self.current_instance.cpu * self.current_capacity.cluster_instance_count.mid
         )

@@ -881,11 +882,11 @@ class RequirementFromCurrentCapacity(BaseModel):

     @property
     def mem_gib(self) -> float:
-        current_memory_utilization = (
+        current_memory_utilization = float(
             self.current_capacity.memory_utilization_gib.mid
             * self.current_capacity.cluster_instance_count.mid
         )
-        zonal_ram_allocated = (
+        zonal_ram_allocated = float(
             self.current_instance.ram_gib
             * self.current_capacity.cluster_instance_count.mid
         )

@@ -905,11 +906,11 @@ class RequirementFromCurrentCapacity(BaseModel):

     @property
     def disk_gib(self) -> int:
-        current_cluster_disk_util_gib = (
+        current_cluster_disk_util_gib = float(
             self.current_capacity.disk_utilization_gib.mid
             * self.current_capacity.cluster_instance_count.mid
         )
-        current_node_disk_gib = (
+        current_node_disk_gib = float(
             self.current_instance.drive.max_size_gib
             if self.current_instance.drive is not None
             else (

@@ -919,7 +920,7 @@ class RequirementFromCurrentCapacity(BaseModel):
             )
         )

-        zonal_disk_allocated = (
+        zonal_disk_allocated = float(
             current_node_disk_gib * self.current_capacity.cluster_instance_count.mid
         )
         # These are the desired buffers

@@ -939,11 +940,11 @@ class RequirementFromCurrentCapacity(BaseModel):

     @property
     def network_mbps(self) -> int:
-        current_network_utilization = (
+        current_network_utilization = float(
             self.current_capacity.network_utilization_mbps.mid
             * self.current_capacity.cluster_instance_count.mid
         )
-        zonal_network_allocated = (
+        zonal_network_allocated = float(
             self.current_instance.net_mbps
             * self.current_capacity.cluster_instance_count.mid
         )
service_capacity_modeling/models/org/netflix/__init__.py

@@ -1,3 +1,6 @@
+from typing import Any
+from typing import Dict
+
 from .aurora import nflx_aurora_capacity_model
 from .cassandra import nflx_cassandra_capacity_model
 from .counter import nflx_counter_capacity_model

@@ -20,7 +23,7 @@ from .wal import nflx_wal_capacity_model
 from .zookeeper import nflx_zookeeper_capacity_model


-def models():
+def models() -> Dict[str, Any]:
     return {
         "org.netflix.cassandra": nflx_cassandra_capacity_model,
         "org.netflix.stateless-java": nflx_java_app_capacity_model,
service_capacity_modeling/models/org/netflix/aurora.py

@@ -1,6 +1,7 @@
 import logging
 import math
 from typing import Any
+from typing import Callable
 from typing import Dict
 from typing import Optional
 from typing import Tuple

@@ -86,7 +87,9 @@ def _estimate_aurora_requirement(

 # MySQL default block size is 16KiB, PostGreSQL is 8KiB Number of reads for B-Tree
 # are given by log of total pages to the base of B-Tree fan out factor
-def _rds_required_disk_ios(
+def _rds_required_disk_ios(
+    disk_size_gib: int, db_type: str, btree_fan_out: int = 100
+) -> float:
     disk_size_kb = disk_size_gib * 1024 * 1024
     if db_type == "postgres":
         default_block_size = 8 # KiB
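The comment in the hunk above describes the estimate: with an 8 KiB (PostgreSQL) or 16 KiB (MySQL) block size, the number of B-Tree page reads per lookup is roughly the log of the page count in base btree_fan_out. A hedged sketch of that arithmetic (not the package's exact implementation):

```python
import math

def estimated_btree_reads(disk_size_gib: int, db_type: str, btree_fan_out: int = 100) -> float:
    disk_size_kib = disk_size_gib * 1024 * 1024
    block_size_kib = 8 if db_type == "postgres" else 16  # PostgreSQL 8KiB, MySQL 16KiB
    pages = max(disk_size_kib / block_size_kib, 1)
    # Each point lookup descends roughly log_{fan_out}(pages) levels of the B-Tree
    return math.log(pages, btree_fan_out)

# 100 GiB of postgres data -> ~13.1M 8KiB pages -> roughly 3.6 page reads per lookup
print(round(estimated_btree_reads(100, "postgres"), 1))  # ~3.6
```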
@@ -100,11 +103,11 @@ def _rds_required_disk_ios(disk_size_gib: int, db_type: str, btree_fan_out: int
 # This is a start, we should iterate based on the actual work load
 def _estimate_io_cost(
     db_type: str,
-    desires,
+    desires: CapacityDesires,
     read_io_price: float,
     write_io_price: float,
     cache_hit_rate: float = 0.8,
-):
+) -> float:
     if db_type == "postgres":
         read_byte_per_io = 8192
     else:

@@ -134,8 +137,8 @@ def _compute_aurora_region( # pylint: disable=too-many-positional-arguments
     needed_disk_gib: int,
     needed_memory_gib: int,
     needed_network_mbps: float,
-    required_disk_ios,
-    required_disk_space,
+    required_disk_ios: Callable[[int], float],
+    required_disk_space: Callable[[int], float],
     db_type: str,
     desires: CapacityDesires,
 ) -> Optional[RegionClusterCapacity]:

@@ -295,7 +298,7 @@ class NflxAuroraCapacityModel(CapacityModel):
         )

     @staticmethod
-    def description():
+    def description() -> str:
         return "Netflix Aurora Cluster Model"

     @staticmethod

@@ -307,7 +310,9 @@ class NflxAuroraCapacityModel(CapacityModel):
         return Platform.aurora_mysql, Platform.aurora_mysql

     @staticmethod
-    def default_desires(
+    def default_desires(
+        user_desires: CapacityDesires, extra_model_arguments: Dict[str, Any]
+    ) -> CapacityDesires:
         return CapacityDesires(
             query_pattern=QueryPattern(
                 access_pattern=AccessPattern.latency,