service-capacity-modeling 0.3.74__tar.gz → 0.3.79__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


Files changed (103)
  1. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/PKG-INFO +9 -5
  2. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/README.md +3 -3
  3. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/capacity_planner.py +46 -40
  4. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/__init__.py +11 -7
  5. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/interface.py +48 -22
  6. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/__init__.py +21 -2
  7. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/common.py +268 -190
  8. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/headroom_strategy.py +2 -1
  9. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/__init__.py +4 -1
  10. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/aurora.py +12 -7
  11. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/cassandra.py +39 -24
  12. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/counter.py +44 -20
  13. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/crdb.py +7 -4
  14. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/ddb.py +9 -5
  15. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/elasticsearch.py +8 -6
  16. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/entity.py +5 -3
  17. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/evcache.py +21 -25
  18. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/graphkv.py +5 -3
  19. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/iso_date_math.py +12 -9
  20. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/kafka.py +13 -7
  21. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/key_value.py +4 -2
  22. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/postgres.py +4 -2
  23. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/rds.py +10 -5
  24. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/stateless_java.py +4 -2
  25. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/time_series.py +4 -2
  26. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/time_series_config.py +3 -3
  27. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/wal.py +4 -2
  28. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/netflix/zookeeper.py +5 -3
  29. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/stats.py +14 -11
  30. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/tools/auto_shape.py +10 -6
  31. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/tools/fetch_pricing.py +13 -6
  32. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/tools/generate_missing.py +4 -3
  33. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/tools/instance_families.py +4 -1
  34. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling.egg-info/PKG-INFO +9 -5
  35. service_capacity_modeling-0.3.79/service_capacity_modeling.egg-info/requires.txt +7 -0
  36. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/setup.py +4 -1
  37. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_buffers.py +119 -0
  38. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_common.py +230 -18
  39. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_reproducible.py +38 -20
  40. service_capacity_modeling-0.3.74/service_capacity_modeling.egg-info/requires.txt +0 -10
  41. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/LICENSE +0 -0
  42. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/__init__.py +0 -0
  43. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/__init__.py +0 -0
  44. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/pricing/aws/3yr-reserved_ec2.json +0 -0
  45. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/pricing/aws/3yr-reserved_zz-overrides.json +0 -0
  46. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/profiles.txt +0 -0
  47. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5.json +0 -0
  48. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5a.json +0 -0
  49. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5d.json +0 -0
  50. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5n.json +0 -0
  51. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6a.json +0 -0
  52. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6i.json +0 -0
  53. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6id.json +0 -0
  54. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c7a.json +0 -0
  55. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c7i.json +0 -0
  56. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c8i.json +0 -0
  57. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_i3en.json +0 -0
  58. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_i4i.json +0 -0
  59. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_i7i.json +0 -0
  60. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m4.json +0 -0
  61. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m5.json +0 -0
  62. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m5n.json +0 -0
  63. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6a.json +0 -0
  64. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6i.json +0 -0
  65. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6id.json +0 -0
  66. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6idn.json +0 -0
  67. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6in.json +0 -0
  68. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m7a.json +0 -0
  69. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m7i.json +0 -0
  70. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m8i.json +0 -0
  71. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r4.json +0 -0
  72. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r5.json +0 -0
  73. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r5n.json +0 -0
  74. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6a.json +0 -0
  75. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6i.json +0 -0
  76. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6id.json +0 -0
  77. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6idn.json +0 -0
  78. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6in.json +0 -0
  79. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r7a.json +0 -0
  80. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r7i.json +0 -0
  81. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r8i.json +0 -0
  82. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_drives.json +0 -0
  83. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_instances.json +0 -0
  84. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_services.json +0 -0
  85. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/org/__init__.py +0 -0
  86. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/models/utils.py +0 -0
  87. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling/tools/__init__.py +0 -0
  88. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling.egg-info/SOURCES.txt +0 -0
  89. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling.egg-info/dependency_links.txt +0 -0
  90. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling.egg-info/entry_points.txt +0 -0
  91. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/service_capacity_modeling.egg-info/top_level.txt +0 -0
  92. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/setup.cfg +0 -0
  93. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_arguments.py +0 -0
  94. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_desire_merge.py +0 -0
  95. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_generate_scenarios.py +0 -0
  96. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_hardware.py +0 -0
  97. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_hardware_shapes.py +0 -0
  98. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_headroom_strategy.py +0 -0
  99. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_io2.py +0 -0
  100. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_model_dump.py +0 -0
  101. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_simulation.py +0 -0
  102. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_utils.py +0 -0
  103. {service_capacity_modeling-0.3.74 → service_capacity_modeling-0.3.79}/tests/test_working_set.py +0 -0

PKG-INFO
@@ -1,19 +1,22 @@
 Metadata-Version: 2.4
 Name: service-capacity-modeling
-Version: 0.3.74
+Version: 0.3.79
 Summary: Contains utilities for modeling capacity for pluggable workloads
 Author: Joseph Lynch
 Author-email: josephl@netflix.com
 License: Apache 2.0
 Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
+Requires-Python: >=3.9,<3.12
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: pydantic>2.0
 Requires-Dist: scipy
 Requires-Dist: numpy
-Requires-Dist: importlib_resources; python_version < "3.7"
 Requires-Dist: isodate
 Provides-Extra: aws
 Requires-Dist: boto3; extra == "aws"
@@ -26,6 +29,7 @@ Dynamic: license
 Dynamic: license-file
 Dynamic: provides-extra
 Dynamic: requires-dist
+Dynamic: requires-python
 Dynamic: summary
 
 # Service Capacity Modeling
@@ -43,10 +47,10 @@ remember this repository is public when making changes to it.
 Run the tests:
 ```bash
 # Test the capacity planner on included netflix models
-$ tox -e py38
+$ tox -e py39
 
 # Run a single test with a debugger attached if the test fails
-$ .tox/py38/bin/pytest -n0 -k test_java_heap_heavy --pdb --pdbcls=IPython.terminal.debugger:Pdb
+$ .tox/py39/bin/pytest -n0 -k test_java_heap_heavy --pdb --pdbcls=IPython.terminal.debugger:Pdb
 
 # Verify all type contracts
 $ tox -e mypy
@@ -247,7 +251,7 @@ To contribute to this project:
 2. Write a unit test using `pytest` in the `tests` folder.
 3. Ensure your tests pass via `tox` or debug them with:
 ```
-tox -e py38 -- -k test_<your_functionality> --pdb --pdbcls=IPython.terminal.debugger:Pdb
+tox -e py39 -- -k test_<your_functionality> --pdb --pdbcls=IPython.terminal.debugger:Pdb
 ```
 
 ### Pre-commit / Linting

README.md
@@ -13,10 +13,10 @@ remember this repository is public when making changes to it.
 Run the tests:
 ```bash
 # Test the capacity planner on included netflix models
-$ tox -e py38
+$ tox -e py39
 
 # Run a single test with a debugger attached if the test fails
-$ .tox/py38/bin/pytest -n0 -k test_java_heap_heavy --pdb --pdbcls=IPython.terminal.debugger:Pdb
+$ .tox/py39/bin/pytest -n0 -k test_java_heap_heavy --pdb --pdbcls=IPython.terminal.debugger:Pdb
 
 # Verify all type contracts
 $ tox -e mypy
@@ -217,7 +217,7 @@ To contribute to this project:
 2. Write a unit test using `pytest` in the `tests` folder.
 3. Ensure your tests pass via `tox` or debug them with:
 ```
-tox -e py38 -- -k test_<your_functionality> --pdb --pdbcls=IPython.terminal.debugger:Pdb
+tox -e py39 -- -k test_<your_functionality> --pdb --pdbcls=IPython.terminal.debugger:Pdb
 ```
 
 ### Pre-commit / Linting

service_capacity_modeling/capacity_planner.py
@@ -190,7 +190,7 @@ def model_desires_percentiles(
 def _set_instance_objects(
     desires: CapacityDesires,
     hardware: Hardware,
-):
+) -> None:
     if desires.current_clusters:
         for zonal_cluster_capacity in desires.current_clusters.zonal:
             if zonal_cluster_capacity.cluster_instance_name in hardware.instances:
@@ -290,7 +290,9 @@ def _regret(
     return plans_by_regret
 
 
-def _add_requirement(requirement, accum):
+def _add_requirement(
+    requirement: CapacityRequirement, accum: Dict[str, Dict[str, List[Interval]]]
+) -> None:
     if requirement.requirement_type not in accum:
         accum[requirement.requirement_type] = {}
 
@@ -305,7 +307,11 @@ def _add_requirement(requirement, accum):
                 requirements[field].append(d)
 
 
-def _merge_models(plans_by_model, zonal_requirements, regional_requirements):
+def _merge_models(
+    plans_by_model: List[List[CapacityPlan]],
+    zonal_requirements: Dict[str, Dict[str, List[Interval]]],
+    regional_requirements: Dict[str, Dict[str, List[Interval]]],
+) -> List[CapacityPlan]:
     capacity_plans = []
     for composed in zip(*filter(lambda x: x, plans_by_model)):
         merged_plans = [functools.reduce(merge_plan, composed)]
@@ -331,10 +337,10 @@ def _in_allowed(inp: str, allowed: Sequence[str]) -> bool:
 class CapacityPlanner:
     def __init__(
         self,
-        default_num_simulations=128,
-        default_num_results=2,
-        default_lifecycles=(Lifecycle.stable, Lifecycle.beta),
-    ):
+        default_num_simulations: int = 128,
+        default_num_results: int = 2,
+        default_lifecycles: Tuple[Lifecycle, ...] = (Lifecycle.stable, Lifecycle.beta),
+    ) -> None:
         self._shapes: HardwareShapes = shapes
         self._models: Dict[str, CapacityModel] = {}
 
@@ -343,11 +349,11 @@ class CapacityPlanner:
         self._default_regret_params = CapacityRegretParameters()
         self._default_lifecycles = default_lifecycles
 
-    def register_group(self, group: Callable[[], Dict[str, CapacityModel]]):
+    def register_group(self, group: Callable[[], Dict[str, CapacityModel]]) -> None:
         for name, model in group().items():
            self.register_model(name, model)
 
-    def register_model(self, name: str, capacity_model: CapacityModel):
+    def register_model(self, name: str, capacity_model: CapacityModel) -> None:
         self._models[name] = capacity_model
 
     @property
@@ -429,16 +435,16 @@
 
     def _group_plans_by_percentile( # pylint: disable=too-many-positional-arguments
         self,
-        drives,
-        extra_model_arguments,
-        instance_families,
-        lifecycles,
-        num_regions,
-        num_results,
-        region,
-        model_percentile_desires,
-        sorted_percentiles,
-    ):
+        drives: Optional[Sequence[str]],
+        extra_model_arguments: Dict[str, Any],
+        instance_families: Optional[Sequence[str]],
+        lifecycles: Sequence[Lifecycle],
+        num_regions: int,
+        num_results: Optional[int],
+        region: str,
+        model_percentile_desires: Any,
+        sorted_percentiles: List[int],
+    ) -> Dict[int, Sequence[CapacityPlan]]:
         percentile_plans = {}
         for index, percentile in enumerate(sorted_percentiles):
             percentile_plan = []
@@ -470,15 +476,15 @@
 
     def _mean_plan( # pylint: disable=too-many-positional-arguments
         self,
-        drives,
-        extra_model_arguments,
-        instance_families,
-        lifecycles,
-        num_regions,
-        num_results,
-        region,
-        model_mean_desires,
-    ):
+        drives: Optional[Sequence[str]],
+        extra_model_arguments: Dict[str, Any],
+        instance_families: Optional[Sequence[str]],
+        lifecycles: Sequence[Lifecycle],
+        num_regions: int,
+        num_results: Optional[int],
+        region: str,
+        model_mean_desires: Dict[str, CapacityDesires],
+    ) -> Sequence[CapacityPlan]:
         mean_plans = []
         for mean_sub_model, mean_sub_desire in model_mean_desires.items():
             mean_sub_plan = self._plan_certain(
@@ -585,7 +591,7 @@
         ]
 
     # Calculates the minimum cpu, memory, and network requirements based on desires.
-    def _per_instance_requirements(self, desires) -> Tuple[int, float]:
+    def _per_instance_requirements(self, desires: CapacityDesires) -> Tuple[int, float]:
         # Applications often set fixed reservations of heap or OS memory
         per_instance_mem = (
             desires.data_shape.reserved_instance_app_mem_gib
@@ -621,14 +627,14 @@
 
     def generate_scenarios( # pylint: disable=too-many-positional-arguments
         self,
-        model,
-        region,
-        desires,
-        num_regions,
-        lifecycles,
-        instance_families,
-        drives,
-    ):
+        model: CapacityModel,
+        region: str,
+        desires: CapacityDesires,
+        num_regions: int,
+        lifecycles: Optional[Sequence[Lifecycle]],
+        instance_families: Optional[Sequence[str]],
+        drives: Optional[Sequence[str]],
+    ) -> Generator[Tuple[Instance, Drive, RegionContext], None, None]:
         lifecycles = lifecycles or self._default_lifecycles
         instance_families = instance_families or []
         drives = drives or []
@@ -718,8 +724,8 @@
         lifecycles = lifecycles or self._default_lifecycles
 
         # requirement types -> values
-        zonal_requirements: Dict[str, Dict] = {}
-        regional_requirements: Dict[str, Dict] = {}
+        zonal_requirements: Dict[str, Dict[str, List[Interval]]] = {}
+        regional_requirements: Dict[str, Dict[str, List[Interval]]] = {}
 
         regret_clusters_by_model: Dict[
             str, Sequence[Tuple[CapacityPlan, CapacityDesires, float]]
@@ -836,7 +842,7 @@
         model_name: str,
         desires: CapacityDesires,
         extra_model_arguments: Dict[str, Any],
-    ):
+    ) -> Generator[Tuple[str, CapacityDesires], None, None]:
         queue: List[Tuple[CapacityDesires, str]] = [(desires, model_name)]
         models_used = []
 
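For orientation, the registration API that gains annotations above can be sketched as follows. This is a hedged illustration only: the model subclass and the registered name are invented for the example and are not part of the diff.

```python
# Hypothetical usage sketch of the annotated CapacityPlanner registration API.
# The subclass and model name below are placeholders, not part of this release.
from service_capacity_modeling.capacity_planner import CapacityPlanner
from service_capacity_modeling.models import CapacityModel


class ExampleModel(CapacityModel):
    """Placeholder capacity model used only for this illustration."""


planner = CapacityPlanner(default_num_simulations=128, default_num_results=2)
planner.register_model("org.example.widget-store", ExampleModel())
```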

service_capacity_modeling/hardware/__init__.py
@@ -5,6 +5,8 @@ import logging
 import os
 from functools import reduce
 from pathlib import Path
+from typing import Any
+from typing import cast
 from typing import Dict
 from typing import List
 from typing import Optional
@@ -20,11 +22,11 @@ from service_capacity_modeling.interface import Service
 logger = logging.getLogger(__name__)
 
 
-def load_pricing(pricing: Dict) -> Pricing:
+def load_pricing(pricing: Dict[str, Any]) -> Pricing:
     return Pricing(regions=pricing)
 
 
-def load_hardware(hardware: Dict) -> Hardware:
+def load_hardware(hardware: Dict[str, Any]) -> Hardware:
     return Hardware(**hardware)
 
 
@@ -103,7 +105,9 @@ def merge_hardware(existing: Hardware, override: Hardware) -> Hardware:
             merged[key] = override_obj.get(key)
         elif isinstance(existing_field, Dict):
             override_field = override_obj.get(key)
-            merged_field = merged.setdefault(key, {})
+            if override_field is None:
+                override_field = {}
+            merged_field = cast(Dict[str, Any], merged.setdefault(key, {}))
 
             existing_keys = existing_field.keys()
             override_keys = override_obj.get(key, {}).keys()
@@ -119,7 +123,7 @@ def merge_hardware(existing: Hardware, override: Hardware) -> Hardware:
     return Hardware(**merged)
 
 
-def merge_pricing(existing: Dict, override: Dict) -> Dict:
+def merge_pricing(existing: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
     merged = existing.copy()
     for region, override_pricing in override.items():
         if region not in merged:
@@ -165,7 +169,7 @@ def load_hardware_from_disk(
     if shape_paths is None:
         shape_paths = []
 
-    combined_pricing: Dict = {}
+    combined_pricing: Dict[str, Any] = {}
 
     logger.debug("Loading pricing from: %s", price_paths)
     for price_path in price_paths:
@@ -185,7 +189,7 @@
     return price_hardware(hardware=hardware, pricing=pricing)
 
 
-def load_hardware_from_s3(bucket, path) -> GlobalHardware:
+def load_hardware_from_s3(bucket: str, path: str) -> GlobalHardware:
     try:
         # boto is a heavy dependency so we only want to take it if
         # someone will be using it ...
@@ -205,7 +209,7 @@
 
 
 class HardwareShapes:
-    def __init__(self):
+    def __init__(self) -> None:
         self._hardware: Optional[GlobalHardware] = None
 
     def load(self, new_hardware: GlobalHardware) -> None:

service_capacity_modeling/interface.py
@@ -27,12 +27,12 @@ MEGABIT_IN_BYTES = (1000 * 1000) / 8
 
 
 class ExcludeUnsetModel(BaseModel):
-    def model_dump(self, *args, **kwargs):
+    def model_dump(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
         if "exclude_unset" not in kwargs:
             kwargs["exclude_unset"] = True
         return super().model_dump(*args, **kwargs)
 
-    def model_dump_json(self, *args, **kwargs):
+    def model_dump_json(self, *args: Any, **kwargs: Any) -> str:
         if "exclude_unset" not in kwargs:
             kwargs["exclude_unset"] = True
         return super().model_dump_json(*args, **kwargs)
@@ -44,10 +44,10 @@ class ExcludeUnsetModel(BaseModel):
 
 
 class IntervalModel(str, Enum):
-    def __str__(self):
+    def __str__(self) -> str:
         return str(self.value)
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return f"D({self.value})"
 
     gamma = "gamma"
@@ -71,11 +71,11 @@ class Interval(ExcludeUnsetModel):
     model_config = ConfigDict(frozen=True, protected_namespaces=())
 
     @property
-    def can_simulate(self):
+    def can_simulate(self) -> bool:
         return self.confidence <= 0.99 and self.allow_simulate
 
     @property
-    def minimum(self):
+    def minimum(self) -> float:
         if self.minimum_value is None:
             if self.confidence == 1.0:
                 return self.low * 0.999
@@ -84,17 +84,19 @@ class Interval(ExcludeUnsetModel):
         return self.minimum_value
 
     @property
-    def maximum(self):
+    def maximum(self) -> float:
         if self.maximum_value is None:
             if self.confidence == 1.0:
                 return self.high * 1.001
             return self.high * 2
         return self.maximum_value
 
-    def __hash__(self):
+    def __hash__(self) -> int:
         return hash((type(self),) + tuple(self.__dict__.values()))
 
-    def __eq__(self, other):
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, Interval):
+            return False
         return self.__hash__() == other.__hash__()
 
     def scale(self, factor: float) -> Interval:
@@ -264,14 +266,14 @@ class Drive(ExcludeUnsetModel):
         return max(self.block_size_kib, self.group_size_kib)
 
     @property
-    def max_size_gib(self):
+    def max_size_gib(self) -> float:
         if self.max_scale_size_gib != 0:
             return self.max_scale_size_gib
         else:
             return self.size_gib
 
     @property
-    def max_io_per_s(self):
+    def max_io_per_s(self) -> int:
         if self.max_scale_io_per_s != 0:
             return self.max_scale_io_per_s
         else:
@@ -279,7 +281,7 @@ class Drive(ExcludeUnsetModel):
 
     @computed_field(return_type=float) # type: ignore
     @property
-    def annual_cost(self):
+    def annual_cost(self) -> float:
         size = self.size_gib or 0
         r_ios = self.read_io_per_s or 0
         w_ios = self.write_io_per_s or 0
@@ -382,15 +384,15 @@ class Instance(ExcludeUnsetModel):
     family_separator: str = "."
 
     @property
-    def family(self):
+    def family(self) -> str:
         return self.name.rsplit(self.family_separator, 1)[0]
 
     @property
-    def size(self):
+    def size(self) -> str:
         return self.name.rsplit(self.family_separator, 1)[1]
 
     @property
-    def cores(self):
+    def cores(self) -> int:
         if self.cpu_cores is not None:
             return self.cpu_cores
         return self.cpu // 2
@@ -456,7 +458,7 @@ class Service(ExcludeUnsetModel):
         low=1, mid=10, high=50, confidence=0.9
     )
 
-    def annual_cost_gib(self, data_gib: float = 0):
+    def annual_cost_gib(self, data_gib: float = 0) -> float:
         if isinstance(self.annual_cost_per_gib, float):
             return self.annual_cost_per_gib * data_gib
         else:
@@ -779,23 +781,48 @@ class BufferComponent(str, Enum):
     compute = "compute"
     # [Data Shape] a.k.a. "Dataset" related buffers, e.g. Disk and Memory
     storage = "storage"
-
     # Resource specific component
     cpu = "cpu"
     network = "network"
     disk = "disk"
     memory = "memory"
 
+    @staticmethod
+    def is_generic(component: str) -> bool:
+        return component in {BufferComponent.compute, BufferComponent.storage}
+
+    @staticmethod
+    def is_specific(component: str) -> bool:
+        return not BufferComponent.is_generic(component)
+
 
 class BufferIntent(str, Enum):
     # Most buffers show "desired" buffer, this is the default
     desired = "desired"
     # ratio on top of existing buffers to ensure exists. Generally combined
     # with a different desired buffer to ensure we don't just scale needlessly
+    # This means we can scale up or down as as long as we meet the desired buffer.
     scale = "scale"
-    # Ignore model preferences, just preserve existing buffers
+
+    # DEPRECATED: Use scale_up/scale_down instead
+    # Ignores model preferences, just preserve existing buffers
+    # We rarely actually want to do this since it can cause severe over provisioning
     preserve = "preserve"
 
+    # Scale up if necessary to meet the desired buffer.
+    # If the existing resource is over-provisioned, do not reduce the requirement.
+    # If under-provisioned, the requirement can be increased to meet the desired buffer.
+    # Example: need 20 cores but have 10 → scale up to 20 cores.
+    # Example 2: need 20 cores but have 40 → do not scale down and require at
+    # least 40 cores
+    scale_up = "scale_up"
+    # Scale down if necessary to meet the desired buffer.
+    # If the existing resource is under-provisioned, do not increase the requirement.
+    # If over-provisioned, the requirement can be decreased to meet the desired buffer.
+    # Example: need 20 cores but have 10 → maintain buffer and do not scale up.
+    # Example 2: need 20 cores but have 40 → scale down to 20 cores.
+    scale_down = "scale_down"
+
 
 class Buffer(ExcludeUnsetModel):
     # The value of the buffer expressed as a ratio over "normal" load e.g. 1.5x
@@ -819,7 +846,6 @@ class Buffers(ExcludeUnsetModel):
             "compute": Buffer(ratio: 1.5),
         }
     )
-
    And then models layer in their buffers, for example if a workload
    requires 10 CPU cores, but the operator of that workload likes to build in
    2x buffer for background work (20 cores provisioned), they would express that
@@ -955,7 +981,7 @@ class CapacityRequirement(ExcludeUnsetModel):
     network_mbps: Interval = certain_int(0)
     disk_gib: Interval = certain_int(0)
 
-    context: Dict = {}
+    context: Dict[str, Any] = {}
 
 
 class ClusterCapacity(ExcludeUnsetModel):
@@ -968,7 +994,7 @@ class ClusterCapacity(ExcludeUnsetModel):
     # When provisioning services we might need to signal they
     # should have certain configuration, for example flags that
     # affect durability shut off
-    cluster_params: Dict = {}
+    cluster_params: Dict[str, Any] = {}
 
 
 class ServiceCapacity(ExcludeUnsetModel):
@@ -979,7 +1005,7 @@ class ServiceCapacity(ExcludeUnsetModel):
     regret_cost: bool = False
     # Often while provisioning cloud services we need to represent
     # parameters to the cloud APIs, use this to inject those from models
-    service_params: Dict = {}
+    service_params: Dict[str, Any] = {}
 
 
 # For services that are provisioned by zone (e.g. Cassandra, EVCache)
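To make the new BufferComponent helpers and BufferIntent members shown above concrete, here is a minimal sketch, assuming both enums are imported from service_capacity_modeling.interface as the hunks suggest.

```python
# Sketch of the helpers added in this release; the import path follows the
# interface.py hunks above.
from service_capacity_modeling.interface import BufferComponent, BufferIntent

# compute/storage are the "generic" components; cpu/network/disk/memory are
# the resource-specific ones.
assert BufferComponent.is_generic(BufferComponent.compute)
assert BufferComponent.is_specific(BufferComponent.disk)

# The deprecated preserve intent now sits alongside the finer-grained intents.
for intent in (BufferIntent.desired, BufferIntent.scale_up, BufferIntent.scale_down):
    print(intent.value)
```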

service_capacity_modeling/models/__init__.py
@@ -20,6 +20,25 @@ from service_capacity_modeling.interface import Platform
 from service_capacity_modeling.interface import QueryPattern
 from service_capacity_modeling.interface import RegionContext
 
+__all__ = [
+    "AccessConsistency",
+    "AccessPattern",
+    "CapacityDesires",
+    "CapacityPlan",
+    "CapacityRegretParameters",
+    "certain_float",
+    "Consistency",
+    "DataShape",
+    "Drive",
+    "FixedInterval",
+    "GlobalConsistency",
+    "Instance",
+    "Platform",
+    "QueryPattern",
+    "RegionContext",
+    "CapacityModel",
+]
+
 __common_regrets__ = frozenset(("spend", "disk", "mem"))
 
 
@@ -85,7 +104,7 @@ class CapacityModel:
 
     """
 
-    def __init__(self):
+    def __init__(self) -> None:
         pass
 
     @staticmethod
@@ -270,7 +289,7 @@ class CapacityModel:
     @staticmethod
     def default_desires(
         user_desires: CapacityDesires, extra_model_arguments: Dict[str, Any]
-    ):
+    ) -> CapacityDesires:
         """Optional defaults to apply given a user desires
 
         Often users do not know what the on-cpu time of their queries
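The final hunk annotates CapacityModel.default_desires with a CapacityDesires return type. A minimal sketch of a model overriding the hook under that contract; the subclass is hypothetical and CapacityDesires is assumed importable from service_capacity_modeling.interface as elsewhere in this diff.

```python
# Hedged sketch of overriding the newly annotated hook; ExampleModel is a
# placeholder and simply echoes the user's desires back unchanged.
from typing import Any, Dict

from service_capacity_modeling.interface import CapacityDesires
from service_capacity_modeling.models import CapacityModel


class ExampleModel(CapacityModel):
    @staticmethod
    def default_desires(
        user_desires: CapacityDesires, extra_model_arguments: Dict[str, Any]
    ) -> CapacityDesires:
        # With the -> CapacityDesires annotation, mypy now checks that the
        # hook returns a desires object rather than None.
        return user_desires
```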