service-capacity-modeling 0.3.58-py3-none-any.whl → 0.3.60-py3-none-any.whl

This diff compares the contents of two package versions as published to a supported public registry. It is provided for informational purposes only.
service_capacity_modeling/models/org/netflix/cassandra.py

@@ -4,6 +4,7 @@ from typing import Any
 from typing import Callable
 from typing import Dict
 from typing import Optional
+from typing import Set

 from pydantic import BaseModel
 from pydantic import Field
@@ -47,6 +48,9 @@ from service_capacity_modeling.stats import dist_for_interval
 logger = logging.getLogger(__name__)

 BACKGROUND_BUFFER = "background"
+CRITICAL_TIERS: Set[int] = {0, 1}
+# cluster size aka nodes per ASG
+CRITICAL_TIER_MIN_CLUSTER_SIZE = 2


 def _write_buffer_gib_zone(
@@ -362,10 +366,7 @@ def _estimate_cassandra_cluster_zonal( # pylint: disable=too-many-positional-ar
     # Cassandra clusters should aim to be at least 2 nodes per zone to start
     # out with for tier 0 or tier 1. This gives us more room to "up-color"
     # clusters.
-    min_count = 0
-    if desires.service_tier <= 1:
-        min_count = 2
-
+    min_count = 2 if desires.service_tier in CRITICAL_TIERS else 0

     base_mem = _get_base_memory(desires)

     heap_fn = _cass_heap_for_write_buffer(
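The hunk above replaces the imperative tier check (`service_tier <= 1`) with a single conditional expression keyed off the new `CRITICAL_TIERS` constant; the resulting behavior is unchanged. A minimal standalone sketch of the equivalent logic (`min_zonal_count` is a hypothetical helper for illustration, not part of the package):

```python
CRITICAL_TIERS = {0, 1}

def min_zonal_count(service_tier: int) -> int:
    # Tier 0/1 clusters start out with at least 2 nodes per zone,
    # leaving headroom to "up-color" (promote) clusters later.
    return 2 if service_tier in CRITICAL_TIERS else 0

assert min_zonal_count(0) == 2  # critical tier
assert min_zonal_count(1) == 2  # critical tier
assert min_zonal_count(2) == 0  # non-critical: no floor imposed
```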
@@ -583,6 +584,33 @@ class NflxCassandraArguments(BaseModel):


 class NflxCassandraCapacityModel(CapacityModel):
+    @staticmethod
+    def get_required_cluster_size(tier, extra_model_arguments):
+        required_cluster_size: Optional[int] = (
+            math.ceil(extra_model_arguments["required_cluster_size"])
+            if "required_cluster_size" in extra_model_arguments
+            else None
+        )
+
+        if tier not in CRITICAL_TIERS or required_cluster_size is None:
+            return required_cluster_size
+
+        # If the upstream explicitly set a cluster size, make sure it is
+        # at least CRITICAL_TIER_MIN_CLUSTER_SIZE. We cannot do a max
+        # of the two because the horizontal scaling is disabled
+        if required_cluster_size < CRITICAL_TIER_MIN_CLUSTER_SIZE:
+            raise ValueError(
+                f"Required cluster size must be at least "
+                f"{CRITICAL_TIER_MIN_CLUSTER_SIZE=} when "
+                f"service tier({tier}) is a "
+                f"critical tier({CRITICAL_TIERS})."
+                f"If it is an existing cluster, horizontally "
+                f"scale the cluster to be >= "
+                f"{CRITICAL_TIER_MIN_CLUSTER_SIZE}"
+            )
+
+        return required_cluster_size
+
     @staticmethod
     def capacity_plan(
         instance: Instance,
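The new `get_required_cluster_size` helper centralizes handling of the `required_cluster_size` argument: an absent value still passes through as `None` (automatic sizing), but an explicit size below `CRITICAL_TIER_MIN_CLUSTER_SIZE` on a tier 0/1 cluster is now rejected rather than silently honored. A quick sketch of the behavior, assuming the 0.3.60 wheel is installed (the import path is taken from the RECORD below):

```python
from service_capacity_modeling.models.org.netflix.cassandra import (
    NflxCassandraCapacityModel,
)

get_size = NflxCassandraCapacityModel.get_required_cluster_size

# Non-critical tiers pass any explicit size through unchanged.
assert get_size(2, {"required_cluster_size": 1}) == 1

# Critical tiers with no explicit size keep automatic sizing (None).
assert get_size(0, {}) is None

# Critical tiers with an explicit size below the minimum now raise.
try:
    get_size(0, {"required_cluster_size": 1})
except ValueError as err:
    print(err)  # tells the operator to scale the cluster to >= 2
```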
@@ -601,11 +629,12 @@ class NflxCassandraCapacityModel(CapacityModel):
         require_attached_disks: bool = extra_model_arguments.get(
             "require_attached_disks", False
         )
-        required_cluster_size: Optional[int] = (
-            math.ceil(extra_model_arguments["required_cluster_size"])
-            if "required_cluster_size" in extra_model_arguments
-            else None
+        required_cluster_size: Optional[
+            int
+        ] = NflxCassandraCapacityModel.get_required_cluster_size(
+            desires.service_tier, extra_model_arguments
         )
+
         max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 500)
         max_regional_size: int = extra_model_arguments.get("max_regional_size", 192)
         max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 5120)
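Because `capacity_plan` now routes `extra_model_arguments` through the helper, the validation also surfaces through the top-level planner. A sketch of how it would be triggered, using the planner API and helpers shown in the project README; the desires fields and values here are illustrative assumptions, not part of this diff:

```python
from service_capacity_modeling.capacity_planner import planner
from service_capacity_modeling.interface import (
    CapacityDesires,
    DataShape,
    QueryPattern,
    certain_int,
)

desires = CapacityDesires(
    service_tier=1,  # a critical tier
    query_pattern=QueryPattern(
        estimated_read_per_second=certain_int(10_000),
        estimated_write_per_second=certain_int(10_000),
    ),
    data_shape=DataShape(estimated_state_size_gib=certain_int(100)),
)

# Pinning a tier 1 cluster to a single node per zone now fails fast.
try:
    planner.plan_certain(
        model_name="org.netflix.cassandra",
        region="us-east-1",
        desires=desires,
        extra_model_arguments={"required_cluster_size": 1},
    )
except ValueError as err:
    print(err)  # minimum cluster size violation for a critical tier
```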
service_capacity_modeling-0.3.58.dist-info/METADATA → service_capacity_modeling-0.3.60.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: service-capacity-modeling
-Version: 0.3.58
+Version: 0.3.60
 Summary: Contains utilities for modeling capacity for pluggable workloads
 Author: Joseph Lynch
 Author-email: josephl@netflix.com
@@ -250,6 +250,18 @@ To contribute to this project:
 tox -e py38 -- -k test_<your_functionality> --pdb --pdbcls=IPython.terminal.debugger:Pdb
 ```

+### Pre-commit / Linting
+To install the pre-commit linter
+```
+pre-commit install
+```
+
+To run the linting manually:
+```
+tox -e pre-commit
+```
+
+
 ### PyCharm IDE Setup
 Use one of the test environments for IDE development, e.g. `tox -e py310` and then
 `Add New Interpreter -> Add Local -> Select Existing -> Navigate to (workdir)/.tox/py310`.
service_capacity_modeling-0.3.58.dist-info/RECORD → service_capacity_modeling-0.3.60.dist-info/RECORD

@@ -46,7 +46,7 @@ service_capacity_modeling/models/utils.py,sha256=0F__wz9KAGhPIQfvNp-FTtTANW6-sO4
 service_capacity_modeling/models/org/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 service_capacity_modeling/models/org/netflix/__init__.py,sha256=m7IaQbo85NEbDvfoPJREIznpzg0YHTCrKP5C1GvnOYM,2378
 service_capacity_modeling/models/org/netflix/aurora.py,sha256=Mi9zd48k64GkKIjAs3J1S2qThguNvyWIy2dUmhwrVhc,12883
-service_capacity_modeling/models/org/netflix/cassandra.py,sha256=47R0jgdyL1kxekJ-2UvW-GC018qYG-iU_c8SZzcHQZk,33443
+service_capacity_modeling/models/org/netflix/cassandra.py,sha256=ppvHfHIwSzljlkSDTbPBXXaX0N1suVdmI3sfEwN7Cek,34682
 service_capacity_modeling/models/org/netflix/counter.py,sha256=hOVRRCgCPU-A5TdLKQXc_mWTQpkKOWRNjOeECdDP7kA,9205
 service_capacity_modeling/models/org/netflix/crdb.py,sha256=2rD4Io0yT7o0NR4lNferXXOSTDe0SkT1LbSChvNgRrQ,19698
 service_capacity_modeling/models/org/netflix/ddb.py,sha256=2jxMFz31xckJvymvVlu1yWm0X4dGYlqxDo0bftU1B9M,26307
@@ -69,9 +69,9 @@ service_capacity_modeling/tools/auto_shape.py,sha256=Rk5Fjrw2susVL8It_J2KUADoMGB
 service_capacity_modeling/tools/fetch_pricing.py,sha256=SHOtFaPr61op2bnY9i_g_1-d-Nz2rV8c7Jwsye2R49s,3763
 service_capacity_modeling/tools/generate_missing.py,sha256=uvr9fQanx3bm4KTneH-x7EOQvO7cVV0i9gdQvArPCuY,2947
 service_capacity_modeling/tools/instance_families.py,sha256=e9JWSIdljSmHI8Nb2MI5Ld9JqQ7WdOtPtV7g3oR7ZiU,7764
-service_capacity_modeling-0.3.58.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
-service_capacity_modeling-0.3.58.dist-info/METADATA,sha256=I7I4UO18FYRZ-LP5hGDhVFP_4w_zQrEKKh-WphQroKE,9590
-service_capacity_modeling-0.3.58.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-service_capacity_modeling-0.3.58.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
-service_capacity_modeling-0.3.58.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
-service_capacity_modeling-0.3.58.dist-info/RECORD,,
+service_capacity_modeling-0.3.60.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
+service_capacity_modeling-0.3.60.dist-info/METADATA,sha256=tMXT9YuVyoSpvawrjPjzLWQTwIJUXHrr9_XmL2btp7Y,9733
+service_capacity_modeling-0.3.60.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+service_capacity_modeling-0.3.60.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
+service_capacity_modeling-0.3.60.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
+service_capacity_modeling-0.3.60.dist-info/RECORD,,