service-capacity-modeling 0.3.68__py3-none-any.whl → 0.3.70__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


service_capacity_modeling/models/common.py

@@ -69,6 +69,43 @@ def _sqrt_staffed_cores(rps: float, latency_s: float, qos: float) -> int:
     return math.ceil((rps * latency_s) + qos * math.sqrt(rps * latency_s))
 
 
+def get_effective_disk_per_node_gib(
+    instance: Instance,
+    drive: Drive,
+    disk_buffer_ratio: float,
+    max_local_data_per_node_gib: float = float("inf"),
+    max_attached_data_per_node_gib: float = float("inf"),
+) -> float:
+    """Calculate usable disk for an instance while respecting per-node data limits
+    and desired disk buffer ratio
+
+    Prevents overloading nodes with too much data, which causes slow bootstrapping and
+    recovery times
+
+    Args:
+        instance: The compute instance configuration
+        drive: The drive configuration for the instance
+        disk_buffer_ratio: Buffer ratio for operational headroom
+        max_local_data_per_node_gib: Maximum data per node for local drives
+        max_attached_data_per_node_gib: Maximum data per node for attached drives
+
+    Returns:
+        float: Maximum usable disk capacity per node in GiB
+    """
+    # TODO: @homatthew / @vrayini: Incorporate disk headroom for attached / local drives
+    if instance.drive is None:
+        if max_attached_data_per_node_gib == float("inf"):
+            return drive.max_size_gib
+
+        attached_disk_limit_gib = max_attached_data_per_node_gib * disk_buffer_ratio
+        # Attached disks are provisioned in 100GB limits
+        rounded_size = utils.next_n(attached_disk_limit_gib, n=100)
+        return min(rounded_size, drive.max_size_gib)
+
+    local_disk_limit_gib = max_local_data_per_node_gib * disk_buffer_ratio
+    return min(local_disk_limit_gib, instance.drive.size_gib)
+
+
 def sqrt_staffed_cores(desires: CapacityDesires) -> int:
     """Computes cores given a sqrt staffing model
 
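For illustration, a minimal standalone sketch of the capping arithmetic the new helper performs. Plain numbers stand in for the package's Instance and Drive objects, and the function below is a local stand-in, not the library function; all values are hypothetical:

    import math

    def effective_disk_per_node_gib(local_drive_size_gib, cloud_drive_max_gib,
                                    disk_buffer_ratio, max_local=float("inf"),
                                    max_attached=float("inf")):
        # No local (ephemeral) drive: fall back to attached cloud volumes.
        if local_drive_size_gib is None:
            if max_attached == float("inf"):
                return cloud_drive_max_gib
            limit = max_attached * disk_buffer_ratio
            # attached volumes round up in 100 GiB steps
            return min(math.ceil(limit / 100) * 100, cloud_drive_max_gib)
        # Local drives: cap by the per-node data limit scaled by the buffer
        # ratio, but never beyond the physical drive size.
        return min(max_local * disk_buffer_ratio, local_drive_size_gib)

    # A node allowed 1280 GiB of data with a 4x disk buffer may use up to
    # 5120 GiB of its local drive, or the whole drive if it is smaller.
    print(effective_disk_per_node_gib(3840, None, 4.0, max_local=1280))  # 3840
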
@@ -357,11 +394,6 @@ def compute_stateful_zone(  # pylint: disable=too-many-positional-arguments
     # (per_node_size_gib, node_count) -> (read_ios, write_ios)
     required_disk_ios: Callable[[float, int], Tuple[float, float]] = lambda size_gib,
     count: (0, 0),
-    required_disk_space: Callable[[float], float] = lambda size_gib: size_gib,
-    # The maximum amount of state we can hold per node in the database
-    # typically you don't want stateful systems going much higher than a
-    # few TiB so that recovery functions properly
-    max_local_disk_gib: float = 2048,
     # Some stateful clusters have sidecars that take memory
     reserve_memory: Callable[[float], float] = lambda x: 0,
     # How much write buffer we get per instance (usually a percentage of
@@ -373,14 +405,7 @@ def compute_stateful_zone(  # pylint: disable=too-many-positional-arguments
     min_count: int = 0,
     adjusted_disk_io_needed: float = 0.0,
     read_write_ratio: float = 0.0,
-    # Max attached EBS volume size per node. Higher value here could allow
-    # for a lower instance count (allows more vertical scaling vs forcing horizontal)
-    max_attached_disk_gib: Optional[float] = None,
 ) -> ZoneClusterCapacity:
-    # Datastores often require disk headroom for e.g. compaction and such
-    if instance.drive is not None:
-        needed_disk_gib = math.ceil(required_disk_space(needed_disk_gib))
-
     # How many instances do we need for the CPU
     count = math.ceil(needed_cores / instance.cpu)
 
@@ -404,12 +429,8 @@ def compute_stateful_zone(  # pylint: disable=too-many-positional-arguments
     count = max(count, math.ceil(needed_network_mbps / instance.net_mbps))
 
     # How many instances do we need for the disk
-    if (
-        instance.drive is not None
-        and instance.drive.size_gib > 0
-        and max_local_disk_gib > 0
-    ):
-        disk_per_node = min(max_local_disk_gib, instance.drive.size_gib)
+    if instance.drive is not None and instance.drive.size_gib > 0:
+        disk_per_node = instance.drive.size_gib
         count = max(count, math.ceil(needed_disk_gib / disk_per_node))
     if adjusted_disk_io_needed != 0.0:
         instance_read_iops = (
@@ -441,13 +462,13 @@ def compute_stateful_zone(  # pylint: disable=too-many-positional-arguments
     cost = count * instance.annual_cost
 
     attached_drives = []
-    if instance.drive is None and required_disk_space(needed_disk_gib) > 0:
+    if instance.drive is None and needed_disk_gib > 0:
         # If we don't have disks attach the cloud drive with enough
         # space and IO for the requirement
 
         # Note that cloud drivers are provisioned _per node_ and must be chosen for
         # the max of space and IOS.
-        space_gib = max(1, math.ceil(required_disk_space(needed_disk_gib) / count))
+        space_gib = max(1, math.ceil(needed_disk_gib / count))
        read_io, write_io = required_disk_ios(space_gib, count)
        read_io, write_io = (
            utils.next_n(read_io, n=200),
@@ -463,9 +484,6 @@ def compute_stateful_zone(  # pylint: disable=too-many-positional-arguments
         # 1/3 the maximum volume size in one node (preferring more nodes
         # with smaller volumes)
         max_size = drive.max_size_gib / 3
-        if max_attached_disk_gib is not None:
-            max_size = max_attached_disk_gib
-
         if ebs_gib > max_size > 0:
             ratio = ebs_gib / max_size
             count = max(cluster_size(math.ceil(count * ratio)), min_count)

service_capacity_modeling/models/org/netflix/cassandra.py

@@ -21,6 +21,7 @@ from service_capacity_modeling.interface import certain_float
 from service_capacity_modeling.interface import certain_int
 from service_capacity_modeling.interface import Clusters
 from service_capacity_modeling.interface import Consistency
+from service_capacity_modeling.interface import CurrentClusterCapacity
 from service_capacity_modeling.interface import DataShape
 from service_capacity_modeling.interface import Drive
 from service_capacity_modeling.interface import FixedInterval
@@ -35,12 +36,15 @@ from service_capacity_modeling.models import CapacityModel
 from service_capacity_modeling.models.common import buffer_for_components
 from service_capacity_modeling.models.common import compute_stateful_zone
 from service_capacity_modeling.models.common import derived_buffer_for_component
+from service_capacity_modeling.models.common import get_effective_disk_per_node_gib
 from service_capacity_modeling.models.common import network_services
 from service_capacity_modeling.models.common import normalize_cores
 from service_capacity_modeling.models.common import simple_network_mbps
 from service_capacity_modeling.models.common import sqrt_staffed_cores
 from service_capacity_modeling.models.common import working_set_from_drive_and_slo
 from service_capacity_modeling.models.common import zonal_requirements_from_current
+from service_capacity_modeling.models.utils import is_power_of_2
+from service_capacity_modeling.models.utils import next_doubling
 from service_capacity_modeling.models.utils import next_power_of_2
 from service_capacity_modeling.stats import dist_for_interval
 
@@ -106,6 +110,53 @@ def _get_disk_from_desires(desires, copies_per_region):
     )
 
 
+def _get_min_count(
+    tier: int,
+    required_cluster_size: Optional[int],
+    needed_disk_gib: float,
+    disk_per_node_gib: float,
+    cluster_size_lambda: Callable[[int], int],
+):
+    """
+    Compute the minimum number of nodes required for a zone.
+
+    This function is used to prevent the planner from allocating clusters that
+    would exceed the max data per node or under the required cluster size for
+    a tier or existing cluster
+    """
+
+    # Cassandra clusters should aim to be at least 2 nodes per zone to start
+    # out with for tier 0 or tier 1. This gives us more room to "up-color"]
+    # clusters.
+    min_nodes_for_tier = 2 if tier in CRITICAL_TIERS else 0
+
+    # Prevent allocating clusters that exceed the max data per node.
+    min_nodes_for_disk = math.ceil(needed_disk_gib / disk_per_node_gib)
+
+    # Take the max of the following in order to avoid:
+    # (1) if `required_cluster_size` < `min_nodes_for_disk`, don't let the planner
+    # pick a shape that would exceed the max data per node
+    #
+    # For example, if we need 4TiB of disk, and the max data per node is 1TiB,
+    # Regardless of the `required_cluster_size`, we cannot allocate less than 4
+    # nodes because that would exceed the max data per node.
+    #
+    # (2) if `required_cluster_size` > `min_nodes_for_disk`, don't let the
+    # node density requirement affect the min count because the required
+    # cluster size already meets the node density requirement.
+    #
+    # For example, if we need 4TiB of disk, and the max data per node is 1TiB,
+    # and the upstream requires >= 8 nodes, we can allocate 8 nodes because
+    # each node would only have 500GB of data.
+    min_count = max(
+        min_nodes_for_tier,
+        required_cluster_size or 0,
+        min_nodes_for_disk,
+    )
+    # Ensure that the min count is an increment of the cluster size constraint (doubling)
+    return cluster_size_lambda(min_count)
+
+
 def _zonal_requirement_for_new_cluster(
     desires, instance, copies_per_region, zones_per_region
 ) -> CapacityRequirement:
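To make the 4 TiB / 1 TiB example in the comment concrete, here is a small hypothetical walk-through of the min-count arithmetic. The helper is a stand-in that mirrors the max()/ceil() logic above, not the package function, and the rounding constraint is ignored for simplicity:

    import math

    def min_zone_count(tier_floor, required_cluster_size, needed_disk_gib,
                       disk_per_node_gib, round_up):
        min_nodes_for_disk = math.ceil(needed_disk_gib / disk_per_node_gib)
        return round_up(max(tier_floor, required_cluster_size or 0, min_nodes_for_disk))

    def identity(n):  # skip the doubling constraint for this example
        return n

    # Case (1): 4 TiB of data, 1 TiB allowed per node, upstream asks for 2 nodes.
    # The per-node data limit wins: at least 4 nodes are needed.
    print(min_zone_count(2, 2, 4096, 1024, identity))  # 4

    # Case (2): same data, but upstream requires >= 8 nodes.
    # 8 nodes already keep each node near 512 GiB, so 8 stands.
    print(min_zone_count(2, 8, 4096, 1024, identity))  # 8
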
@@ -149,15 +200,7 @@ def _estimate_cassandra_requirement(  # pylint: disable=too-many-positional-argu
     )
     memory_preserve = False
     reference_shape = desires.reference_shape
-    current_capacity = (
-        None
-        if desires.current_clusters is None
-        else (
-            desires.current_clusters.zonal[0]
-            if len(desires.current_clusters.zonal)
-            else desires.current_clusters.regional[0]
-        )
-    )
+    current_capacity = _get_current_capacity(desires)
 
     # If the cluster is already provisioned
     if current_capacity and desires.current_clusters is not None:
@@ -277,6 +320,26 @@ def _estimate_cassandra_requirement(  # pylint: disable=too-many-positional-argu
     )
 
 
+def _get_current_cluster_size(desires) -> int:
+    current_capacity = _get_current_capacity(desires)
+    if current_capacity is None:
+        return 0
+    return math.ceil(current_capacity.cluster_instance_count.mid)
+
+
+def _get_current_capacity(desires) -> Optional[CurrentClusterCapacity]:
+    current_capacity = (
+        None
+        if desires.current_clusters is None
+        else (
+            desires.current_clusters.zonal[0]
+            if len(desires.current_clusters.zonal)
+            else desires.current_clusters.regional[0]
+        )
+    )
+    return current_capacity
+
+
 def _upsert_params(cluster, params):
     if cluster.cluster_params:
         cluster.cluster_params.update(params)
@@ -284,6 +347,18 @@ def _upsert_params(cluster, params):
         cluster.cluster_params = params
 
 
+def _get_cluster_size_lambda(
+    current_cluster_size: int,
+    required_cluster_size: Optional[int],
+) -> Callable[[int], int]:
+    if required_cluster_size:
+        return lambda x: next_doubling(x, base=required_cluster_size)
+    elif current_cluster_size and not is_power_of_2(current_cluster_size):
+        return lambda x: next_doubling(x, base=current_cluster_size)
+    else:  # New provisionings
+        return next_power_of_2
+
+
 # pylint: disable=too-many-locals
 # pylint: disable=too-many-return-statements
 # flake8: noqa: C901
@@ -298,7 +373,8 @@ def _estimate_cassandra_cluster_zonal(  # pylint: disable=too-many-positional-ar
     require_attached_disks: bool = False,
     required_cluster_size: Optional[int] = None,
     max_rps_to_disk: int = 500,
-    max_local_disk_gib: int = 5120,
+    max_local_data_per_node_gib: int = 1280,
+    max_attached_data_per_node_gib: int = 2048,
     max_regional_size: int = 192,
     max_write_buffer_percent: float = 0.25,
     max_table_buffer_percent: float = 0.11,
@@ -361,10 +437,31 @@ def _estimate_cassandra_cluster_zonal(  # pylint: disable=too-many-positional-ar
         copies_per_region=copies_per_region,
     )
 
-    # Cassandra clusters should aim to be at least 2 nodes per zone to start
-    # out with for tier 0 or tier 1. This gives us more room to "up-color"]
-    # clusters.
-    min_count = 2 if desires.service_tier in CRITICAL_TIERS else 0
+    # Adjust the min count to adjust to prevent too much data on a single
+    needed_disk_gib = int(requirement.disk_gib.mid)
+    disk_buffer_ratio = buffer_for_components(
+        buffers=desires.buffers, components=[BufferComponent.disk]
+    ).ratio
+    disk_per_node_gib = get_effective_disk_per_node_gib(
+        instance,
+        drive,
+        disk_buffer_ratio,
+        max_local_data_per_node_gib=max_local_data_per_node_gib,
+        max_attached_data_per_node_gib=max_attached_data_per_node_gib,
+    )
+
+    current_cluster_size = _get_current_cluster_size(desires)
+    cluster_size_lambda = _get_cluster_size_lambda(
+        current_cluster_size, required_cluster_size
+    )
+    min_count = _get_min_count(
+        tier=desires.service_tier,
+        required_cluster_size=required_cluster_size,
+        needed_disk_gib=needed_disk_gib,
+        disk_per_node_gib=disk_per_node_gib,
+        cluster_size_lambda=cluster_size_lambda,
+    )
+
     base_mem = _get_base_memory(desires)
 
@@ -378,7 +475,7 @@ def _estimate_cassandra_cluster_zonal(  # pylint: disable=too-many-positional-ar
         instance=instance,
         drive=drive,
         needed_cores=int(requirement.cpu_cores.mid),
-        needed_disk_gib=int(requirement.disk_gib.mid),
+        needed_disk_gib=needed_disk_gib,
         needed_memory_gib=int(requirement.mem_gib.mid),
         needed_network_mbps=requirement.network_mbps.mid,
         # Take into account the reads per read
@@ -387,14 +484,9 @@ def _estimate_cassandra_cluster_zonal(  # pylint: disable=too-many-positional-ar
             _cass_io_per_read(size) * math.ceil(read_io_per_sec / count),
             write_io_per_sec / count,
         ),
-        # Disk buffer is already added while computing C* estimates
-        required_disk_space=lambda x: x,
-        # C* clusters cannot recover data from neighbors quickly so we
-        # want to avoid clusters with more than 1 TiB of local state
-        max_local_disk_gib=max_local_disk_gib,
         # C* clusters provision in powers of 2 because doubling
-        cluster_size=next_power_of_2,
-        min_count=max(min_count, required_cluster_size or 0),
+        cluster_size=cluster_size_lambda,
+        min_count=min_count,
         # TODO: Take reserve memory calculation into account during buffer calculation
         # C* heap usage takes away from OS page cache memory
         reserve_memory=lambda x: base_mem + heap_fn(x),
@@ -617,6 +709,11 @@ class NflxCassandraCapacityModel(CapacityModel):
         desires: CapacityDesires,
         extra_model_arguments: Dict[str, Any],
     ) -> Optional[CapacityPlan]:
+        # TODO: Standardize these extra model argument defaults in a single
+        # place. Many of them are defined here and as default values in the
+        # downstream method but only these ones are used which is confusing for
+        # readability
+
         # Use durabiliy and consistency to compute RF.
         copies_per_region = _target_rf(
             desires, extra_model_arguments.get("copies_per_region", None)
@@ -635,7 +732,11 @@ class NflxCassandraCapacityModel(CapacityModel):
 
         max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 500)
         max_regional_size: int = extra_model_arguments.get("max_regional_size", 192)
-        max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 5120)
+        max_local_data_per_node_gib: int = extra_model_arguments.get(
+            "max_local_data_per_node_gib",
+            extra_model_arguments.get("max_local_disk_gib", 1280),
+        )
+
         max_write_buffer_percent: float = min(
             0.5, extra_model_arguments.get("max_write_buffer_percent", 0.25)
         )
@@ -663,7 +764,7 @@ class NflxCassandraCapacityModel(CapacityModel):
             required_cluster_size=required_cluster_size,
             max_rps_to_disk=max_rps_to_disk,
             max_regional_size=max_regional_size,
-            max_local_disk_gib=max_local_disk_gib,
+            max_local_data_per_node_gib=max_local_data_per_node_gib,
             max_write_buffer_percent=max_write_buffer_percent,
             max_table_buffer_percent=max_table_buffer_percent,
         )
@@ -676,6 +777,26 @@ class NflxCassandraCapacityModel(CapacityModel):
     def extra_model_arguments_schema() -> Dict[str, Any]:
         return NflxCassandraArguments.model_json_schema()
 
+    @staticmethod
+    def default_buffers() -> Buffers:
+        return Buffers(
+            default=Buffer(ratio=1.5),
+            desired={
+                "compute": Buffer(ratio=1.5, components=[BufferComponent.compute]),
+                "storage": Buffer(ratio=4.0, components=[BufferComponent.storage]),
+                # Cassandra reserves headroom in both cpu and network for background
+                # work and tasks
+                "background": Buffer(
+                    ratio=2.0,
+                    components=[
+                        BufferComponent.cpu,
+                        BufferComponent.network,
+                        BACKGROUND_BUFFER,
+                    ],
+                ),
+            },
+        )
+
     @staticmethod
     def default_desires(user_desires, extra_model_arguments: Dict[str, Any]):
         acceptable_consistency = {
@@ -703,24 +824,7 @@ class NflxCassandraCapacityModel(CapacityModel):
 
         # By supplying these buffers we can deconstruct observed utilization into
         # load versus buffer.
-        buffers = Buffers(
-            default=Buffer(ratio=1.5),
-            desired={
-                "compute": Buffer(ratio=1.5, components=[BufferComponent.compute]),
-                "storage": Buffer(ratio=4.0, components=[BufferComponent.storage]),
-                # Cassandra reserves headroom in both cpu and network for background
-                # work and tasks
-                "background": Buffer(
-                    ratio=2.0,
-                    components=[
-                        BufferComponent.cpu,
-                        BufferComponent.network,
-                        BACKGROUND_BUFFER,
-                    ],
-                ),
-            },
-        )
-
+        buffers = NflxCassandraCapacityModel.default_buffers()
         if user_desires.query_pattern.access_pattern == AccessPattern.latency:
             return CapacityDesires(
                 query_pattern=QueryPattern(

service_capacity_modeling/models/org/netflix/crdb.py

@@ -10,6 +10,9 @@ from pydantic import Field
 
 from service_capacity_modeling.interface import AccessConsistency
 from service_capacity_modeling.interface import AccessPattern
+from service_capacity_modeling.interface import Buffer
+from service_capacity_modeling.interface import BufferComponent
+from service_capacity_modeling.interface import Buffers
 from service_capacity_modeling.interface import CapacityDesires
 from service_capacity_modeling.interface import CapacityPlan
 from service_capacity_modeling.interface import CapacityRequirement
@@ -27,7 +30,9 @@ from service_capacity_modeling.interface import QueryPattern
 from service_capacity_modeling.interface import RegionContext
 from service_capacity_modeling.interface import Requirements
 from service_capacity_modeling.models import CapacityModel
+from service_capacity_modeling.models.common import buffer_for_components
 from service_capacity_modeling.models.common import compute_stateful_zone
+from service_capacity_modeling.models.common import get_effective_disk_per_node_gib
 from service_capacity_modeling.models.common import normalize_cores
 from service_capacity_modeling.models.common import simple_network_mbps
 from service_capacity_modeling.models.common import sqrt_staffed_cores
@@ -137,7 +142,7 @@ def _estimate_cockroachdb_cluster_zonal(  # noqa=E501 pylint: disable=too-many-p
     desires: CapacityDesires,
     zones_per_region: int = 3,
     copies_per_region: int = 3,
-    max_local_disk_gib: int = 2048,
+    max_local_data_per_node_gib: int = 2048,
     max_regional_size: int = 288,
     max_rps_to_disk: int = 500,
     min_vcpu_per_instance: int = 4,
@@ -184,11 +189,23 @@ def _estimate_cockroachdb_cluster_zonal(  # noqa=E501 pylint: disable=too-many-p
         + desires.data_shape.reserved_instance_system_mem_gib
     )
 
+    disk_buffer_ratio = buffer_for_components(
+        buffers=desires.buffers, components=[BufferComponent.disk]
+    ).ratio
+    max_data_per_node_gib = get_effective_disk_per_node_gib(
+        instance,
+        drive,
+        disk_buffer_ratio,
+        max_local_data_per_node_gib=max_local_data_per_node_gib,
+    )
+    needed_disk_gib = requirement.disk_gib.mid * disk_buffer_ratio
+    min_count = math.ceil(needed_disk_gib / max_data_per_node_gib)
+
     cluster = compute_stateful_zone(
         instance=instance,
         drive=drive,
         needed_cores=int(requirement.cpu_cores.mid),
-        needed_disk_gib=requirement.disk_gib.mid,
+        needed_disk_gib=needed_disk_gib,
         needed_memory_gib=requirement.mem_gib.mid,
         needed_network_mbps=requirement.network_mbps.mid,
         # Take into account the reads per read
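With the CockroachDB default buffer of 1.2 shown later in this diff, the effective disk requirement works out roughly the same as the old `required_disk_space=lambda x: x * 1.2` headroom; the difference is that the ratio is now applied before the call and also scales the per-node cap. A hedged arithmetic sketch with made-up numbers (3000 GiB requirement, 5000 GiB local drive):

    import math

    raw_disk_gib = 3000              # requirement.disk_gib.mid (illustrative)
    disk_buffer_ratio = 1.2          # CRDB default Buffer(ratio=1.2)
    max_local_data_per_node_gib = 2048
    instance_drive_gib = 5000        # hypothetical instance drive size

    # Old path applied the 1.2x headroom inside compute_stateful_zone;
    # the new path applies the same ratio up front.
    needed_disk_gib = raw_disk_gib * disk_buffer_ratio          # 3600.0

    # Per-node cap now scales with the same ratio, bounded by the drive.
    max_per_node = min(max_local_data_per_node_gib * disk_buffer_ratio,
                       instance_drive_gib)                      # 2457.6
    print(math.ceil(needed_disk_gib / max_per_node))            # min_count -> 2
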
@@ -199,13 +216,9 @@ def _estimate_cockroachdb_cluster_zonal(  # noqa=E501 pylint: disable=too-many-p
             # TODO: presumably there are some write IOs here
             0,
         ),
-        # CRDB requires ephemeral disks to be 80% full because leveled
-        # compaction can make progress as long as there is some headroom
-        required_disk_space=lambda x: x * 1.2,
-        max_local_disk_gib=max_local_disk_gib,
         # cockroachdb clusters will autobalance across available nodes
         cluster_size=lambda x: x,
-        min_count=1,
+        min_count=min_count,
         # Sidecars/System takes away memory from cockroachdb
         # cockroachdb by default uses --max-sql-memory of 25% of system memory
         # that cannot be used for caching
@@ -268,6 +281,12 @@ class NflxCockroachDBArguments(BaseModel):
 
 
 class NflxCockroachDBCapacityModel(CapacityModel):
+    @staticmethod
+    def default_buffers() -> Buffers:
+        return Buffers(
+            default=Buffer(ratio=1.2),
+        )
+
     @staticmethod
     def capacity_plan(
         instance: Instance,
@@ -282,7 +301,11 @@ class NflxCockroachDBCapacityModel(CapacityModel):
         max_regional_size: int = extra_model_arguments.get("max_regional_size", 500)
         max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 500)
         # Very large nodes are hard to recover
-        max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 2048)
+        max_local_data_per_node_gib: int = extra_model_arguments.get(
+            "max_local_data_per_node_gib",
+            extra_model_arguments.get("max_local_disk_gib", 2048),
+        )
+
         # Cockroach Labs recommends a minimum of 8 vCPUs and strongly
         # recommends no fewer than 4 vCPUs per node.
         min_vcpu_per_instance: int = extra_model_arguments.get(
@@ -299,7 +322,7 @@ class NflxCockroachDBCapacityModel(CapacityModel):
             zones_per_region=context.zones_in_region,
             copies_per_region=copies_per_region,
             max_regional_size=max_regional_size,
-            max_local_disk_gib=max_local_disk_gib,
+            max_local_data_per_node_gib=max_local_data_per_node_gib,
             max_rps_to_disk=max_rps_to_disk,
             min_vcpu_per_instance=min_vcpu_per_instance,
             license_fee_per_core=license_fee_per_core,
@@ -330,6 +353,7 @@ class NflxCockroachDBCapacityModel(CapacityModel):
                     f"User asked for {key}={value}"
                 )
 
+        buffers = NflxCockroachDBCapacityModel.default_buffers()
         if user_desires.query_pattern.access_pattern == AccessPattern.latency:
             return CapacityDesires(
                 query_pattern=QueryPattern(
@@ -396,6 +420,7 @@ class NflxCockroachDBCapacityModel(CapacityModel):
                     # gateway taking about 1 MiB of memory
                     reserved_instance_app_mem_gib=0.001,
                 ),
+                buffers=buffers,
             )
         else:
             return CapacityDesires(
@@ -465,6 +490,7 @@ class NflxCockroachDBCapacityModel(CapacityModel):
                     # gateway taking about 1 MiB of memory
                     reserved_instance_app_mem_gib=0.001,
                 ),
+                buffers=buffers,
             )
 
 

service_capacity_modeling/models/org/netflix/elasticsearch.py

@@ -11,6 +11,9 @@ from pydantic import Field
 
 from service_capacity_modeling.interface import AccessConsistency
 from service_capacity_modeling.interface import AccessPattern
+from service_capacity_modeling.interface import Buffer
+from service_capacity_modeling.interface import BufferComponent
+from service_capacity_modeling.interface import Buffers
 from service_capacity_modeling.interface import CapacityDesires
 from service_capacity_modeling.interface import CapacityPlan
 from service_capacity_modeling.interface import CapacityRequirement
@@ -27,7 +30,9 @@ from service_capacity_modeling.interface import RegionContext
 from service_capacity_modeling.interface import Requirements
 from service_capacity_modeling.interface import ZoneClusterCapacity
 from service_capacity_modeling.models import CapacityModel
+from service_capacity_modeling.models.common import buffer_for_components
 from service_capacity_modeling.models.common import compute_stateful_zone
+from service_capacity_modeling.models.common import get_effective_disk_per_node_gib
 from service_capacity_modeling.models.common import normalize_cores
 from service_capacity_modeling.models.common import simple_network_mbps
 from service_capacity_modeling.models.common import sqrt_staffed_cores
@@ -176,6 +181,20 @@ class NflxElasticsearchArguments(BaseModel):
 
 
 class NflxElasticsearchDataCapacityModel(CapacityModel):
+    @staticmethod
+    def default_buffers() -> Buffers:
+        return Buffers(
+            default=Buffer(ratio=1.33),
+        )
+
+    @staticmethod
+    def default_desires(
+        user_desires, extra_model_arguments: Dict[str, Any]
+    ) -> CapacityDesires:
+        return CapacityDesires(
+            buffers=NflxElasticsearchDataCapacityModel.default_buffers()
+        )
+
     @staticmethod
     def capacity_plan(
         instance: Instance,
@@ -190,7 +209,10 @@ class NflxElasticsearchDataCapacityModel(CapacityModel):
         max_regional_size: int = extra_model_arguments.get("max_regional_size", 120)
         max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 1000)
         # Very large nodes are hard to recover
-        max_local_disk_gib: int = extra_model_arguments.get("max_local_disk_gib", 8192)
+        max_local_data_per_node_gib: int = extra_model_arguments.get(
+            "max_local_data_per_node_gib",
+            extra_model_arguments.get("max_local_disk_gib", 8192),
+        )
 
         # the ratio of traffic that should be handled by search nodes.
         # 0.0 = no search nodes, all searches handled by data nodes
@@ -259,11 +281,23 @@ class NflxElasticsearchDataCapacityModel(CapacityModel):
         # io2/gp2 so for now we're just hardcoding.
         data_write_io_per_sec = (1 + 10) * max(1, data_write_bytes_per_sec // 16384)
 
+        disk_buffer_ratio = buffer_for_components(
+            buffers=desires.buffers, components=[BufferComponent.disk]
+        ).ratio
+        needed_disk_gib = data_requirement.disk_gib.mid * disk_buffer_ratio
+        max_data_per_node_gib = get_effective_disk_per_node_gib(
+            instance,
+            drive,
+            disk_buffer_ratio,
+            max_local_data_per_node_gib=max_local_data_per_node_gib,
+        )
+        min_count = math.ceil(needed_disk_gib / max_data_per_node_gib)
+
         data_cluster = compute_stateful_zone(
             instance=instance,
             drive=drive,
             needed_cores=int(data_requirement.cpu_cores.mid),
-            needed_disk_gib=int(data_requirement.disk_gib.mid),
+            needed_disk_gib=needed_disk_gib,
             needed_memory_gib=int(data_requirement.mem_gib.mid),
             needed_network_mbps=data_requirement.network_mbps.mid,
             # Take into account the reads per read
@@ -272,13 +306,9 @@ class NflxElasticsearchDataCapacityModel(CapacityModel):
                 _es_io_per_read(size) * math.ceil(data_rps / count),
                 data_write_io_per_sec / count,
             ),
-            # Elasticsearch requires ephemeral disks to be % full because tiered
-            # merging can make progress as long as there is some headroom
-            required_disk_space=lambda x: x * 1.33,
-            max_local_disk_gib=max_local_disk_gib,
             # Elasticsearch clusters can auto-balance via shard placement
             cluster_size=lambda x: x,
-            min_count=1,
+            min_count=min_count,
             # Sidecars/System takes away memory from Elasticsearch
             # which uses half of available system max of 32 for compressed oops
             reserve_memory=lambda x: base_mem + max(32, x / 2),

service_capacity_modeling/models/org/netflix/evcache.py

@@ -31,9 +31,11 @@ from service_capacity_modeling.interface import QueryPattern
 from service_capacity_modeling.interface import RegionContext
 from service_capacity_modeling.interface import Requirements
 from service_capacity_modeling.models import CapacityModel
+from service_capacity_modeling.models.common import buffer_for_components
 from service_capacity_modeling.models.common import compute_stateful_zone
 from service_capacity_modeling.models.common import get_cores_from_current_capacity
 from service_capacity_modeling.models.common import get_disk_from_current_capacity
+from service_capacity_modeling.models.common import get_effective_disk_per_node_gib
 from service_capacity_modeling.models.common import get_memory_from_current_capacity
 from service_capacity_modeling.models.common import get_network_from_current_capacity
 from service_capacity_modeling.models.common import network_services
@@ -216,7 +218,7 @@ def _estimate_evcache_cluster_zonal(  # noqa: C901,E501 pylint: disable=too-many
     desires: CapacityDesires,
     context: RegionContext,
     copies_per_region: int = 3,
-    max_local_disk_gib: int = 2048,
+    max_local_data_per_node_gib: int = 2048,
     max_regional_size: int = 10000,
     min_instance_memory_gib: int = 12,
     cross_region_replication: Replication = Replication.none,
@@ -278,9 +280,9 @@ def _estimate_evcache_cluster_zonal(  # noqa: C901,E501 pylint: disable=too-many
     requirement.context["osmem"] = reserve_memory(instance.ram_gib)
     # EVCache clusters aim to be at least 2 nodes per zone to start
     # out with for tier 0
-    min_count = 0
+    min_count_for_tier = 0
     if desires.service_tier < 1:
-        min_count = 2
+        min_count_for_tier = 2
 
     is_disk_io_constraint: bool = requirement.disk_gib.mid > 0.0
     adjusted_disk_io_needed = 0.0
@@ -297,17 +299,25 @@ def _estimate_evcache_cluster_zonal(  # noqa: C901,E501 pylint: disable=too-many
         adjusted_disk_io_needed = 1.4 * adjusted_disk_io_needed
         read_write_ratio = reads_per_sec / (reads_per_sec + writes_per_sec)
 
+    needed_disk_gib = int(requirement.disk_gib.mid)
+    disk_buffer_ratio = buffer_for_components(
+        desires.buffers, [BufferComponent.disk]
+    ).ratio
+    max_data_per_node_gib = get_effective_disk_per_node_gib(
+        instance,
+        drive,
+        disk_buffer_ratio,
+        max_local_data_per_node_gib=max_local_data_per_node_gib,
+    )
+    min_count_for_data = math.ceil(needed_disk_gib / max_data_per_node_gib)
     cluster = compute_stateful_zone(
         instance=instance,
         drive=drive,
         needed_cores=int(requirement.cpu_cores.mid),
-        needed_disk_gib=int(requirement.disk_gib.mid),
+        needed_disk_gib=needed_disk_gib,
         needed_memory_gib=int(requirement.mem_gib.mid),
         needed_network_mbps=requirement.network_mbps.mid,
-        # EVCache doesn't use cloud drives to store data, we will have
-        # accounted for the data going on drives or memory via working set
-        max_local_disk_gib=max_local_disk_gib,
-        min_count=max(min_count, 0),
+        min_count=max(min_count_for_data, min_count_for_tier),
        adjusted_disk_io_needed=adjusted_disk_io_needed,
        read_write_ratio=read_write_ratio,
    )
@@ -411,8 +421,9 @@ class NflxEVCacheCapacityModel(CapacityModel):
         )
         max_regional_size: int = extra_model_arguments.get("max_regional_size", 10000)
         # Very large nodes are hard to cache warm
-        max_local_disk_gib: int = extra_model_arguments.get(
-            "max_local_disk_gib", 1024 * 6
+        max_local_data_per_node_gib: int = extra_model_arguments.get(
+            "max_local_data_per_node_gib",
+            extra_model_arguments.get("max_local_disk_gib", 1024 * 5),
         )
         # Very small nodes are hard to run memcache on
         # (Arun) We do not deploy to less than 12 GiB
@@ -429,7 +440,7 @@ class NflxEVCacheCapacityModel(CapacityModel):
             desires=desires,
             copies_per_region=copies_per_region,
             max_regional_size=max_regional_size,
-            max_local_disk_gib=max_local_disk_gib,
+            max_local_data_per_node_gib=max_local_data_per_node_gib,
             min_instance_memory_gib=min_instance_memory_gib,
             cross_region_replication=cross_region_replication,
             context=context,

service_capacity_modeling/models/org/netflix/kafka.py

@@ -34,8 +34,9 @@ from service_capacity_modeling.interface import QueryPattern
 from service_capacity_modeling.interface import RegionContext
 from service_capacity_modeling.interface import Requirements
 from service_capacity_modeling.models import CapacityModel
-from service_capacity_modeling.models import utils
+from service_capacity_modeling.models.common import buffer_for_components
 from service_capacity_modeling.models.common import compute_stateful_zone
+from service_capacity_modeling.models.common import get_effective_disk_per_node_gib
 from service_capacity_modeling.models.common import normalize_cores
 from service_capacity_modeling.models.common import sqrt_staffed_cores
 from service_capacity_modeling.models.common import zonal_requirements_from_current
@@ -245,7 +246,8 @@ def _estimate_kafka_cluster_zonal(  # noqa: C901
     require_attached_disks: bool = False,
     required_zone_size: Optional[int] = None,
     max_regional_size: int = 150,
-    max_local_disk_gib: int = 1024 * 5,
+    max_local_data_per_node_gib: int = 2 * 1024,
+    max_attached_data_per_node_gib: int = 2 * 1024,
     min_instance_cpu: int = 2,
     min_instance_memory_gib: int = 12,
     require_same_instance_family: bool = True,
@@ -291,9 +293,9 @@ def _estimate_kafka_cluster_zonal(  # noqa: C901
     )
 
     # Kafka clusters in prod (tier 0+1) need at least 2 nodes per zone
-    min_count = 1
+    min_count_for_tier = 1
     if desires.service_tier < 2:
-        min_count = 2
+        min_count_for_tier = 2
 
     # Kafka read io / second is zonal
     normalized_to_mib = desires.model_copy(deep=True)
@@ -313,8 +315,19 @@ def _estimate_kafka_cluster_zonal(  # noqa: C901
     write_ios_per_second = max(
         1, (write_mib_per_second * 1024) // drive.seq_io_size_kib
     )
-    max_attached_disk_gib = 8 * 1024
 
+    needed_disk_gib = int(requirement.disk_gib.mid)
+    disk_buffer_ratio = buffer_for_components(
+        buffers=desires.buffers, components=[BufferComponent.disk]
+    ).ratio
+    max_disk_per_node_gib = get_effective_disk_per_node_gib(
+        instance,
+        drive,
+        disk_buffer_ratio,
+        max_local_data_per_node_gib=max_local_data_per_node_gib,
+        max_attached_data_per_node_gib=max_attached_data_per_node_gib,
+    )
+    min_count_for_data = math.ceil(needed_disk_gib / max_disk_per_node_gib)
     cluster = compute_stateful_zone(
         instance=instance,
         drive=drive,
@@ -337,16 +350,11 @@ def _estimate_kafka_cluster_zonal(  # noqa: C901
             # Leave 100% IO headroom for writes
             copies_per_region * (write_ios_per_second / count) * 2,
         ),
-        # Disk buffer is already added when computing kafka disk requirements
-        required_disk_space=lambda x: x,
-        max_local_disk_gib=max_local_disk_gib,
         cluster_size=lambda x: x,
-        min_count=max(min_count, required_zone_size or 1),
+        min_count=max(min_count_for_tier, min_count_for_data, required_zone_size or 1),
         # Sidecars and Variable OS Memory
         # Kafka currently uses 8GiB fixed, might want to change to min(30, x // 2)
         reserve_memory=lambda instance_mem_gib: base_mem + 8,
-        # allow up to 8TiB of attached EBS
-        max_attached_disk_gib=max_attached_disk_gib,
     )
 
     # Communicate to the actual provision that if we want reduced RF
@@ -355,28 +363,18 @@ def _estimate_kafka_cluster_zonal(  # noqa: C901
 
     # This is roughly the disk we would have tried to provision with the current
     # cluster's instance count (or required_zone_size)
-    if required_zone_size is not None:
-        space_gib = max(1, math.ceil(requirement.disk_gib.mid / required_zone_size))
-        ebs_gib = utils.next_n(space_gib, n=100)
-
-        # Max allowed disk size in `compute_stateful_zone`
-        if instance.drive is not None and instance.drive.size_gib > 0:
-            max_size = min(max_local_disk_gib, instance.drive.size_gib)
-        elif max_attached_disk_gib is not None:
-            max_size = max_attached_disk_gib
-        else:
-            max_size = drive.max_size_gib / 3
-
-        # Capacity planner only allows ~ 5TB disk (max_size) for gp3 drives
-        # or max_attached_disk_gib if provided.
-        # If ebs_gib > max_size, we do not have enough instances within the
-        # required_zone_size for the required disk. In these cases, it is
-        # not possible for cluster.count == required_zone_size. We should
-        # allow higher instance count for these cases so that we return some result
-        # If we did not exceed the max disk size with the required_zone_size, then
-        # we only allow topologies that match the desired zone size
-        if ebs_gib <= max_size and cluster.count != required_zone_size:
-            return None
+    # If we *could* have satisified the required_zone_size without exceeding the
+    # max data size per node constraints but the actual cluster count does not
+    # match the required_zone_size, then we want to omit the plan
+    # However if there was no way to satisfy the required_zone_size with the
+    # max data size per node constraints, then we would rather return some result
+    # with a higher instance count since this is the best we can do
+    if (
+        required_zone_size is not None
+        and min_count_for_data <= required_zone_size
+        and cluster.count != required_zone_size
+    ):
+        return None
 
     # Kafka clusters generally should try to stay under some total number
     # of nodes. Orgs do this for all kinds of reasons such as
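A small hedged sketch of that gate, as a hypothetical standalone predicate mirroring the condition above (names and numbers are illustrative only):

    def should_omit_plan(required_zone_size, min_count_for_data, cluster_count):
        # Drop the plan only when the requested zone size was achievable under
        # the per-node data limit, yet the computed cluster diverged from it.
        return (
            required_zone_size is not None
            and min_count_for_data <= required_zone_size
            and cluster_count != required_zone_size
        )

    print(should_omit_plan(8, 6, 12))   # True  -> omit, 8 nodes were feasible
    print(should_omit_plan(8, 10, 12))  # False -> keep the larger cluster
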
@@ -491,8 +489,9 @@ class NflxKafkaCapacityModel(CapacityModel):
 
         max_regional_size: int = extra_model_arguments.get("max_regional_size", 150)
         # Very large nodes are hard to cache warm
-        max_local_disk_gib: int = extra_model_arguments.get(
-            "max_local_disk_gib", 1024 * 5
+        max_local_data_per_node_gib: int = extra_model_arguments.get(
+            "max_local_data_per_node_gib",
+            extra_model_arguments.get("max_local_disk_gib", 1024 * 2),
         )
         min_instance_cpu: int = extra_model_arguments.get("min_instance_cpu", 2)
         min_instance_memory_gib: int = extra_model_arguments.get(
@@ -525,7 +524,7 @@ class NflxKafkaCapacityModel(CapacityModel):
             require_attached_disks=require_attached_disks,
             required_zone_size=required_zone_size,
             max_regional_size=max_regional_size,
-            max_local_disk_gib=max_local_disk_gib,
+            max_local_data_per_node_gib=max_local_data_per_node_gib,
             min_instance_cpu=min_instance_cpu,
             min_instance_memory_gib=min_instance_memory_gib,
             hot_retention_seconds=hot_retention_seconds,

service_capacity_modeling/models/utils.py

@@ -62,5 +62,21 @@ def next_power_of_2(y: float) -> int:
     return 1 if x == 0 else 2 ** (x - 1).bit_length()
 
 
+def is_power_of_2(y: float) -> bool:
+    """Check if x is a power of 2 or 1"""
+    return y == next_power_of_2(y)
+
+
+def next_doubling(x: float, base: int) -> int:
+    # Some clusters were provisioned as non powers of (e.g. 12)
+    # And so if a requirement cannot be satisifed by a cluster
+    # we would want to round up to the next doubling
+    # E.g. 12 -> 24 -> 48
+    # e.g. 3 -> 6 -> 12
+    if x <= base:
+        return int(base)
+    return int(base * (2 ** math.ceil(math.log(x / base, 2))))
+
+
 def next_n(x: float, n: float) -> int:
     return int(math.ceil(x / n)) * int(n)
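Illustrative usage of the two new helpers against version 0.3.70, with values chosen to echo the 12 -> 24 -> 48 ladder in the comments (the import path matches the one used by cassandra.py earlier in this diff):

    from service_capacity_modeling.models.utils import is_power_of_2
    from service_capacity_modeling.models.utils import next_doubling

    assert is_power_of_2(8)
    assert not is_power_of_2(12)

    # Requirements round up along doublings of a non power-of-2 base,
    # never dropping below the base itself.
    assert next_doubling(5, base=12) == 12
    assert next_doubling(13, base=12) == 24
    assert next_doubling(30, base=12) == 48
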

service_capacity_modeling/tools/generate_missing.py

@@ -29,7 +29,9 @@ def build_command(family: str, params: Dict[str, Any], output_path: Path) -> lis
         cmd.extend(["--io-latency-curve", params["io_latency_curve"]])
 
     if params.get("cpu_ipc_scale") is not None:
-        cmd.extend(["--cpu-ipc-scale", str(params["cpu_ipc_scale"])])
+        cpu_ipc_scale = float(params["cpu_ipc_scale"])
+        rounded_cpu_ipc_scale = float(f"{cpu_ipc_scale:.2f}")
+        cmd.extend(["--cpu-ipc-scale", str(rounded_cpu_ipc_scale)])
 
     # Add output path
     cmd.extend(["--output-path", str(output_path)])
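The new lines simply truncate the scale to two decimal places before it reaches the command line; a one-line standard-library demonstration with a made-up value:

    cpu_ipc_scale = 1.23456
    print(str(float(f"{cpu_ipc_scale:.2f}")))  # "1.23"
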

service_capacity_modeling-0.3.68.dist-info/METADATA → service_capacity_modeling-0.3.70.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: service-capacity-modeling
-Version: 0.3.68
+Version: 0.3.70
 Summary: Contains utilities for modeling capacity for pluggable workloads
 Author: Joseph Lynch
 Author-email: josephl@netflix.com

service_capacity_modeling-0.3.68.dist-info/RECORD → service_capacity_modeling-0.3.70.dist-info/RECORD

@@ -40,22 +40,22 @@ service_capacity_modeling/hardware/profiles/shapes/aws/manual_drives.json,sha256
 service_capacity_modeling/hardware/profiles/shapes/aws/manual_instances.json,sha256=i611n6d7hsjd7L8aSEDzfaTMS2jVs-Jc38-vl-NKfs4,18013
 service_capacity_modeling/hardware/profiles/shapes/aws/manual_services.json,sha256=h63675KKmu5IrI3BORDN8fiAqLjAyYHArErKbC7-T30,776
 service_capacity_modeling/models/__init__.py,sha256=XK7rTBW8ZXQY5L9Uy2FwjuFN_KBW3hKw7IrhG1piajs,13567
-service_capacity_modeling/models/common.py,sha256=P4NrPbAdgNKqvVNFUfpocG44Dw8ai9jEDuYIGzRGCRM,33549
+service_capacity_modeling/models/common.py,sha256=oF1WVhd6kXFvKNs7zzkms25U1Si_iqCn48TptyxWd0E,34128
 service_capacity_modeling/models/headroom_strategy.py,sha256=QIkP_K_tK2EGAjloaGfXeAPH5M0UDCN8FlAtwV9xxTA,651
-service_capacity_modeling/models/utils.py,sha256=0F__wz9KAGhPIQfvNp-FTtTANW6-sO4FsyddnuXqSJc,2161
+service_capacity_modeling/models/utils.py,sha256=WosEEg4o1_WSbTb5mL-M1v8JuWJgvS2oWvnDS3qNz3k,2662
 service_capacity_modeling/models/org/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 service_capacity_modeling/models/org/netflix/__init__.py,sha256=m7IaQbo85NEbDvfoPJREIznpzg0YHTCrKP5C1GvnOYM,2378
 service_capacity_modeling/models/org/netflix/aurora.py,sha256=Mi9zd48k64GkKIjAs3J1S2qThguNvyWIy2dUmhwrVhc,12883
-service_capacity_modeling/models/org/netflix/cassandra.py,sha256=TetxgkoZ0hTprXX1GxWpBbOWZdHPtk9VqOi1B6ZeNnw,34569
+service_capacity_modeling/models/org/netflix/cassandra.py,sha256=QdSurxiv9XiCK7z6Omn6hjZf0rHleDfMvn3-JSpr5rA,38548
 service_capacity_modeling/models/org/netflix/counter.py,sha256=hOVRRCgCPU-A5TdLKQXc_mWTQpkKOWRNjOeECdDP7kA,9205
-service_capacity_modeling/models/org/netflix/crdb.py,sha256=AlHdGFpR1RmwQSZsiuiHLR2wTrnmtguT2MMYBDHfdiM,19697
+service_capacity_modeling/models/org/netflix/crdb.py,sha256=ELIbxwfNsJcEkNGW7qtz0SEzt3Vj6wj8QL5QQeebIlo,20635
 service_capacity_modeling/models/org/netflix/ddb.py,sha256=GDoXVIpDDY6xDB0dsiaz7RAPPj-qffTrM9N6w5-5ndg,26311
-service_capacity_modeling/models/org/netflix/elasticsearch.py,sha256=746WYY_WSeMgY-Go7wvWGYOxfFV8ryupNThvDZHPbGo,23685
+service_capacity_modeling/models/org/netflix/elasticsearch.py,sha256=mYmr6DA3EyqXCsEhwJArK_VsUDLq5Pi3E8jUUEfhnRM,24729
 service_capacity_modeling/models/org/netflix/entity.py,sha256=M0vzwhf8UAbVxnXspAkN4GEbq3rix6yoky6W2oDG6a0,8648
-service_capacity_modeling/models/org/netflix/evcache.py,sha256=isONE5UF-b8gCgdGB1iMYOrwgKAHghYJiyoBfcYJ1ac,25246
+service_capacity_modeling/models/org/netflix/evcache.py,sha256=70lgaRgNwJH84o6JVoUDplkCi4v-WzEX3nxVagoJjDc,25775
 service_capacity_modeling/models/org/netflix/graphkv.py,sha256=iS5QDDv9_hNY6nIgdL-umB439qP7-jN-n6_Tl6d-ZSo,8557
 service_capacity_modeling/models/org/netflix/iso_date_math.py,sha256=CPGHLmbGeNqkcYcmCkLKhPZcAU-yTJ2HjvuXdnNyCYc,996
-service_capacity_modeling/models/org/netflix/kafka.py,sha256=FiuBml8uWOVfPFZ37NvZW13nMVtFE4VDf_SZIFdw4sA,25515
+service_capacity_modeling/models/org/netflix/kafka.py,sha256=MDHaht5cWsOJ113uMl6nQ7nllSATrlBCQ-TXLkqMWEk,25466
 service_capacity_modeling/models/org/netflix/key_value.py,sha256=yL5moU0SJD4ocBU9zeGhPYE4KY7oSSq5yqfVWd_Ot2g,9336
 service_capacity_modeling/models/org/netflix/postgres.py,sha256=R3Tog-ZW1Yx6gO3AKqI_wquSm30s01QX9yWR7Jvgk9A,4055
 service_capacity_modeling/models/org/netflix/rds.py,sha256=z9egFBg4Ltqyuz_WHk-_hw-xL-EQNzl1JopJoWdNli8,10768
@@ -67,11 +67,11 @@ service_capacity_modeling/models/org/netflix/zookeeper.py,sha256=BHLjnVDyx15wMGr
 service_capacity_modeling/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 service_capacity_modeling/tools/auto_shape.py,sha256=41pfR40BN-xJS8js4BWSoqT67JhO2_XqzmNeKDoCFBo,14674
 service_capacity_modeling/tools/fetch_pricing.py,sha256=JkgJPTE0SVj8sdGQvo0HN-Hdv3nfA2tu7C_Arad5aX8,3762
-service_capacity_modeling/tools/generate_missing.py,sha256=uvr9fQanx3bm4KTneH-x7EOQvO7cVV0i9gdQvArPCuY,2947
+service_capacity_modeling/tools/generate_missing.py,sha256=XqUs54CPfli4XtK0rEiFKqDvpwCiMAD8wrl7fAxpYHs,3062
 service_capacity_modeling/tools/instance_families.py,sha256=e9JWSIdljSmHI8Nb2MI5Ld9JqQ7WdOtPtV7g3oR7ZiU,7764
-service_capacity_modeling-0.3.68.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
-service_capacity_modeling-0.3.68.dist-info/METADATA,sha256=6Dp2jy5MMYu0UuOser41xX75P0Nov_6tC65J8l4wHEs,10214
-service_capacity_modeling-0.3.68.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-service_capacity_modeling-0.3.68.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
-service_capacity_modeling-0.3.68.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
-service_capacity_modeling-0.3.68.dist-info/RECORD,,
+service_capacity_modeling-0.3.70.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
+service_capacity_modeling-0.3.70.dist-info/METADATA,sha256=-cFPJXpSY6SNZZD0hZTLlF9Lauq6PW083xJpBDwSIXA,10214
+service_capacity_modeling-0.3.70.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+service_capacity_modeling-0.3.70.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
+service_capacity_modeling-0.3.70.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
+service_capacity_modeling-0.3.70.dist-info/RECORD,,