service-capacity-modeling 0.3.78-py3-none-any.whl → 0.3.80-py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
--- service_capacity_modeling/capacity_planner.py
+++ service_capacity_modeling/capacity_planner.py
@@ -85,7 +85,7 @@ def model_desires(
     data_shape = desires.data_shape.model_copy(deep=True)
 
     query_pattern_simulation = {}
-    for field in sorted(query_pattern.model_fields):
+    for field in sorted(QueryPattern.model_fields):
         d = getattr(query_pattern, field)
         if isinstance(d, Interval):
             query_pattern_simulation[field] = simulate_interval(d, field)(num_sims)
@@ -93,7 +93,7 @@ def model_desires(
             query_pattern_simulation[field] = [d] * num_sims
 
     data_shape_simulation = {}
-    for field in sorted(data_shape.model_fields):
+    for field in sorted(DataShape.model_fields):
         d = getattr(data_shape, field)
         if isinstance(d, Interval):
             data_shape_simulation[field] = simulate_interval(d, field)(num_sims)
@@ -104,14 +104,11 @@ def model_desires(
         query_pattern = QueryPattern(
             **{
                 f: query_pattern_simulation[f][sim]
-                for f in sorted(query_pattern.model_fields)
+                for f in sorted(QueryPattern.model_fields)
             }
         )
         data_shape = DataShape(
-            **{
-                f: data_shape_simulation[f][sim]
-                for f in sorted(data_shape.model_fields)
-            }
+            **{f: data_shape_simulation[f][sim] for f in sorted(DataShape.model_fields)}
         )
 
         d = desires.model_copy(update={"query_pattern": None, "data_shape": None})
@@ -129,7 +126,7 @@ def model_desires_percentiles(
 
     query_pattern_simulation = {}
    query_pattern_means = {}
-    for field in sorted(query_pattern.model_fields):
+    for field in sorted(QueryPattern.model_fields):
         d = getattr(query_pattern, field)
         if isinstance(d, Interval):
             query_pattern_simulation[field] = interval_percentile(d, percentiles)
@@ -143,7 +140,7 @@ def model_desires_percentiles(
 
     data_shape_simulation = {}
     data_shape_means = {}
-    for field in sorted(data_shape.model_fields):
+    for field in sorted(DataShape.model_fields):
         d = getattr(data_shape, field)
         if isinstance(d, Interval):
             data_shape_simulation[field] = interval_percentile(d, percentiles)
@@ -161,13 +158,13 @@ def model_desires_percentiles(
             query_pattern = QueryPattern(
                 **{
                     f: query_pattern_simulation[f][i]
-                    for f in sorted(query_pattern.model_fields)
+                    for f in sorted(QueryPattern.model_fields)
                 }
             )
         except Exception as exp:
             raise exp
         data_shape = DataShape(
-            **{f: data_shape_simulation[f][i] for f in sorted(data_shape.model_fields)}
+            **{f: data_shape_simulation[f][i] for f in sorted(DataShape.model_fields)}
         )
         d = desires.model_copy(deep=True)
         d.query_pattern = query_pattern
@@ -175,10 +172,10 @@ def model_desires_percentiles(
         results.append(d)
 
     mean_qp = QueryPattern(
-        **{f: query_pattern_means[f] for f in sorted(query_pattern.model_fields)}
+        **{f: query_pattern_means[f] for f in sorted(QueryPattern.model_fields)}
     )
     mean_ds = DataShape(
-        **{f: data_shape_means[f] for f in sorted(data_shape.model_fields)}
+        **{f: data_shape_means[f] for f in sorted(DataShape.model_fields)}
     )
     d = desires.model_copy(deep=True)
     d.query_pattern = mean_qp
@@ -298,7 +295,7 @@ def _add_requirement(
 
     requirements = accum[requirement.requirement_type]
 
-    for field in sorted(requirement.model_fields):
+    for field in sorted(CapacityRequirement.model_fields):
         d = getattr(requirement, field)
         if isinstance(d, Interval):
             if field not in requirements:
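
All of the capacity_planner.py hunks above make the same substitution: iterate `model_fields` on the model class (QueryPattern, DataShape, CapacityRequirement) rather than on an instance. In Pydantic v2, `model_fields` is class-level metadata, and recent 2.x releases deprecate reading it through an instance, so the class-level spelling is the forward-compatible one; the contents and iteration order are unchanged. A minimal sketch of the difference, assuming Pydantic v2 (the `Example` model is hypothetical):

    from pydantic import BaseModel

    class Example(BaseModel):
        reads_per_second: float = 0.0
        writes_per_second: float = 0.0

    # model_fields lives on the class: a dict of field name -> FieldInfo
    print(sorted(Example.model_fields))  # ['reads_per_second', 'writes_per_second']

    # Reading it through an instance still returns the same dict, but newer
    # Pydantic 2.x releases emit a deprecation warning for this access pattern
    instance = Example()
    assert sorted(instance.model_fields) == sorted(Example.model_fields)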
--- service_capacity_modeling/models/org/netflix/counter.py
+++ service_capacity_modeling/models/org/netflix/counter.py
@@ -46,7 +46,8 @@ class NflxCounterArguments(NflxJavaAppArguments):
     )
     counter_cardinality: NflxCounterCardinality = Field(
         alias="counter.cardinality",
-        description="Low means < 1,000, medium (1,000—100,000), high means > 100,000.",
+        description="Low means < 10,000, medium (10,000—1,000,000), high means "
+        "> 1,000,000.",
     )
     counter_mode: NflxCounterMode = Field(
         alias="counter.mode",
@@ -92,36 +93,57 @@ class NflxCounterCapacityModel(CapacityModel):
     def compose_with(
         user_desires: CapacityDesires, extra_model_arguments: Dict[str, Any]
     ) -> Tuple[Tuple[str, Callable[[CapacityDesires], CapacityDesires]], ...]:
-        stores = [("org.netflix.evcache", lambda x: x)]
-        if extra_model_arguments["counter.mode"] != NflxCounterMode.best_effort.value:
+        stores = []
 
+        if extra_model_arguments["counter.mode"] == NflxCounterMode.best_effort.value:
+            stores.append(("org.netflix.evcache", lambda x: x))
+        else:
+            # Shared evcache cluster is used for eventual and exact counters
             def _modify_cassandra_desires(
                 user_desires: CapacityDesires,
             ) -> CapacityDesires:
                 modified = user_desires.model_copy(deep=True)
+                counter_cardinality = extra_model_arguments["counter.cardinality"]
+
+                counter_deltas_per_second = (
+                    user_desires.query_pattern.estimated_write_per_second
+                )
 
-                # counts per second
-                cps = user_desires.query_pattern.estimated_write_per_second
+                # low cardinality : rollups happen once every 60 seconds
+                # medium cardinality : rollups happen once every 30 seconds
+                # high cardinality : rollups happen once every 10 seconds
+                # TODO: Account for read amplification from time slice configs
+                # for better model accuracy
+                if counter_cardinality == NflxCounterCardinality.low.value:
+                    rollups_per_second = counter_deltas_per_second.scale(0.0167)
+                elif counter_cardinality == NflxCounterCardinality.medium.value:
+                    rollups_per_second = counter_deltas_per_second.scale(0.0333)
+                else:
+                    rollups_per_second = counter_deltas_per_second.scale(0.1)
 
-                # rollups happen once every 10 seconds after a write
-                rps = cps.scale(0.1)
-                modified.query_pattern.estimated_read_per_second = rps
+                modified.query_pattern.estimated_read_per_second = rollups_per_second
 
                 # storage size fix
-                event_size = 128  # bytes
-                count_size = 64  # bytes
+                delta_event_size = 256  # bytes
+                rolled_up_count_size = 128  # bytes
                 GiB = 1024 * 1024 * 1024
-                retention = timedelta(days=1).total_seconds()
+
+                # Events can be discarded as soon as rollup is complete
+                # We default to a 1 day slice with 2 day retention
+                retention = timedelta(days=2).total_seconds()
+
                 cardinality = {
-                    "low": 1_000,
-                    "medium": 10_000,
-                    "high": 100_000,
+                    "low": 10_000,
+                    "medium": 100_000,
+                    "high": 1_000_000,
                 }[extra_model_arguments["counter.cardinality"]]
 
-                event_store = cps.scale(count_size * retention / GiB)
-                count_store = event_size * cardinality / GiB
-                store = event_store.offset(count_store)
-                modified.data_shape.estimated_state_size_gib = store
+                event_storage_size = counter_deltas_per_second.scale(
+                    delta_event_size * retention / GiB
+                )
+                rollup_storage_size = rolled_up_count_size * cardinality / GiB
+                total_store_size = event_storage_size.offset(rollup_storage_size)
+                modified.data_shape.estimated_state_size_gib = total_store_size
 
                 return modified
 
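To make the new counter sizing concrete, the sketch below works the arithmetic through by hand for a hypothetical workload. Plain floats stand in for the library's Interval type (whose .scale and .offset methods the real code calls), and the write rate is an assumed input:

    from datetime import timedelta

    GiB = 1024 * 1024 * 1024

    # Hypothetical workload: 10k counter increments (delta events) per second
    # against a "medium" cardinality counter space (assumed inputs).
    counter_deltas_per_second = 10_000.0
    counter_cardinality = "medium"

    # Rollups fire roughly every 60/30/10 seconds for low/medium/high
    # cardinality, i.e. read-rate scale factors of ~1/60, ~1/30, and 1/10.
    rollup_scale = {"low": 0.0167, "medium": 0.0333, "high": 0.1}
    rollups_per_second = counter_deltas_per_second * rollup_scale[counter_cardinality]
    print(round(rollups_per_second))  # ~333 rollup reads/sec

    # Storage: 256-byte delta events kept for the 2-day retention window,
    # plus one 128-byte rolled-up count per distinct counter.
    retention = timedelta(days=2).total_seconds()  # 172,800 seconds
    distinct_counters = {"low": 10_000, "medium": 100_000, "high": 1_000_000}
    event_storage_gib = counter_deltas_per_second * 256 * retention / GiB
    rollup_storage_gib = 128 * distinct_counters[counter_cardinality] / GiB
    print(round(event_storage_gib + rollup_storage_gib, 1))  # ~412.0 GiB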
--- service_capacity_modeling/stats.py
+++ service_capacity_modeling/stats.py
@@ -45,7 +45,11 @@ def _gamma_fn_from_params(
 
     def f(k: float) -> float:
         zero = high / low
-        return float(gammaf(k, high_p * k / mid) / gammaf(k, low_p * k / mid) - zero)
+        high_x = high_p * k / mid
+        low_x = low_p * k / mid
+        result = gammaf(k, high_x) / gammaf(k, low_x) - zero
+        # Use .item() to convert NumPy scalar to Python float
+        return result.item() if hasattr(result, "item") else float(result)
 
     return f
 
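The stats.py change above splits the expression and converts the result explicitly because `gammaf` can return a NumPy scalar rather than a plain float. A small sketch of the behavior, assuming `gammaf` is a SciPy incomplete-gamma routine such as `scipy.special.gammainc` (the module's actual alias is not shown in this diff):

    import numpy as np
    from scipy.special import gammainc  # regularized lower incomplete gamma

    # SciPy ufuncs return NumPy scalars (np.float64) even for scalar inputs
    result = gammainc(2.0, 1.5) / gammainc(2.0, 0.5) - 1.0
    print(type(result))  # <class 'numpy.float64'>

    # .item() converts a NumPy scalar to the native Python type; the
    # hasattr guard keeps the expression working for plain Python floats too
    value = result.item() if hasattr(result, "item") else float(result)
    print(type(value))  # <class 'float'>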
--- service_capacity_modeling-0.3.78.dist-info/METADATA
+++ service_capacity_modeling-0.3.80.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: service-capacity-modeling
-Version: 0.3.78
+Version: 0.3.80
 Summary: Contains utilities for modeling capacity for pluggable workloads
 Author: Joseph Lynch
 Author-email: josephl@netflix.com
--- service_capacity_modeling-0.3.78.dist-info/RECORD
+++ service_capacity_modeling-0.3.80.dist-info/RECORD
@@ -1,7 +1,7 @@
 service_capacity_modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-service_capacity_modeling/capacity_planner.py,sha256=nmjAWpUtL5EKbEtQzSGWrM8gPSAVc7EX5M4ApiLbQnA,32828
+service_capacity_modeling/capacity_planner.py,sha256=JRagEFlg3u_zB1N5GzGsKAN55JZLad6p4IF_PmL8kcg,32780
 service_capacity_modeling/interface.py,sha256=p-pOUQUdA6VA-moTjFSBLQAZCRvcjFaAfb2jOmxMWsY,39074
-service_capacity_modeling/stats.py,sha256=ievz2f1H-a6u8wHwXBQ47e2EhqH9BDrdL6VZAXQDK2w,5964
+service_capacity_modeling/stats.py,sha256=LCNUcQPfwF5hhIZwsfAsDe4ZbnuhDnl3vQHKfpK61Xc,6142
 service_capacity_modeling/hardware/__init__.py,sha256=P5ostvoSOMUqPODtepeFYb4qfTVH0E73mMFraP49rYU,9196
 service_capacity_modeling/hardware/profiles/__init__.py,sha256=7-y3JbCBkgzaAjFla2RIymREcImdZ51HTl3yn3vzoGw,1602
 service_capacity_modeling/hardware/profiles/profiles.txt,sha256=tOfSR3B0E0uAOaXd5SLI3ioq83UYZ3yhK7UHhsK4awQ,49
@@ -53,7 +53,7 @@ service_capacity_modeling/models/org/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeu
 service_capacity_modeling/models/org/netflix/__init__.py,sha256=W6rKkhSdPhjD-awm7mYakAhw7VKLNJYkqv-U3LfkMew,2444
 service_capacity_modeling/models/org/netflix/aurora.py,sha256=Js33ZjxCtt34HiDPsWRT9mjKCAsnnCo9du15QArVFMo,13073
 service_capacity_modeling/models/org/netflix/cassandra.py,sha256=Fp37bHtWRJctJYYJhW78YbLeFXoX26QxREP_BMEItNE,39003
-service_capacity_modeling/models/org/netflix/counter.py,sha256=T-lBgxUMxZUojDyMJBR3HQI1u6fJujuPiQ6rGTZaMl4,9278
+service_capacity_modeling/models/org/netflix/counter.py,sha256=XopKlNMdvO5EbxAggn-KW_q7L3aKtXLXbry4ocl6i5Q,10494
 service_capacity_modeling/models/org/netflix/crdb.py,sha256=iW7tyG8jpXhHIdXrw3DPYSHRAknPN42MlCRLJO4o9C8,20826
 service_capacity_modeling/models/org/netflix/ddb.py,sha256=9qRiuTqWev9zbYFFzewyowU7M41uALsuLklYx20yAXw,26502
 service_capacity_modeling/models/org/netflix/elasticsearch.py,sha256=zPrC6b2LNrAh3IWE3HCMUEYASacjYbHChbO4WZSMma4,25234
@@ -75,9 +75,9 @@ service_capacity_modeling/tools/auto_shape.py,sha256=Jx9H2ay9-H_kUDjtB141owQNxGF
 service_capacity_modeling/tools/fetch_pricing.py,sha256=Qp-XMymkY1dvtyS51RufmEpfgOHv-IQ-XyzS8wp2-qM,4021
 service_capacity_modeling/tools/generate_missing.py,sha256=F7YqvMJAV4nZc20GNrlIsnQSF8_77sLgwYZqc5k4LDg,3099
 service_capacity_modeling/tools/instance_families.py,sha256=e5RuYkCLUITvsAazDH12B6KjX_PaBsv6Ne3mj0HK_sQ,9223
-service_capacity_modeling-0.3.78.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
-service_capacity_modeling-0.3.78.dist-info/METADATA,sha256=p1ASGJf0gody4zS66LKxxJQH8wyXFcxWTSfpfVfDhME,10361
-service_capacity_modeling-0.3.78.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-service_capacity_modeling-0.3.78.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
-service_capacity_modeling-0.3.78.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
-service_capacity_modeling-0.3.78.dist-info/RECORD,,
+service_capacity_modeling-0.3.80.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
+service_capacity_modeling-0.3.80.dist-info/METADATA,sha256=XBKHEm8Iy72isKJusZ0FifYh8KrWacjh45kd-oXp7m0,10361
+service_capacity_modeling-0.3.80.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+service_capacity_modeling-0.3.80.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
+service_capacity_modeling-0.3.80.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
+service_capacity_modeling-0.3.80.dist-info/RECORD,,