service-capacity-modeling 0.3.61__tar.gz → 0.3.63__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (96)
  1. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/PKG-INFO +18 -6
  2. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/README.md +17 -5
  3. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/capacity_planner.py +0 -1
  4. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/__init__.py +1 -2
  5. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/interface.py +1 -1
  6. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/common.py +2 -3
  7. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/cassandra.py +4 -5
  8. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/crdb.py +0 -1
  9. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/ddb.py +9 -9
  10. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/elasticsearch.py +0 -1
  11. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/evcache.py +0 -1
  12. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/graphkv.py +0 -1
  13. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/kafka.py +2 -6
  14. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/stateless_java.py +0 -1
  15. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/wal.py +0 -1
  16. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/zookeeper.py +0 -2
  17. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/tools/auto_shape.py +3 -3
  18. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/tools/fetch_pricing.py +0 -1
  19. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling.egg-info/PKG-INFO +18 -6
  20. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_hardware_shapes.py +3 -3
  21. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/LICENSE +0 -0
  22. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/__init__.py +0 -0
  23. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/__init__.py +0 -0
  24. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/pricing/aws/3yr-reserved_ec2.json +0 -0
  25. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/pricing/aws/3yr-reserved_zz-overrides.json +0 -0
  26. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/profiles.txt +0 -0
  27. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5.json +0 -0
  28. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5a.json +0 -0
  29. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5d.json +0 -0
  30. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5n.json +0 -0
  31. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6a.json +0 -0
  32. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6i.json +0 -0
  33. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6id.json +0 -0
  34. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c7a.json +0 -0
  35. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c7i.json +0 -0
  36. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m4.json +0 -0
  37. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m5.json +0 -0
  38. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m5n.json +0 -0
  39. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6a.json +0 -0
  40. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6i.json +0 -0
  41. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6id.json +0 -0
  42. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6idn.json +0 -0
  43. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6in.json +0 -0
  44. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m7a.json +0 -0
  45. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m7i.json +0 -0
  46. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r4.json +0 -0
  47. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r5.json +0 -0
  48. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r5n.json +0 -0
  49. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6a.json +0 -0
  50. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6i.json +0 -0
  51. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6id.json +0 -0
  52. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6idn.json +0 -0
  53. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6in.json +0 -0
  54. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r7a.json +0 -0
  55. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r7i.json +0 -0
  56. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_drives.json +0 -0
  57. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_instances.json +0 -0
  58. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_services.json +0 -0
  59. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/__init__.py +0 -0
  60. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/headroom_strategy.py +0 -0
  61. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/__init__.py +0 -0
  62. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/__init__.py +0 -0
  63. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/aurora.py +0 -0
  64. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/counter.py +0 -0
  65. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/entity.py +0 -0
  66. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/iso_date_math.py +0 -0
  67. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/key_value.py +0 -0
  68. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/postgres.py +0 -0
  69. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/rds.py +0 -0
  70. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/time_series.py +0 -0
  71. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/org/netflix/time_series_config.py +0 -0
  72. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/models/utils.py +0 -0
  73. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/stats.py +0 -0
  74. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/tools/__init__.py +0 -0
  75. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/tools/generate_missing.py +0 -0
  76. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling/tools/instance_families.py +0 -0
  77. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling.egg-info/SOURCES.txt +0 -0
  78. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling.egg-info/dependency_links.txt +0 -0
  79. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling.egg-info/entry_points.txt +0 -0
  80. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling.egg-info/requires.txt +0 -0
  81. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/service_capacity_modeling.egg-info/top_level.txt +0 -0
  82. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/setup.cfg +0 -0
  83. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/setup.py +0 -0
  84. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_arguments.py +0 -0
  85. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_buffers.py +0 -0
  86. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_common.py +0 -0
  87. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_desire_merge.py +0 -0
  88. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_generate_scenarios.py +0 -0
  89. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_hardware.py +0 -0
  90. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_headroom_strategy.py +0 -0
  91. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_io2.py +0 -0
  92. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_model_dump.py +0 -0
  93. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_reproducible.py +0 -0
  94. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_simulation.py +0 -0
  95. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_utils.py +0 -0
  96. {service_capacity_modeling-0.3.61 → service_capacity_modeling-0.3.63}/tests/test_working_set.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: service-capacity-modeling
-Version: 0.3.61
+Version: 0.3.63
 Summary: Contains utilities for modeling capacity for pluggable workloads
 Author: Joseph Lynch
 Author-email: josephl@netflix.com
@@ -251,16 +251,28 @@ tox -e py38 -- -k test_<your_functionality> --pdb --pdbcls=IPython.terminal.debu
 ```
 
 ### Pre-commit / Linting
-To install the pre-commit linter
-```
-pre-commit install
-```
-
 To run the linting manually:
 ```
 tox -e pre-commit
 ```
 
+### Installing Pre-commit Hooks
+
+This repository includes a custom pre-commit hook that runs all linting and formatting checks through the tox environment. To install it:
+
+```bash
+# Install the custom pre-commit hook
+tox -e install-hooks
+
+# Or manually copy the hook
+cp hooks/pre-commit .git/hooks/pre-commit
+chmod +x .git/hooks/pre-commit
+```
+
+The hook will automatically:
+- Create the tox pre-commit environment if it doesn't exist
+- Run all pre-commit checks (ruff, flake8, etc.)
+- Ensure all code quality standards are met before commits
 
 ### PyCharm IDE Setup
 Use one of the test environments for IDE development, e.g. `tox -e py310` and then
README.md
@@ -221,16 +221,28 @@ tox -e py38 -- -k test_<your_functionality> --pdb --pdbcls=IPython.terminal.debu
 ```
 
 ### Pre-commit / Linting
-To install the pre-commit linter
-```
-pre-commit install
-```
-
 To run the linting manually:
 ```
 tox -e pre-commit
 ```
 
+### Installing Pre-commit Hooks
+
+This repository includes a custom pre-commit hook that runs all linting and formatting checks through the tox environment. To install it:
+
+```bash
+# Install the custom pre-commit hook
+tox -e install-hooks
+
+# Or manually copy the hook
+cp hooks/pre-commit .git/hooks/pre-commit
+chmod +x .git/hooks/pre-commit
+```
+
+The hook will automatically:
+- Create the tox pre-commit environment if it doesn't exist
+- Run all pre-commit checks (ruff, flake8, etc.)
+- Ensure all code quality standards are met before commits
 
 ### PyCharm IDE Setup
 Use one of the test environments for IDE development, e.g. `tox -e py310` and then
service_capacity_modeling/capacity_planner.py
@@ -586,7 +586,6 @@ class CapacityPlanner:
 
     # Calculates the minimum cpu, memory, and network requirements based on desires.
    def _per_instance_requirements(self, desires) -> Tuple[int, float]:
-
        # Applications often set fixed reservations of heap or OS memory
        per_instance_mem = (
            desires.data_shape.reserved_instance_app_mem_gib
service_capacity_modeling/hardware/__init__.py
@@ -110,8 +110,7 @@ def merge_hardware(existing: Hardware, override: Hardware) -> Hardware:
     for shape in existing_keys | override_keys:
         if shape in existing_keys and shape in override_keys:
             raise ValueError(
-                f"Duplicate shape {shape}! "
-                "Only one file should contain a shape"
+                f"Duplicate shape {shape}! Only one file should contain a shape"
             )
         if shape not in existing_keys:
             merged_field[shape] = override_field[shape]
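The hunk above collapses the duplicate-shape error message into one line; the surrounding guard refuses to merge a shape that appears in two profile files. A simplified, standalone sketch of that rule, using plain dicts in place of the library's Hardware models (names here are hypothetical, not the project's API):

```python
from typing import Dict


def merge_shapes(existing: Dict[str, dict], override: Dict[str, dict]) -> Dict[str, dict]:
    """Union two shape dictionaries, refusing duplicates (simplified sketch)."""
    merged: Dict[str, dict] = dict(existing)
    for shape in existing.keys() | override.keys():
        if shape in existing and shape in override:
            raise ValueError(
                f"Duplicate shape {shape}! Only one file should contain a shape"
            )
        if shape not in existing:
            merged[shape] = override[shape]
    return merged


# merge_shapes({"m5.large": {"cpu": 2}}, {"m5.large": {"cpu": 2}}) raises ValueError
```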
service_capacity_modeling/interface.py
@@ -405,7 +405,7 @@ class Instance(ExcludeUnsetModel):
         self_dict = self.model_dump()
         other_dict = overrides.model_dump(exclude_unset=True)
 
-        for (k, v) in other_dict.items():
+        for k, v in other_dict.items():
             # TODO we need a deep merge on drive (recursive merge)
             if k in ("platforms",):
                 # Unique merge platforms
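The only change here is dropping the redundant parentheses around the loop target. The merge itself relies on pydantic's `model_dump(exclude_unset=True)`, which emits only fields the caller explicitly set, so unset defaults in the override never clobber the base object. A minimal, self-contained illustration of that behaviour with a toy model (not the project's `Instance` class):

```python
from pydantic import BaseModel


class Shape(BaseModel):
    cpu: int = 2
    ram_gib: float = 8.0
    family: str = "m5"


base = Shape(cpu=8, ram_gib=32.0)
override = Shape(family="r5")  # cpu/ram_gib left at their defaults

merged = base.model_dump()
# exclude_unset=True returns only {"family": "r5"}, so the loop cannot
# accidentally reset cpu/ram_gib back to their defaults.
for k, v in override.model_dump(exclude_unset=True).items():
    merged[k] = v

print(merged)  # {'cpu': 8, 'ram_gib': 32.0, 'family': 'r5'}
```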
service_capacity_modeling/models/common.py
@@ -355,9 +355,8 @@ def compute_stateful_zone( # pylint: disable=too-many-positional-arguments
     # or less IOs for a given data size as well as space
     # Contract for disk ios is
     # (per_node_size_gib, node_count) -> (read_ios, write_ios)
-    required_disk_ios: Callable[
-        [float, int], Tuple[float, float]
-    ] = lambda size_gib, count: (0, 0),
+    required_disk_ios: Callable[[float, int], Tuple[float, float]] = lambda size_gib,
+    count: (0, 0),
     required_disk_space: Callable[[float], float] = lambda size_gib: size_gib,
     # The maximum amount of state we can hold per node in the database
     # typically you don't want stateful systems going much higher than a
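As the comment in this hunk states, the contract for `required_disk_ios` is `(per_node_size_gib, node_count) -> (read_ios, write_ios)`; only the formatting of the default lambda changed. A hypothetical estimator matching that signature, with purely illustrative scaling factors (not values from the library):

```python
from typing import Tuple


def example_disk_ios(per_node_size_gib: float, node_count: int) -> Tuple[float, float]:
    """Illustrative only: pretend each GiB of data costs 10 read IOs and 2 write IOs."""
    read_ios = per_node_size_gib * 10
    write_ios = per_node_size_gib * 2
    return read_ios, write_ios


# A caller could pass this in place of the (0, 0) default, e.g.
# compute_stateful_zone(..., required_disk_ios=example_disk_ios)
```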
service_capacity_modeling/models/org/netflix/cassandra.py
@@ -13,7 +13,6 @@ from service_capacity_modeling.interface import AccessConsistency
 from service_capacity_modeling.interface import AccessPattern
 from service_capacity_modeling.interface import Buffer
 from service_capacity_modeling.interface import BufferComponent
-from service_capacity_modeling.interface import BufferIntent
 from service_capacity_modeling.interface import Buffers
 from service_capacity_modeling.interface import CapacityDesires
 from service_capacity_modeling.interface import CapacityPlan
@@ -629,10 +628,10 @@ class NflxCassandraCapacityModel(CapacityModel):
         require_attached_disks: bool = extra_model_arguments.get(
             "require_attached_disks", False
         )
-        required_cluster_size: Optional[
-            int
-        ] = NflxCassandraCapacityModel.get_required_cluster_size(
-            desires.service_tier, extra_model_arguments
+        required_cluster_size: Optional[int] = (
+            NflxCassandraCapacityModel.get_required_cluster_size(
+                desires.service_tier, extra_model_arguments
+            )
         )
 
         max_rps_to_disk: int = extra_model_arguments.get("max_rps_to_disk", 500)
service_capacity_modeling/models/org/netflix/crdb.py
@@ -143,7 +143,6 @@ def _estimate_cockroachdb_cluster_zonal( # noqa=E501 pylint: disable=too-many-p
     min_vcpu_per_instance: int = 4,
     license_fee_per_core: float = 0.0,
 ) -> Optional[CapacityPlan]:
-
     if instance.cpu < min_vcpu_per_instance:
         return None
 
service_capacity_modeling/models/org/netflix/ddb.py
@@ -191,9 +191,9 @@ def _get_write_consistency_percentages(
     transactional_write_percent = 0.0
     non_transactional_write_percent = 1.0
     total_percent = transactional_write_percent + non_transactional_write_percent
-    assert (
-        total_percent == 1
-    ), "transactional_write_percent, non_transactional_write_percent should sum to 1"
+    assert total_percent == 1, (
+        "transactional_write_percent, non_transactional_write_percent should sum to 1"
+    )
     return {
         "transactional_write_percent": transactional_write_percent,
         "non_transactional_write_percent": non_transactional_write_percent,
@@ -452,9 +452,9 @@ class NflxDynamoDBCapacityModel(CapacityModel):
             "data_transfer_gib": data_transfer_plan.total_data_transfer_gib,
             "target_utilization_percentage": target_util_percentage,
         }
-        requirement_context[
-            "replicated_write_capacity_units"
-        ] = write_plan.replicated_write_capacity_units
+        requirement_context["replicated_write_capacity_units"] = (
+            write_plan.replicated_write_capacity_units
+        )
 
         dynamo_costs = {
             "dynamo.regional-writes": write_plan.total_annual_write_cost,
@@ -462,9 +462,9 @@ class NflxDynamoDBCapacityModel(CapacityModel):
             "dynamo.regional-storage": storage_plan.total_annual_data_storage_cost,
         }
 
-        dynamo_costs[
-            "dynamo.regional-transfer"
-        ] = data_transfer_plan.total_annual_data_transfer_cost
+        dynamo_costs["dynamo.regional-transfer"] = (
+            data_transfer_plan.total_annual_data_transfer_cost
+        )
 
         dynamo_costs["dynamo.data-backup"] = backup_plan.total_annual_backup_cost
 
service_capacity_modeling/models/org/netflix/elasticsearch.py
@@ -184,7 +184,6 @@ class NflxElasticsearchDataCapacityModel(CapacityModel):
         desires: CapacityDesires,
         extra_model_arguments: Dict[str, Any],
     ) -> Optional[CapacityPlan]:
-
         copies_per_region: int = _target_rf(
             desires, extra_model_arguments.get("copies_per_region", None)
         )
service_capacity_modeling/models/org/netflix/evcache.py
@@ -217,7 +217,6 @@ def _estimate_evcache_cluster_zonal( # noqa: C901,E501 pylint: disable=too-many
     min_instance_memory_gib: int = 12,
     cross_region_replication: Replication = Replication.none,
 ) -> Optional[CapacityPlan]:
-
     # EVCache doesn't like to deploy on single CPU instances
     if instance.cpu < 2:
         return None
service_capacity_modeling/models/org/netflix/graphkv.py
@@ -30,7 +30,6 @@ class NflxGraphKVCapacityModel(CapacityModel):
         desires: CapacityDesires,
         extra_model_arguments: Dict[str, Any],
     ) -> Optional[CapacityPlan]:
-
         graphkv_app = nflx_java_app_capacity_model.capacity_plan(
             instance=instance,
             drive=drive,
service_capacity_modeling/models/org/netflix/kafka.py
@@ -110,10 +110,8 @@ def _estimate_kafka_requirement( # pylint: disable=too-many-positional-argument
         (write_mib_per_second * MIB_IN_BYTES) * copies_per_region
     ) / MEGABIT_IN_BYTES
     bw_out = (
-        (
-            (read_mib_per_second * MIB_IN_BYTES)
-            + ((write_mib_per_second * MIB_IN_BYTES) * (copies_per_region - 1))
-        )
+        (read_mib_per_second * MIB_IN_BYTES)
+        + ((write_mib_per_second * MIB_IN_BYTES) * (copies_per_region - 1))
     ) / MEGABIT_IN_BYTES
     if (
         current_zonal_capacity
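The restructured `bw_out` expression only drops a redundant pair of parentheses; the bandwidth math is unchanged. A quick worked check of both expressions, assuming the conventional constant values `MIB_IN_BYTES = 1024**2` and `MEGABIT_IN_BYTES = 125_000` (the actual constants are defined elsewhere in the module and the inputs below are made up):

```python
MIB_IN_BYTES = 1024**2      # assumed value of the module constant
MEGABIT_IN_BYTES = 125_000  # assumed value: 1 Mbit = 10**6 / 8 bytes

read_mib_per_second = 100.0
write_mib_per_second = 50.0
copies_per_region = 3

# Inbound: every write is replicated copies_per_region times within the region.
bw_in = (
    (write_mib_per_second * MIB_IN_BYTES) * copies_per_region
) / MEGABIT_IN_BYTES

# Outbound: consumer reads plus replication traffic to the other copies.
bw_out = (
    (read_mib_per_second * MIB_IN_BYTES)
    + ((write_mib_per_second * MIB_IN_BYTES) * (copies_per_region - 1))
) / MEGABIT_IN_BYTES

print(round(bw_in, 1), round(bw_out, 1))  # 1258.3 Mbps in, 1677.7 Mbps out
```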
@@ -252,7 +250,6 @@ def _estimate_kafka_cluster_zonal( # noqa: C901
     min_instance_memory_gib: int = 12,
     require_same_instance_family: bool = True,
 ) -> Optional[CapacityPlan]:
-
     # Kafka doesn't like to deploy on single CPU instances or with < 12 GiB of ram
     if instance.cpu < min_instance_cpu or instance.ram_gib < min_instance_memory_gib:
         return None
@@ -469,7 +466,6 @@ class NflxKafkaArguments(BaseModel):
 
 
 class NflxKafkaCapacityModel(CapacityModel):
-
     HA_DEFAULT_REPLICATION_FACTOR = 2
     SC_DEFAULT_REPLICATION_FACTOR = 3
 
service_capacity_modeling/models/org/netflix/stateless_java.py
@@ -91,7 +91,6 @@ def _estimate_java_app_region( # pylint: disable=too-many-positional-arguments
     failover: bool = True,
     jvm_memory_overhead: float = 2,
 ) -> Optional[CapacityPlan]:
-
     if drive.name != "gp2":
         return None
 
service_capacity_modeling/models/org/netflix/wal.py
@@ -30,7 +30,6 @@ class NflxWALCapacityModel(CapacityModel):
         desires: CapacityDesires,
         extra_model_arguments: Dict[str, Any],
     ) -> Optional[CapacityPlan]:
-
         wal_app = nflx_java_app_capacity_model.capacity_plan(
             instance=instance,
             drive=drive,
service_capacity_modeling/models/org/netflix/zookeeper.py
@@ -35,7 +35,6 @@ def _zk_requirement(
     heap_overhead: float,
     disk_overhead: float,
 ) -> Optional[CapacityRequirement]:
-
     # We only deploy Zookeeper to fast ephemeral storage
     # Due to fsync latency to the disk.
     if instance.drive is None:
@@ -94,7 +93,6 @@ class NflxZookeeperCapacityModel(CapacityModel):
         desires: CapacityDesires,
         extra_model_arguments: Dict[str, Any],
     ) -> Optional[CapacityPlan]:
-
         # We only deploy Zookeeper to 3 zone regions at this time
         if context.zones_in_region != 3:
             return None
service_capacity_modeling/tools/auto_shape.py
@@ -178,9 +178,9 @@ def pull_family(
             # The biggest size drive is the single tenant one
             disk_max_size = max(disk_max_size, int(disk["SizeInGB"]))
 
-        instance_jsons_dict[
-            instance_type_json["InstanceType"].split(".")[1]
-        ] = instance_type_json
+        instance_jsons_dict[instance_type_json["InstanceType"].split(".")[1]] = (
+            instance_type_json
+        )
 
         if disk_type.name.startswith("local"):
             if (
service_capacity_modeling/tools/fetch_pricing.py
@@ -22,7 +22,6 @@ def extract_3yr_upfront_price(price_data):
             and term_attrs.get("PurchaseOption") == "All Upfront"
             and term_attrs.get("OfferingClass") == "standard"
         ):
-
             # Get upfront fee
             for dim in term["priceDimensions"].values():
                 if dim["unit"] == "Quantity":
service_capacity_modeling.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: service-capacity-modeling
-Version: 0.3.61
+Version: 0.3.63
 Summary: Contains utilities for modeling capacity for pluggable workloads
 Author: Joseph Lynch
 Author-email: josephl@netflix.com
@@ -251,16 +251,28 @@ tox -e py38 -- -k test_<your_functionality> --pdb --pdbcls=IPython.terminal.debu
 ```
 
 ### Pre-commit / Linting
-To install the pre-commit linter
-```
-pre-commit install
-```
-
 To run the linting manually:
 ```
 tox -e pre-commit
 ```
 
+### Installing Pre-commit Hooks
+
+This repository includes a custom pre-commit hook that runs all linting and formatting checks through the tox environment. To install it:
+
+```bash
+# Install the custom pre-commit hook
+tox -e install-hooks
+
+# Or manually copy the hook
+cp hooks/pre-commit .git/hooks/pre-commit
+chmod +x .git/hooks/pre-commit
+```
+
+The hook will automatically:
+- Create the tox pre-commit environment if it doesn't exist
+- Run all pre-commit checks (ruff, flake8, etc.)
+- Ensure all code quality standards are met before commits
 
 ### PyCharm IDE Setup
 Use one of the test environments for IDE development, e.g. `tox -e py310` and then
tests/test_hardware_shapes.py
@@ -120,9 +120,9 @@ def test_performance_increases_with_generation() -> None:
                 + f" vs {next_inst} perf={next_perf}"
             )
 
-    assert (
-        len(failed_msgs) == 0
-    ), f"Not all generations passed the performance test, {failed_msgs}."
+    assert len(failed_msgs) == 0, (
+        f"Not all generations passed the performance test, {failed_msgs}."
+    )
 
 
 def test_memory_proportional_to_cpu() -> None: