service-capacity-modeling 0.3.99.tar.gz → 0.3.101.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/PKG-INFO +1 -1
  2. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/aurora.py +1 -1
  3. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/control.py +32 -5
  4. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/entity.py +35 -2
  5. service_capacity_modeling-0.3.101/service_capacity_modeling/tools/capture_baseline_costs.py +350 -0
  6. service_capacity_modeling-0.3.101/service_capacity_modeling/tools/data/__init__.py +0 -0
  7. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling.egg-info/PKG-INFO +1 -1
  8. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling.egg-info/SOURCES.txt +2 -0
  9. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/LICENSE +0 -0
  10. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/README.md +0 -0
  11. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/__init__.py +0 -0
  12. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/capacity_planner.py +0 -0
  13. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/enum_utils.py +0 -0
  14. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/__init__.py +0 -0
  15. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/__init__.py +0 -0
  16. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/pricing/aws/3yr-reserved_ec2.json +0 -0
  17. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/pricing/aws/3yr-reserved_rds.json +0 -0
  18. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/pricing/aws/3yr-reserved_zz-overrides.json +0 -0
  19. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/profiles.txt +0 -0
  20. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5.json +0 -0
  21. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5a.json +0 -0
  22. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5d.json +0 -0
  23. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c5n.json +0 -0
  24. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6a.json +0 -0
  25. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6i.json +0 -0
  26. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c6id.json +0 -0
  27. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c7a.json +0 -0
  28. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c7i.json +0 -0
  29. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_c8i.json +0 -0
  30. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_db_r6g.json +0 -0
  31. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_db_r6i.json +0 -0
  32. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_db_r7g.json +0 -0
  33. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_db_r7i.json +0 -0
  34. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_i3en.json +0 -0
  35. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_i4i.json +0 -0
  36. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_i7i.json +0 -0
  37. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m4.json +0 -0
  38. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m5.json +0 -0
  39. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m5n.json +0 -0
  40. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6a.json +0 -0
  41. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6i.json +0 -0
  42. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6id.json +0 -0
  43. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6idn.json +0 -0
  44. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m6in.json +0 -0
  45. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m7a.json +0 -0
  46. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m7i.json +0 -0
  47. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_m8i.json +0 -0
  48. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r4.json +0 -0
  49. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r5.json +0 -0
  50. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r5n.json +0 -0
  51. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6a.json +0 -0
  52. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6i.json +0 -0
  53. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6id.json +0 -0
  54. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6idn.json +0 -0
  55. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r6in.json +0 -0
  56. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r7a.json +0 -0
  57. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r7i.json +0 -0
  58. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/auto_r8i.json +0 -0
  59. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_drives.json +0 -0
  60. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_instances.json +0 -0
  61. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/hardware/profiles/shapes/aws/manual_services.json +0 -0
  62. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/interface.py +0 -0
  63. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/__init__.py +0 -0
  64. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/common.py +0 -0
  65. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/headroom_strategy.py +0 -0
  66. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/__init__.py +0 -0
  67. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/__init__.py +1 -1
  68. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/cassandra.py +0 -0
  69. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/counter.py +0 -0
  70. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/crdb.py +0 -0
  71. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/ddb.py +0 -0
  72. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/elasticsearch.py +0 -0
  73. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/evcache.py +0 -0
  74. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/graphkv.py +0 -0
  75. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/iso_date_math.py +0 -0
  76. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/kafka.py +0 -0
  77. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/key_value.py +0 -0
  78. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/postgres.py +0 -0
  79. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/rds.py +0 -0
  80. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/stateless_java.py +0 -0
  81. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/time_series.py +0 -0
  82. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/time_series_config.py +0 -0
  83. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/wal.py +0 -0
  84. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/org/netflix/zookeeper.py +0 -0
  85. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/models/utils.py +0 -0
  86. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/stats.py +0 -0
  87. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/tools/__init__.py +0 -0
  88. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/tools/auto_shape.py +0 -0
  89. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/tools/fetch_pricing.py +0 -0
  90. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/tools/generate_missing.py +0 -0
  91. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling/tools/instance_families.py +0 -0
  92. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling.egg-info/dependency_links.txt +0 -0
  93. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling.egg-info/entry_points.txt +0 -0
  94. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling.egg-info/requires.txt +0 -0
  95. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/service_capacity_modeling.egg-info/top_level.txt +0 -0
  96. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/setup.cfg +0 -0
  97. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/setup.py +0 -0
  98. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_arguments.py +0 -0
  99. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_buffers.py +0 -0
  100. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_common.py +0 -0
  101. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_desire_merge.py +0 -0
  102. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_enum_utils.py +0 -0
  103. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_generate_scenarios.py +0 -0
  104. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_hardware.py +0 -0
  105. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_hardware_shapes.py +0 -0
  106. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_headroom_strategy.py +0 -0
  107. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_io2.py +0 -0
  108. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_model_dump.py +0 -0
  109. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_reproducible.py +0 -0
  110. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_simulation.py +0 -0
  111. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_utils.py +0 -0
  112. {service_capacity_modeling-0.3.99 → service_capacity_modeling-0.3.101}/tests/test_working_set.py +0 -0

--- service_capacity_modeling-0.3.99/PKG-INFO
+++ service_capacity_modeling-0.3.101/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: service-capacity-modeling
-Version: 0.3.99
+Version: 0.3.101
 Summary: Contains utilities for modeling capacity for pluggable workloads
 Author: Joseph Lynch
 Author-email: josephl@netflix.com

--- service_capacity_modeling-0.3.99/service_capacity_modeling/models/org/netflix/aurora.py
+++ service_capacity_modeling-0.3.101/service_capacity_modeling/models/org/netflix/aurora.py
@@ -307,7 +307,7 @@ class NflxAuroraCapacityModel(CapacityModel):
 
     @staticmethod
     def allowed_platforms() -> Tuple[Platform, ...]:
-        return Platform.aurora_mysql, Platform.aurora_mysql
+        return Platform.aurora_mysql, Platform.aurora_postgres
 
     @staticmethod
     def default_desires(
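
Note on the change above: 0.3.99 listed Platform.aurora_mysql twice, so aurora_postgres was never an allowed platform; 0.3.101 fixes the duplicate. A minimal sanity check, assuming Platform is exported from service_capacity_modeling.interface and the model class is importable from the module shown in the file list:

    from service_capacity_modeling.interface import Platform  # assumed export location
    from service_capacity_modeling.models.org.netflix.aurora import NflxAuroraCapacityModel

    # Both Aurora engines should now be advertised (previously aurora_mysql appeared twice).
    platforms = NflxAuroraCapacityModel.allowed_platforms()
    assert Platform.aurora_mysql in platforms
    assert Platform.aurora_postgres in platforms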

--- service_capacity_modeling-0.3.99/service_capacity_modeling/models/org/netflix/control.py
+++ service_capacity_modeling-0.3.101/service_capacity_modeling/models/org/netflix/control.py
@@ -67,23 +67,50 @@ class NflxControlCapacityModel(CapacityModel):
     def compose_with(
         user_desires: CapacityDesires, extra_model_arguments: Dict[str, Any]
     ) -> Tuple[Tuple[str, Callable[[CapacityDesires], CapacityDesires]], ...]:
-        def _modify_aurora_desires(
+        def _modify_rds_desires(
             user_desires: CapacityDesires,
         ) -> CapacityDesires:
+            """RDS proxy for Control service."""
             relaxed = user_desires.model_copy(deep=True)
 
-            # Aurora doesn't support tier 0, so downgrade to tier 1
+            # RDS doesn't support tier 0
             if relaxed.service_tier == 0:
                 relaxed.service_tier = 1
 
-            # Control caches reads in memory, only writes go to Aurora
-            # Set read QPS to minimal since Aurora only handles writes
+            # Control caches reads, so proxy only sees writes + minimal reads
+            relaxed.query_pattern.estimated_read_per_second = certain_int(1)
+            if relaxed.query_pattern.estimated_write_per_second:
+                relaxed.query_pattern.estimated_write_per_second = (
+                    relaxed.query_pattern.estimated_write_per_second.scale(0.05)
+                )
+
+            # Minimal data footprint for connection metadata
+            relaxed.data_shape.estimated_state_size_gib = (
+                relaxed.data_shape.estimated_state_size_gib.scale(0.01)
+            )
+
+            return relaxed
+
+        def _modify_postgres_desires(
+            user_desires: CapacityDesires,
+        ) -> CapacityDesires:
+            relaxed = user_desires.model_copy(deep=True)
+
+            # Postgres doesn't support tier 0, so downgrade to tier 1
+            if relaxed.service_tier == 0:
+                relaxed.service_tier = 1
+
+            # Control caches reads in memory, only writes go to Postgres
+            # Set read QPS to minimal since Postgres only handles writes
             if relaxed.query_pattern.estimated_read_per_second:
                 relaxed.query_pattern.estimated_read_per_second = certain_int(1)
 
             return relaxed
 
-        return (("org.netflix.aurora", _modify_aurora_desires),)
+        return (
+            ("org.netflix.rds", _modify_rds_desires),
+            ("org.netflix.postgres", _modify_postgres_desires),
+        )
 
     @staticmethod
     def default_desires(
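
The net effect of this hunk is that Control now composes with an RDS proxy and a Postgres backend instead of Aurora, each receiving reshaped desires (reads pinned to ~1/s, writes scaled to 5%, and state scaled to 1% for the proxy). A rough sketch of exercising the composition directly, assuming compose_with is a plain staticmethod and the class is importable from the control module listed above:

    from service_capacity_modeling.interface import (
        CapacityDesires,
        DataShape,
        QueryPattern,
        certain_int,
    )
    from service_capacity_modeling.models.org.netflix.control import (  # assumed import path
        NflxControlCapacityModel,
    )

    desires = CapacityDesires(
        service_tier=0,
        query_pattern=QueryPattern(
            estimated_read_per_second=certain_int(50_000),
            estimated_write_per_second=certain_int(5_000),
        ),
        data_shape=DataShape(estimated_state_size_gib=certain_int(100)),
    )

    # Expect two pairs: ("org.netflix.rds", ...) and ("org.netflix.postgres", ...).
    for model_name, modifier in NflxControlCapacityModel.compose_with(desires, {}):
        reshaped = modifier(desires)
        print(model_name, reshaped.service_tier)  # both modifiers downgrade tier 0 to 1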

--- service_capacity_modeling-0.3.99/service_capacity_modeling/models/org/netflix/entity.py
+++ service_capacity_modeling-0.3.101/service_capacity_modeling/models/org/netflix/entity.py
@@ -30,6 +30,10 @@ class NflxEntityCapacityModel(CapacityModel):
         desires: CapacityDesires,
         extra_model_arguments: Dict[str, Any],
     ) -> Optional[CapacityPlan]:
+        # Entity doesn't support tier 0
+        if desires.service_tier == 0:
+            return None
+
         # Entity wants 20GiB root volumes
         extra_model_arguments.setdefault("root_disk_gib", 20)
 
@@ -59,7 +63,35 @@ class NflxEntityCapacityModel(CapacityModel):
     def compose_with(
         user_desires: CapacityDesires, extra_model_arguments: Dict[str, Any]
     ) -> Tuple[Tuple[str, Callable[[CapacityDesires], CapacityDesires]], ...]:
-        def _modify_crdb_desires(
+        def _modify_rds_desires(
+            user_desires: CapacityDesires,
+        ) -> CapacityDesires:
+            """RDS proxy handles connection pooling and auth translation.
+            Capacity needs are much lower than the backend database."""
+            relaxed = user_desires.model_copy(deep=True)
+
+            # RDS doesn't support tier 0
+            if relaxed.service_tier == 0:
+                relaxed.service_tier = 1
+
+            # Proxy layer sees ~5% of actual load due to connection pooling
+            if relaxed.query_pattern.estimated_read_per_second:
+                relaxed.query_pattern.estimated_read_per_second = (
+                    relaxed.query_pattern.estimated_read_per_second.scale(0.05)
+                )
+            if relaxed.query_pattern.estimated_write_per_second:
+                relaxed.query_pattern.estimated_write_per_second = (
+                    relaxed.query_pattern.estimated_write_per_second.scale(0.05)
+                )
+
+            # Proxy only needs to store connection metadata, not actual data
+            relaxed.data_shape.estimated_state_size_gib = (
+                relaxed.data_shape.estimated_state_size_gib.scale(0.01)
+            )
+
+            return relaxed
+
+        def _modify_postgres_desires(
             user_desires: CapacityDesires,
         ) -> CapacityDesires:
             relaxed = user_desires.model_copy(deep=True)
@@ -96,7 +128,8 @@ class NflxEntityCapacityModel(CapacityModel):
             return relaxed
 
         return (
-            ("org.netflix.cockroachdb", _modify_crdb_desires),
+            ("org.netflix.rds", _modify_rds_desires),
+            ("org.netflix.postgres", _modify_postgres_desires),
             ("org.netflix.key-value", lambda x: x),
             ("org.netflix.elasticsearch", _modify_elasticsearch_desires),
         )
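
To put numbers on the proxy scaling above: a workload with 100,000 writes/s and 500 GiB of state would be presented to the RDS proxy as roughly 5,000 writes/s and 5 GiB. A tiny illustration, assuming Interval.scale multiplies the interval bounds (its exact semantics are not shown in this diff):

    from service_capacity_modeling.interface import certain_int

    writes = certain_int(100_000)       # what the backend database sees
    state = certain_int(500)            # 500 GiB of entity data
    proxy_writes = writes.scale(0.05)   # proxy sized for ~5% of the write load
    proxy_state = state.scale(0.01)     # proxy keeps ~1% (connection metadata only)
    # Expected roughly 5000 and 5 if scale() behaves as assumed above.
    print(proxy_writes.mid, proxy_state.mid)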

--- /dev/null
+++ service_capacity_modeling-0.3.101/service_capacity_modeling/tools/capture_baseline_costs.py
@@ -0,0 +1,350 @@
+#!/usr/bin/env python3
+"""
+Capture current cost outputs for regression testing.
+
+This script runs capacity planning for various scenarios and captures
+the cost breakdowns to use as baselines for regression tests.
+
+Usage:
+    python -m service_capacity_modeling.tools.capture_baseline_costs
+"""
+
+import json
+from pathlib import Path
+from typing import Any
+
+from service_capacity_modeling.capacity_planner import planner
+from service_capacity_modeling.interface import (
+    AccessConsistency,
+    CapacityDesires,
+    certain_float,
+    certain_int,
+    Consistency,
+    DataShape,
+    GlobalConsistency,
+    Interval,
+    QueryPattern,
+)
+
+
+def capture_costs(
+    model_name: str,
+    region: str,
+    desires: CapacityDesires,
+    extra_args: dict[str, Any] | None = None,
+    scenario_name: str = "",
+) -> dict[str, Any]:
+    """Capture all cost breakdown for a planning scenario."""
+    try:
+        cap_plans = planner.plan_certain(
+            model_name=model_name,
+            region=region,
+            desires=desires,
+            num_results=1,
+            extra_model_arguments=extra_args or {},
+        )
+
+        if not cap_plans:
+            return {"error": "No capacity plans generated", "scenario": scenario_name}
+
+        cap_plan = cap_plans[0]
+        clusters = cap_plan.candidate_clusters
+
+        result = {
+            "scenario": scenario_name,
+            "model": model_name,
+            "region": region,
+            "service_tier": desires.service_tier,
+            "annual_costs": dict(
+                sorted((k, float(v)) for k, v in clusters.annual_costs.items())
+            ),
+            "total_annual_cost": float(clusters.total_annual_cost),
+            "cluster_count": len(clusters.zonal) + len(clusters.regional),
+            "service_count": len(clusters.services),
+        }
+
+        # Add instance info
+        if clusters.zonal:
+            result["instance_name"] = clusters.zonal[0].instance.name
+            result["instance_count"] = clusters.zonal[0].count
+            result["deployment"] = "zonal"
+        elif clusters.regional:
+            result["instance_name"] = clusters.regional[0].instance.name
+            result["instance_count"] = clusters.regional[0].count
+            result["deployment"] = "regional"
+
+        return result
+    except (ValueError, KeyError, AttributeError) as e:
+        return {"error": str(e), "scenario": scenario_name}
+
+
+# Define test scenarios for each service
+# Each scenario: (model_name, region, desires, extra_args, scenario_name)
+scenarios: list[tuple[str, str, CapacityDesires, dict[str, Any] | None, str]] = []
+
+# RDS scenarios
+rds_small = CapacityDesires(
+    service_tier=1,
+    query_pattern=QueryPattern(
+        estimated_read_per_second=certain_int(200),
+        estimated_write_per_second=certain_int(100),
+        estimated_mean_read_latency_ms=certain_float(10),
+        estimated_mean_write_latency_ms=certain_float(10),
+    ),
+    data_shape=DataShape(
+        estimated_state_size_gib=certain_int(50),
+    ),
+)
+
+rds_tier3 = CapacityDesires(
+    service_tier=3,
+    query_pattern=QueryPattern(
+        estimated_read_per_second=certain_int(200),
+        estimated_write_per_second=certain_int(100),
+        estimated_mean_read_latency_ms=certain_float(20),
+        estimated_mean_write_latency_ms=certain_float(20),
+    ),
+    data_shape=DataShape(
+        estimated_state_size_gib=certain_int(200),
+    ),
+)
+
+scenarios.extend(
+    [
+        ("org.netflix.rds", "us-east-1", rds_small, None, "rds_small_tier1"),
+        ("org.netflix.rds", "us-east-1", rds_tier3, None, "rds_tier3"),
+    ]
+)
+
+# Aurora scenarios
+aurora_small = CapacityDesires(
+    service_tier=1,
+    query_pattern=QueryPattern(
+        estimated_read_per_second=certain_int(100),
+        estimated_write_per_second=certain_int(100),
+        estimated_mean_read_latency_ms=certain_float(10),
+        estimated_mean_write_latency_ms=certain_float(10),
+    ),
+    data_shape=DataShape(
+        estimated_state_size_gib=certain_int(50),
+    ),
+)
+
+aurora_tier3 = CapacityDesires(
+    service_tier=3,
+    query_pattern=QueryPattern(
+        estimated_read_per_second=certain_int(200),
+        estimated_write_per_second=certain_int(100),
+        estimated_mean_read_latency_ms=certain_float(10),
+        estimated_mean_write_latency_ms=certain_float(10),
+    ),
+    data_shape=DataShape(
+        estimated_state_size_gib=certain_int(200),
+    ),
+)
+
+scenarios.extend(
+    [
+        ("org.netflix.aurora", "us-east-1", aurora_small, None, "aurora_small_tier1"),
+        ("org.netflix.aurora", "us-east-1", aurora_tier3, None, "aurora_tier3"),
+    ]
+)
+
+# Cassandra scenarios
+cassandra_small_high_qps = CapacityDesires(
+    service_tier=1,
+    query_pattern=QueryPattern(
+        estimated_read_per_second=certain_int(100_000),
+        estimated_write_per_second=certain_int(100_000),
+        estimated_mean_read_latency_ms=certain_float(0.5),
+        estimated_mean_write_latency_ms=certain_float(0.4),
+    ),
+    data_shape=DataShape(
+        estimated_state_size_gib=certain_int(10),
+    ),
+)
+
+cassandra_high_writes = CapacityDesires(
+    service_tier=1,
+    query_pattern=QueryPattern(
+        estimated_read_per_second=certain_int(10_000),
+        estimated_write_per_second=certain_int(500_000),
+    ),
+    data_shape=DataShape(
+        estimated_state_size_gib=certain_int(300),
+    ),
+)
+
+scenarios.extend(
+    [
+        (
+            "org.netflix.cassandra",
+            "us-east-1",
+            cassandra_small_high_qps,
+            {"require_local_disks": True},
+            "cassandra_small_high_qps_local",
+        ),
+        (
+            "org.netflix.cassandra",
+            "us-east-1",
+            cassandra_high_writes,
+            {"require_local_disks": False, "copies_per_region": 2},
+            "cassandra_high_writes_ebs",
+        ),
+    ]
+)
+
+# Kafka scenarios - Kafka uses throughput-based sizing via write_size
+# 100 MiB/s throughput with 2 consumers, 1 producer
+throughput = 100 * 1024 * 1024  # 100 MiB/s
+kafka_throughput = CapacityDesires(
+    service_tier=1,
+    query_pattern=QueryPattern(
+        estimated_read_per_second=Interval(low=1, mid=2, high=2, confidence=0.98),
+        estimated_write_per_second=Interval(low=1, mid=1, high=1, confidence=0.98),
+        estimated_mean_write_size_bytes=Interval(
+            low=throughput, mid=throughput, high=throughput * 2, confidence=0.98
+        ),
+    ),
+)
+
+scenarios.extend(
+    [
+        (
+            "org.netflix.kafka",
+            "us-east-1",
+            kafka_throughput,
+            {"require_local_disks": False},
+            "kafka_100mib_throughput",
+        ),
+    ]
+)
+
+# EVCache scenarios
+# Tiny EVCache - small cluster to show spread cost (< 10 instances = spread penalty)
+evcache_tiny = CapacityDesires(
+    service_tier=1,
+    query_pattern=QueryPattern(
+        estimated_read_per_second=certain_int(1_000),
+        estimated_write_per_second=certain_int(100),
+        estimated_mean_read_latency_ms=certain_float(1.0),
+    ),
+    data_shape=DataShape(
+        estimated_state_size_gib=certain_int(1),
+        estimated_state_item_count=Interval(
+            low=10_000, mid=100_000, high=200_000, confidence=0.98
+        ),
+    ),
+)
+
+evcache_small = CapacityDesires(
+    service_tier=1,
+    query_pattern=QueryPattern(
+        estimated_read_per_second=certain_int(100_000),
+        estimated_write_per_second=certain_int(10_000),
+        estimated_mean_read_latency_ms=certain_float(1.0),
+    ),
+    data_shape=DataShape(
+        estimated_state_size_gib=certain_int(10),
+        estimated_state_item_count=Interval(
+            low=1_000_000, mid=10_000_000, high=20_000_000, confidence=0.98
+        ),
+    ),
+)
+
+evcache_large = CapacityDesires(
+    service_tier=1,
+    query_pattern=QueryPattern(
+        estimated_read_per_second=certain_int(500_000),
+        estimated_write_per_second=certain_int(50_000),
+        estimated_mean_read_latency_ms=certain_float(1.0),
+    ),
+    data_shape=DataShape(
+        estimated_state_size_gib=certain_int(500),
+        estimated_state_item_count=Interval(
+            low=10_000_000, mid=100_000_000, high=200_000_000, confidence=0.98
+        ),
+    ),
+)
+
+scenarios.extend(
+    [
+        (
+            "org.netflix.evcache",
+            "us-east-1",
+            evcache_tiny,
+            {"cross_region_replication": "none"},
+            "evcache_tiny_with_spread",
+        ),
+        (
+            "org.netflix.evcache",
+            "us-east-1",
+            evcache_small,
+            {"cross_region_replication": "none"},
+            "evcache_small_no_replication",
+        ),
+        (
+            "org.netflix.evcache",
+            "us-east-1",
+            evcache_large,
+            {"cross_region_replication": "sets", "copies_per_region": 2},
+            "evcache_large_with_replication",
+        ),
+    ]
+)
+
+# Key-Value scenarios (composite: Cassandra + EVCache)
+# Uses evcache_large desires with eventual consistency to enable caching layer
+kv_with_cache = evcache_large.model_copy(deep=True)
+kv_with_cache.query_pattern.access_consistency = GlobalConsistency(
+    same_region=Consistency(target_consistency=AccessConsistency.eventual),
+    cross_region=Consistency(target_consistency=AccessConsistency.best_effort),
+)
+
+scenarios.extend(
+    [
+        (
+            "org.netflix.key-value",
+            "us-east-1",
+            kv_with_cache,
+            None,
+            "kv_with_cache",
+        ),
+    ]
+)
+
+# Export as dict for tests to import (single source of truth)
+SCENARIOS: dict[str, dict[str, Any]] = {
+    name: {
+        "model": model,
+        "region": region,
+        "desires": desires,
+        "extra_args": extra_args,
+    }
+    for model, region, desires, extra_args, name in scenarios
+}
+
+
+if __name__ == "__main__":
+    # Capture all scenarios
+    results = []
+    for model, region, desires, extra_args, scenario_name in scenarios:
+        print(f"Capturing: {scenario_name}...")
+        result = capture_costs(model, region, desires, extra_args, scenario_name)
+        results.append(result)
+
+        if "error" in result:
+            print(f"  ERROR: {result['error']}")
+        else:
+            print(f"  Total cost: ${result['total_annual_cost']:,.2f}")
+            print(f"  Cost breakdown: {list(result['annual_costs'].keys())}")
+
+    # Save results
+    output_file = Path(__file__).parent / "data" / "baseline_costs.json"
+    with open(output_file, "w", encoding="utf-8") as f:
+        json.dump(results, f, indent=2, sort_keys=True)
+        f.write("\n")  # Ensure trailing newline for pre-commit
+
+    print(f"\nResults saved to: {output_file}")
+    success_count = len([r for r in results if "error" not in r])
+    print(f"Total scenarios captured: {success_count}/{len(results)}")

--- service_capacity_modeling-0.3.99/service_capacity_modeling.egg-info/PKG-INFO
+++ service_capacity_modeling-0.3.101/service_capacity_modeling.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: service-capacity-modeling
-Version: 0.3.99
+Version: 0.3.101
 Summary: Contains utilities for modeling capacity for pluggable workloads
 Author: Joseph Lynch
 Author-email: josephl@netflix.com

--- service_capacity_modeling-0.3.99/service_capacity_modeling.egg-info/SOURCES.txt
+++ service_capacity_modeling-0.3.101/service_capacity_modeling.egg-info/SOURCES.txt
@@ -88,9 +88,11 @@ service_capacity_modeling/models/org/netflix/wal.py
 service_capacity_modeling/models/org/netflix/zookeeper.py
 service_capacity_modeling/tools/__init__.py
 service_capacity_modeling/tools/auto_shape.py
+service_capacity_modeling/tools/capture_baseline_costs.py
 service_capacity_modeling/tools/fetch_pricing.py
 service_capacity_modeling/tools/generate_missing.py
 service_capacity_modeling/tools/instance_families.py
+service_capacity_modeling/tools/data/__init__.py
 tests/test_arguments.py
 tests/test_buffers.py
 tests/test_common.py

--- service_capacity_modeling-0.3.99/service_capacity_modeling/models/org/netflix/__init__.py
+++ service_capacity_modeling-0.3.101/service_capacity_modeling/models/org/netflix/__init__.py
@@ -33,7 +33,6 @@ def models() -> Dict[str, Any]:
         "org.netflix.counter": nflx_counter_capacity_model,
         "org.netflix.zookeeper": nflx_zookeeper_capacity_model,
         "org.netflix.evcache": nflx_evcache_capacity_model,
-        "org.netflix.rds": nflx_rds_capacity_model,
         "org.netflix.elasticsearch": nflx_elasticsearch_capacity_model,
         "org.netflix.elasticsearch.node": nflx_elasticsearch_data_capacity_model,
         "org.netflix.elasticsearch.master": nflx_elasticsearch_master_capacity_model,
@@ -43,6 +42,7 @@ def models() -> Dict[str, Any]:
         "org.netflix.cockroachdb": nflx_cockroachdb_capacity_model,
         "org.netflix.aurora": nflx_aurora_capacity_model,
         "org.netflix.postgres": nflx_postgres_capacity_model,
+        "org.netflix.rds": nflx_rds_capacity_model,
         "org.netflix.kafka": nflx_kafka_capacity_model,
         "org.netflix.dynamodb": nflx_ddb_capacity_model,
         "org.netflix.wal": nflx_wal_capacity_model,