service-capacity-modeling 0.3.101__py3-none-any.whl → 0.3.103__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- service_capacity_modeling/models/org/netflix/control.py +1 -28
- service_capacity_modeling/models/org/netflix/entity.py +0 -29
- service_capacity_modeling/tools/capture_baseline_costs.py +33 -15
- {service_capacity_modeling-0.3.101.dist-info → service_capacity_modeling-0.3.103.dist-info}/METADATA +1 -1
- {service_capacity_modeling-0.3.101.dist-info → service_capacity_modeling-0.3.103.dist-info}/RECORD +9 -9
- {service_capacity_modeling-0.3.101.dist-info → service_capacity_modeling-0.3.103.dist-info}/WHEEL +1 -1
- {service_capacity_modeling-0.3.101.dist-info → service_capacity_modeling-0.3.103.dist-info}/entry_points.txt +0 -0
- {service_capacity_modeling-0.3.101.dist-info → service_capacity_modeling-0.3.103.dist-info}/licenses/LICENSE +0 -0
- {service_capacity_modeling-0.3.101.dist-info → service_capacity_modeling-0.3.103.dist-info}/top_level.txt +0 -0
service_capacity_modeling/models/org/netflix/control.py
CHANGED

@@ -67,30 +67,6 @@ class NflxControlCapacityModel(CapacityModel):
     def compose_with(
         user_desires: CapacityDesires, extra_model_arguments: Dict[str, Any]
     ) -> Tuple[Tuple[str, Callable[[CapacityDesires], CapacityDesires]], ...]:
-        def _modify_rds_desires(
-            user_desires: CapacityDesires,
-        ) -> CapacityDesires:
-            """RDS proxy for Control service."""
-            relaxed = user_desires.model_copy(deep=True)
-
-            # RDS doesn't support tier 0
-            if relaxed.service_tier == 0:
-                relaxed.service_tier = 1
-
-            # Control caches reads, so proxy only sees writes + minimal reads
-            relaxed.query_pattern.estimated_read_per_second = certain_int(1)
-            if relaxed.query_pattern.estimated_write_per_second:
-                relaxed.query_pattern.estimated_write_per_second = (
-                    relaxed.query_pattern.estimated_write_per_second.scale(0.05)
-                )
-
-            # Minimal data footprint for connection metadata
-            relaxed.data_shape.estimated_state_size_gib = (
-                relaxed.data_shape.estimated_state_size_gib.scale(0.01)
-            )
-
-            return relaxed
-
         def _modify_postgres_desires(
             user_desires: CapacityDesires,
         ) -> CapacityDesires:
@@ -107,10 +83,7 @@ class NflxControlCapacityModel(CapacityModel):
 
             return relaxed
 
-        return (
-            ("org.netflix.rds", _modify_rds_desires),
-            ("org.netflix.postgres", _modify_postgres_desires),
-        )
+        return (("org.netflix.postgres", _modify_postgres_desires),)
 
     @staticmethod
     def default_desires(
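For context, compose_with returns a tuple of (sub-model name, desire-transform) pairs, and after this change the Control model composes only org.netflix.postgres rather than an RDS proxy plus Postgres. Below is a minimal sketch of how a result of that shape can be consumed; the derive_sub_model_desires helper is purely illustrative and not part of the library:

```python
from typing import Callable, Dict, Tuple

from service_capacity_modeling.interface import CapacityDesires


def derive_sub_model_desires(
    composed: Tuple[Tuple[str, Callable[[CapacityDesires], CapacityDesires]], ...],
    user_desires: CapacityDesires,
) -> Dict[str, CapacityDesires]:
    """Illustrative helper: apply each composed sub-model's transform to the user's desires."""
    return {name: transform(user_desires) for name, transform in composed}


# After this change NflxControlCapacityModel.compose_with(...) returns
# (("org.netflix.postgres", _modify_postgres_desires),), so the derived map
# has a single org.netflix.postgres entry and no org.netflix.rds entry.
```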
service_capacity_modeling/models/org/netflix/entity.py
CHANGED

@@ -63,34 +63,6 @@ class NflxEntityCapacityModel(CapacityModel):
     def compose_with(
         user_desires: CapacityDesires, extra_model_arguments: Dict[str, Any]
     ) -> Tuple[Tuple[str, Callable[[CapacityDesires], CapacityDesires]], ...]:
-        def _modify_rds_desires(
-            user_desires: CapacityDesires,
-        ) -> CapacityDesires:
-            """RDS proxy handles connection pooling and auth translation.
-            Capacity needs are much lower than the backend database."""
-            relaxed = user_desires.model_copy(deep=True)
-
-            # RDS doesn't support tier 0
-            if relaxed.service_tier == 0:
-                relaxed.service_tier = 1
-
-            # Proxy layer sees ~5% of actual load due to connection pooling
-            if relaxed.query_pattern.estimated_read_per_second:
-                relaxed.query_pattern.estimated_read_per_second = (
-                    relaxed.query_pattern.estimated_read_per_second.scale(0.05)
-                )
-            if relaxed.query_pattern.estimated_write_per_second:
-                relaxed.query_pattern.estimated_write_per_second = (
-                    relaxed.query_pattern.estimated_write_per_second.scale(0.05)
-                )
-
-            # Proxy only needs to store connection metadata, not actual data
-            relaxed.data_shape.estimated_state_size_gib = (
-                relaxed.data_shape.estimated_state_size_gib.scale(0.01)
-            )
-
-            return relaxed
-
         def _modify_postgres_desires(
             user_desires: CapacityDesires,
         ) -> CapacityDesires:
@@ -128,7 +100,6 @@ class NflxEntityCapacityModel(CapacityModel):
             return relaxed
 
         return (
-            ("org.netflix.rds", _modify_rds_desires),
             ("org.netflix.postgres", _modify_postgres_desires),
             ("org.netflix.key-value", lambda x: x),
             ("org.netflix.elasticsearch", _modify_elasticsearch_desires),
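The Entity model keeps its postgres, key-value, and elasticsearch compositions and only drops the RDS proxy entry. A quick way to see the effect is to inspect the composed sub-model names directly; this is a hedged sketch, and the empty CapacityDesires() and extra_model_arguments={} are placeholder inputs rather than values the library requires:

```python
from service_capacity_modeling.interface import CapacityDesires
from service_capacity_modeling.models.org.netflix.entity import NflxEntityCapacityModel

# Placeholder inputs; real callers pass their actual desires and arguments.
composed = NflxEntityCapacityModel.compose_with(
    user_desires=CapacityDesires(), extra_model_arguments={}
)
print([name for name, _ in composed])
# Expected to include org.netflix.postgres, org.netflix.key-value and
# org.netflix.elasticsearch, and no longer org.netflix.rds.
```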
service_capacity_modeling/tools/capture_baseline_costs.py
CHANGED

@@ -19,6 +19,7 @@ from service_capacity_modeling.interface import (
     CapacityDesires,
     certain_float,
     certain_int,
+    ClusterCapacity,
     Consistency,
     DataShape,
     GlobalConsistency,
@@ -27,6 +28,27 @@ from service_capacity_modeling.interface import (
 )
 
 
+def _format_cluster(cluster: ClusterCapacity, deployment: str) -> dict[str, Any]:
+    """Format a single cluster's details."""
+    info: dict[str, Any] = {
+        "cluster_type": cluster.cluster_type,
+        "deployment": deployment,
+        "instance": cluster.instance.name,
+        "count": cluster.count,
+        "annual_cost": float(cluster.annual_cost),
+    }
+
+    # Add attached drives if present
+    if cluster.attached_drives:
+        drives = []
+        for drive in cluster.attached_drives:
+            size_gib = int(drive.size_gib) if drive.size_gib else 0
+            drives.append(f"{drive.name} : {size_gib}GB")
+        info["attached_drives"] = sorted(drives)
+
+    return info
+
+
 def capture_costs(
     model_name: str,
     region: str,
@@ -48,31 +70,27 @@ def capture_costs(
             return {"error": "No capacity plans generated", "scenario": scenario_name}
 
         cap_plan = cap_plans[0]
-
+        candidate = cap_plan.candidate_clusters
+
+        # Build cluster details for each cluster
+        cluster_details = []
+        for zonal_cluster in candidate.zonal:
+            cluster_details.append(_format_cluster(zonal_cluster, "zonal"))
+        for regional_cluster in candidate.regional:
+            cluster_details.append(_format_cluster(regional_cluster, "regional"))
 
         result = {
            "scenario": scenario_name,
            "model": model_name,
            "region": region,
            "service_tier": desires.service_tier,
+            "total_annual_cost": float(candidate.total_annual_cost),
+            "clusters": cluster_details,
            "annual_costs": dict(
-                sorted((k, float(v)) for k, v in
+                sorted((k, float(v)) for k, v in candidate.annual_costs.items())
            ),
-            "total_annual_cost": float(clusters.total_annual_cost),
-            "cluster_count": len(clusters.zonal) + len(clusters.regional),
-            "service_count": len(clusters.services),
        }
 
-        # Add instance info
-        if clusters.zonal:
-            result["instance_name"] = clusters.zonal[0].instance.name
-            result["instance_count"] = clusters.zonal[0].count
-            result["deployment"] = "zonal"
-        elif clusters.regional:
-            result["instance_name"] = clusters.regional[0].instance.name
-            result["instance_count"] = clusters.regional[0].count
-            result["deployment"] = "regional"
-
         return result
     except (ValueError, KeyError, AttributeError) as e:
         return {"error": str(e), "scenario": scenario_name}
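With the new _format_cluster helper, each scenario record now carries a per-cluster breakdown next to the totals instead of a single flattened instance_name/instance_count pair. A sketch of the shape of one capture_costs result after this change; every value below is invented for illustration, including the annual_costs key name:

```python
# Illustrative shape only; all values are made up.
example_result = {
    "scenario": "tier1-baseline",
    "model": "org.netflix.postgres",
    "region": "us-east-1",
    "service_tier": 1,
    "total_annual_cost": 24691.34,
    "clusters": [
        {
            "cluster_type": "postgres",
            "deployment": "regional",
            "instance": "db.r5.xlarge",
            "count": 2,
            "annual_cost": 12345.67,
            # Only present when the cluster has attached drives.
            "attached_drives": ["gp3 : 500GB"],
        },
    ],
    "annual_costs": {"postgres": 24691.34},
}
```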
{service_capacity_modeling-0.3.101.dist-info → service_capacity_modeling-0.3.103.dist-info}/RECORD
RENAMED

@@ -59,12 +59,12 @@ service_capacity_modeling/models/org/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeu
 service_capacity_modeling/models/org/netflix/__init__.py,sha256=keaBt7dk6DB2VuRINdo8wRfsobK655Gfw3hYjruacJs,2553
 service_capacity_modeling/models/org/netflix/aurora.py,sha256=ZCileXosW1A8D9QfPf8mCu1BIt4xKvzGdqU5iW487Jg,13076
 service_capacity_modeling/models/org/netflix/cassandra.py,sha256=6CV0UIWpK0lcfF9_-LGrUjDomysswCvoMYS3S-wpSOU,39627
-service_capacity_modeling/models/org/netflix/control.py,sha256
+service_capacity_modeling/models/org/netflix/control.py,sha256=4F9yw60mnOvLrhzRwJwk6kGDBB861a9GhPCNGcC9_Ho,5774
 service_capacity_modeling/models/org/netflix/counter.py,sha256=kTDL7dCnkn-XU27_Z1VBc4CCLCPoOqJZe9WgcENHHd4,10517
 service_capacity_modeling/models/org/netflix/crdb.py,sha256=iW7tyG8jpXhHIdXrw3DPYSHRAknPN42MlCRLJO4o9C8,20826
 service_capacity_modeling/models/org/netflix/ddb.py,sha256=9qRiuTqWev9zbYFFzewyowU7M41uALsuLklYx20yAXw,26502
 service_capacity_modeling/models/org/netflix/elasticsearch.py,sha256=zPrC6b2LNrAh3IWE3HCMUEYASacjYbHChbO4WZSMma4,25234
-service_capacity_modeling/models/org/netflix/entity.py,sha256=
+service_capacity_modeling/models/org/netflix/entity.py,sha256=CrexndRmoVA_082XYMIL9LVM13qqI8ILJUPbzH6uYZY,8858
 service_capacity_modeling/models/org/netflix/evcache.py,sha256=BDVRWely3F_3Ecb3Um3dQ024_I6XgvagpRJ6zdP5E18,25687
 service_capacity_modeling/models/org/netflix/graphkv.py,sha256=7ncEhx9lLsN_vGIKNHkvWfDdKffG7cYe91Wr-DB7IjU,8659
 service_capacity_modeling/models/org/netflix/iso_date_math.py,sha256=oC5sgIXDqwOp6-5z2bdTkm-bJLlnzhqcONI_tspHjac,1137
@@ -79,14 +79,14 @@ service_capacity_modeling/models/org/netflix/wal.py,sha256=QtRlqP_AIVpTg-XEINAfv
 service_capacity_modeling/models/org/netflix/zookeeper.py,sha256=T_CkmRqoEVqpERCFPU8xihyaxlNfUHDJXz7dMHM8GD0,7679
 service_capacity_modeling/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 service_capacity_modeling/tools/auto_shape.py,sha256=K248-DayPrcZwLw1dYr47lpeQQwL0ylh1WAoVSdLNxw,23621
-service_capacity_modeling/tools/capture_baseline_costs.py,sha256=
+service_capacity_modeling/tools/capture_baseline_costs.py,sha256=X4uVRHidQOVgtBTTCGfwHkp_OPPce3UZLKzyHimzsUY,11250
 service_capacity_modeling/tools/fetch_pricing.py,sha256=fO84h77cqiiIHF4hZt490RwbZ6JqjB45UsnPpV2AXD4,6122
 service_capacity_modeling/tools/generate_missing.py,sha256=F7YqvMJAV4nZc20GNrlIsnQSF8_77sLgwYZqc5k4LDg,3099
 service_capacity_modeling/tools/instance_families.py,sha256=e5RuYkCLUITvsAazDH12B6KjX_PaBsv6Ne3mj0HK_sQ,9223
 service_capacity_modeling/tools/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-service_capacity_modeling-0.3.
-service_capacity_modeling-0.3.
-service_capacity_modeling-0.3.
-service_capacity_modeling-0.3.
-service_capacity_modeling-0.3.
-service_capacity_modeling-0.3.
+service_capacity_modeling-0.3.103.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
+service_capacity_modeling-0.3.103.dist-info/METADATA,sha256=iPkZURUhQFYGLsQx5MdbadJFiEb_JEHnrClAJVkMPH8,10367
+service_capacity_modeling-0.3.103.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+service_capacity_modeling-0.3.103.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
+service_capacity_modeling-0.3.103.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
+service_capacity_modeling-0.3.103.dist-info/RECORD,,

{service_capacity_modeling-0.3.101.dist-info → service_capacity_modeling-0.3.103.dist-info}/entry_points.txt
RENAMED
File without changes

{service_capacity_modeling-0.3.101.dist-info → service_capacity_modeling-0.3.103.dist-info}/licenses/LICENSE
RENAMED
File without changes

{service_capacity_modeling-0.3.101.dist-info → service_capacity_modeling-0.3.103.dist-info}/top_level.txt
RENAMED
File without changes