service-capacity-modeling 0.3.88__py3-none-any.whl → 0.3.90__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- service_capacity_modeling/enum_utils.py +42 -0
- service_capacity_modeling/interface.py +9 -12
- service_capacity_modeling/models/org/netflix/cassandra.py +50 -38
- service_capacity_modeling/models/org/netflix/counter.py +6 -6
- service_capacity_modeling/models/org/netflix/evcache.py +2 -2
- service_capacity_modeling/models/org/netflix/kafka.py +2 -2
- {service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/METADATA +1 -1
- {service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/RECORD +12 -12
- {service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/WHEEL +0 -0
- {service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/entry_points.txt +0 -0
- {service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/licenses/LICENSE +0 -0
- {service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/top_level.txt +0 -0
service_capacity_modeling/enum_utils.py CHANGED

@@ -5,6 +5,7 @@ runtime-accessible docstrings.
 
 import ast
 import inspect
+import sys
 from enum import Enum
 from functools import partial
 from operator import is_
@@ -16,6 +17,47 @@ from pydantic.json_schema import JsonSchemaValue
 from pydantic_core import CoreSchema
 
 
+__all__ = ["StrEnum", "enum_docstrings"]
+
+# StrEnum backport for Python 3.10 compatibility
+# On Python 3.11+, use the stdlib version
+if sys.version_info >= (3, 11):
+    from enum import StrEnum as StrEnum  # pylint: disable=useless-import-alias
+else:
+
+    class StrEnum(str, Enum):
+        """Backport of Python 3.11 StrEnum.
+
+        Provides consistent string behavior across all Python versions:
+        - f"{x}" returns the value (not "Foo.BAR")
+        - str(x) returns the value (not "Foo.BAR")
+        - x == "value" returns True (string comparison works)
+
+        This addresses PEP 663 which changed str(Enum) behavior in Python 3.11,
+        making (str, Enum) return "Foo.BAR" in f-strings instead of the value.
+        """
+
+        def __new__(cls, value: str, *args: Any, **kwargs: Any) -> "StrEnum":
+            if not isinstance(value, str):
+                raise TypeError(f"{value!r} is not a string")
+            member = str.__new__(cls, value)
+            member._value_ = value
+            return member
+
+        def __str__(self) -> str:
+            return str(self.value)
+
+        def __format__(self, format_spec: str) -> str:
+            # Ensures f-strings return value, not "Foo.BAR"
+            return str(self.value).__format__(format_spec)
+
+        @staticmethod
+        def _generate_next_value_(
+            name: str, start: int, count: int, last_values: list[str]
+        ) -> str:
+            return name.lower()
+
+
 E = TypeVar("E", bound=Enum)
 
 
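For orientation, a minimal sketch (not part of the package) of the behavior the backport pins down; the `Color` enum here is hypothetical:

```python
from service_capacity_modeling.enum_utils import StrEnum


class Color(StrEnum):  # hypothetical example enum, not from the package
    red = "red"


# Per the backport's docstring, these hold on every supported Python version;
# with a plain (str, Enum) the f-string/str() output becomes "Color.red" on 3.11+.
assert str(Color.red) == "red"
assert f"{Color.red}" == "red"
assert Color.red == "red"
```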
service_capacity_modeling/interface.py CHANGED

@@ -4,7 +4,6 @@ from __future__ import annotations
 import re
 import sys
 from decimal import Decimal
-from enum import Enum
 from fractions import Fraction
 from functools import lru_cache
 from typing import Any
@@ -22,6 +21,7 @@ from pydantic import ConfigDict
 from pydantic import Field
 
 from service_capacity_modeling.enum_utils import enum_docstrings
+from service_capacity_modeling.enum_utils import StrEnum
 
 GIB_IN_BYTES = 1024 * 1024 * 1024
 MIB_IN_BYTES = 1024 * 1024
@@ -46,16 +46,13 @@ class ExcludeUnsetModel(BaseModel):
 
 
 @enum_docstrings
-class IntervalModel(str, Enum):
+class IntervalModel(StrEnum):
     """Statistical distribution models for approximating intervals
 
     When we have uncertainty intervals (low, mid, high), we need to choose
     a probability distribution to model that uncertainty for simulation purposes.
     """
 
-    def __str__(self) -> str:
-        return str(self.value)
-
     def __repr__(self) -> str:
         return f"D({self.value})"
 
@@ -204,7 +201,7 @@ def normalized_aws_size(name: str) -> Fraction:
 ###############################################################################
 
 
-class Lifecycle(str, Enum):
+class Lifecycle(StrEnum):
     """Represents the lifecycle of hardware from initial preview
     to end-of-life.
 
@@ -223,7 +220,7 @@ class Lifecycle(str, Enum):
 
 
 @enum_docstrings
-class DriveType(str, Enum):
+class DriveType(StrEnum):
     """Represents the type and attachment model of storage drives
 
     Drives can be either local (ephemeral, instance-attached) or network-attached
@@ -366,7 +363,7 @@ class Drive(ExcludeUnsetModel):
 
 
 @enum_docstrings
-class Platform(str, Enum):
+class Platform(StrEnum):
     """Represents the CPU architecture or managed service platform
 
     Hardware can run on different CPU architectures (x86_64, ARM) or be a fully
@@ -617,7 +614,7 @@ class Pricing(ExcludeUnsetModel):
 
 
 @enum_docstrings
-class AccessPattern(str, Enum):
+class AccessPattern(StrEnum):
     """The access pattern determines capacity planning priorities: latency-sensitive
     services target low P99 latency, while throughput-oriented services optimize
     for maximum requests per second.
@@ -633,7 +630,7 @@ class AccessPattern(str, Enum):
 
 
 @enum_docstrings
-class AccessConsistency(str, Enum):
+class AccessConsistency(StrEnum):
     """
     Generally speaking consistency is expensive, so models need to know what
     kind of consistency will be required in order to estimate CPU usage
@@ -851,7 +848,7 @@ class CurrentClusters(ExcludeUnsetModel):
 
 
 @enum_docstrings
-class BufferComponent(str, Enum):
+class BufferComponent(StrEnum):
     """Represents well known buffer components such as compute and storage
 
     Note that while these are common and defined here for models to share,
@@ -887,7 +884,7 @@ class BufferComponent(str, Enum):
 
 
 @enum_docstrings
-class BufferIntent(str, Enum):
+class BufferIntent(StrEnum):
     """Defines the intent of buffer directives for capacity planning"""
 
     desired = "desired"
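A small illustrative sketch (not from the package) of what this migration preserves for callers that format these enums, using the `BufferIntent.desired` member visible above:

```python
from service_capacity_modeling.interface import BufferIntent

# Before this change BufferIntent was a plain (str, Enum); on Python 3.11+
# that would render as "BufferIntent.desired" in f-strings. With StrEnum the
# value is rendered consistently on every supported version.
assert f"{BufferIntent.desired}" == "desired"
assert str(BufferIntent.desired) == "desired"
```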
service_capacity_modeling/models/org/netflix/cassandra.py CHANGED

@@ -638,8 +638,14 @@ def _target_rf(desires: CapacityDesires, user_copies: Optional[int]) -> int:
 
 
 class NflxCassandraArguments(BaseModel):
-
-
+    """Configuration arguments for the Netflix Cassandra capacity model.
+
+    This model centralizes all tunable parameters with their defaults.
+    Use `from_extra_model_arguments()` to parse a dict into a validated instance.
+    """
+
+    copies_per_region: Optional[int] = Field(
+        default=None,
         description="How many copies of the data will exist e.g. RF=3. If unsupplied"
         " this will be deduced from durability and consistency desires",
     )
@@ -663,9 +669,13 @@ class NflxCassandraArguments(BaseModel):
         default=192,
         description="What is the maximum size of a cluster in this region",
     )
-
-        default=
-        description="
+    max_local_data_per_node_gib: int = Field(
+        default=1280,
+        description="Maximum data per node for local disk instances (GiB)",
+    )
+    max_attached_data_per_node_gib: int = Field(
+        default=2048,
+        description="Maximum data per node for attached disk instances (GiB)",
     )
     max_write_buffer_percent: float = Field(
         default=0.25,
@@ -680,6 +690,26 @@ class NflxCassandraArguments(BaseModel):
         "automatically adjust to 0.2",
     )
 
+    @classmethod
+    def from_extra_model_arguments(
+        cls, extra_model_arguments: Dict[str, Any]
+    ) -> "NflxCassandraArguments":
+        """Parse extra_model_arguments dict into a validated NflxCassandraArguments.
+
+        This centralizes default values - any field not in extra_model_arguments
+        will use the default defined in this model.
+
+        Handles legacy field name mappings:
+        - max_local_disk_gib -> max_local_data_per_node_gib (if not explicitly set)
+        """
+        # Handle legacy field name: max_local_disk_gib -> max_local_data_per_node_gib
+        args = dict(extra_model_arguments)
+        if "max_local_data_per_node_gib" not in args and "max_local_disk_gib" in args:
+            args["max_local_data_per_node_gib"] = args["max_local_disk_gib"]
+
+        # Pydantic will use defaults for any missing fields
+        return cls.model_validate(args)
+
 
 class NflxCassandraCapacityModel(CapacityModel):
     def __init__(self) -> None:
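For orientation, a minimal hedged sketch (not from the package) of how the new classmethod is intended to be used; the argument values are made up:

```python
from service_capacity_modeling.models.org.netflix.cassandra import (
    NflxCassandraArguments,
)

# Hypothetical caller-supplied arguments; note the legacy key max_local_disk_gib.
extra = {"copies_per_region": 3, "max_local_disk_gib": 2048}

args = NflxCassandraArguments.from_extra_model_arguments(extra)
assert args.copies_per_region == 3
# The legacy name is mapped onto the new field when the new one is absent,
assert args.max_local_data_per_node_gib == 2048
# and any field not supplied falls back to the default declared on the model,
# e.g. the Field(default=2048) shown above for attached-disk instances.
assert args.max_attached_data_per_node_gib == 2048
```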
@@ -722,40 +752,22 @@ class NflxCassandraCapacityModel(CapacityModel):
         desires: CapacityDesires,
         extra_model_arguments: Dict[str, Any],
     ) -> Optional[CapacityPlan]:
-        #
-
-
-        #
-
-
-
-            desires, extra_model_arguments.get("copies_per_region", None)
-        )
-        require_local_disks: bool = extra_model_arguments.get(
-            "require_local_disks", False
-        )
-        require_attached_disks: bool = extra_model_arguments.get(
-            "require_attached_disks", False
-        )
+        # Parse extra_model_arguments into a validated model with centralized defaults
+        args = NflxCassandraArguments.from_extra_model_arguments(extra_model_arguments)
+
+        # Use durability and consistency to compute RF if not explicitly set
+        copies_per_region = _target_rf(desires, args.copies_per_region)
+
+        # Validate required_cluster_size for critical tiers
         required_cluster_size: Optional[int] = (
             NflxCassandraCapacityModel.get_required_cluster_size(
                 desires.service_tier, extra_model_arguments
             )
         )
 
-
-
-
-            "max_local_data_per_node_gib",
-            extra_model_arguments.get("max_local_disk_gib", 1280),
-        )
-
-        max_write_buffer_percent: float = min(
-            0.5, extra_model_arguments.get("max_write_buffer_percent", 0.25)
-        )
-        max_table_buffer_percent: float = min(
-            0.5, extra_model_arguments.get("max_table_buffer_percent", 0.11)
-        )
+        # Apply caps to buffer percentages
+        max_write_buffer_percent = min(0.5, args.max_write_buffer_percent)
+        max_table_buffer_percent = min(0.5, args.max_table_buffer_percent)
 
         # Adjust heap defaults for high write clusters
         if (
@@ -772,12 +784,12 @@ class NflxCassandraCapacityModel(CapacityModel):
             desires=desires,
             zones_per_region=context.zones_in_region,
             copies_per_region=copies_per_region,
-            require_local_disks=require_local_disks,
-            require_attached_disks=require_attached_disks,
+            require_local_disks=args.require_local_disks,
+            require_attached_disks=args.require_attached_disks,
             required_cluster_size=required_cluster_size,
-            max_rps_to_disk=max_rps_to_disk,
-            max_regional_size=max_regional_size,
-            max_local_data_per_node_gib=max_local_data_per_node_gib,
+            max_rps_to_disk=args.max_rps_to_disk,
+            max_regional_size=args.max_regional_size,
+            max_local_data_per_node_gib=args.max_local_data_per_node_gib,
             max_write_buffer_percent=max_write_buffer_percent,
             max_table_buffer_percent=max_table_buffer_percent,
         )
service_capacity_modeling/models/org/netflix/counter.py CHANGED

@@ -1,5 +1,4 @@
 from datetime import timedelta
-from enum import Enum
 from typing import Any
 from typing import Callable
 from typing import Dict
@@ -10,6 +9,7 @@ from pydantic import Field
 
 from .stateless_java import nflx_java_app_capacity_model
 from .stateless_java import NflxJavaAppArguments
+from service_capacity_modeling.enum_utils import StrEnum
 from service_capacity_modeling.interface import AccessConsistency
 from service_capacity_modeling.interface import AccessPattern
 from service_capacity_modeling.interface import CapacityDesires
@@ -26,13 +26,13 @@ from service_capacity_modeling.interface import RegionContext
 from service_capacity_modeling.models import CapacityModel
 
 
-class NflxCounterCardinality(str, Enum):
+class NflxCounterCardinality(StrEnum):
     low = "low"
     medium = "medium"
     high = "high"
 
 
-class NflxCounterMode(str, Enum):
+class NflxCounterMode(StrEnum):
     best_effort = "best-effort"
     eventual = "eventual"
     exact = "exact"
@@ -95,7 +95,7 @@ class NflxCounterCapacityModel(CapacityModel):
     ) -> Tuple[Tuple[str, Callable[[CapacityDesires], CapacityDesires]], ...]:
         stores = []
 
-        if extra_model_arguments["counter.mode"] == NflxCounterMode.best_effort
+        if extra_model_arguments["counter.mode"] == NflxCounterMode.best_effort:
             stores.append(("org.netflix.evcache", lambda x: x))
         else:
             # Shared evcache cluster is used for eventual and exact counters
@@ -114,9 +114,9 @@ class NflxCounterCapacityModel(CapacityModel):
         # high cardinality : rollups happen once every 10 seconds
         # TODO: Account for read amplification from time slice configs
         # for better model accuracy
-        if counter_cardinality == NflxCounterCardinality.low
+        if counter_cardinality == NflxCounterCardinality.low:
             rollups_per_second = counter_deltas_per_second.scale(0.0167)
-        elif counter_cardinality == NflxCounterCardinality.medium
+        elif counter_cardinality == NflxCounterCardinality.medium:
            rollups_per_second = counter_deltas_per_second.scale(0.0333)
         else:
             rollups_per_second = counter_deltas_per_second.scale(0.1)
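As a rough reading of the scale factors above (a sketch, not package code): 0.1 matches the stated one rollup per 10 seconds for high cardinality, and 0.0333 / 0.0167 are consistent with roughly one rollup per 30 / 60 seconds for medium / low cardinality:

```python
# Hypothetical illustration of how the rollup scale factors relate to
# an assumed rollup interval per cardinality tier.
rollup_interval_seconds = {"low": 60, "medium": 30, "high": 10}

for cardinality, interval in rollup_interval_seconds.items():
    scale = round(1 / interval, 4)
    print(cardinality, scale)  # low 0.0167, medium 0.0333, high 0.1
```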
service_capacity_modeling/models/org/netflix/evcache.py CHANGED

@@ -1,6 +1,5 @@
 import logging
 import math
-from enum import Enum
 from typing import Any
 from typing import Dict
 from typing import Optional
@@ -9,6 +8,7 @@ from typing import Tuple
 from pydantic import BaseModel
 from pydantic import Field
 
+from service_capacity_modeling.enum_utils import StrEnum
 from service_capacity_modeling.interface import AccessConsistency
 from service_capacity_modeling.interface import AccessPattern
 from service_capacity_modeling.interface import Buffer
@@ -45,7 +45,7 @@ from service_capacity_modeling.stats import dist_for_interval
 logger = logging.getLogger(__name__)
 
 
-class Replication(str, Enum):
+class Replication(StrEnum):
     none = "none"
     sets = "sets"
     evicts = "evicts"
service_capacity_modeling/models/org/netflix/kafka.py CHANGED

@@ -1,6 +1,5 @@
 import logging
 import math
-from enum import Enum
 from typing import Any
 from typing import Dict
 from typing import Optional
@@ -9,6 +8,7 @@ from typing import Tuple
 from pydantic import BaseModel
 from pydantic import Field
 
+from service_capacity_modeling.enum_utils import StrEnum
 from service_capacity_modeling.interface import AccessConsistency
 from service_capacity_modeling.interface import AccessPattern
 from service_capacity_modeling.interface import Buffer
@@ -45,7 +45,7 @@ from service_capacity_modeling.models.org.netflix.iso_date_math import iso_to_se
 logger = logging.getLogger(__name__)
 
 
-class ClusterType(str, Enum):
+class ClusterType(StrEnum):
     strong = "strong"
     ha = "high-availability"
 
{service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/RECORD RENAMED

@@ -1,7 +1,7 @@
 service_capacity_modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 service_capacity_modeling/capacity_planner.py,sha256=JRagEFlg3u_zB1N5GzGsKAN55JZLad6p4IF_PmL8kcg,32780
-service_capacity_modeling/enum_utils.py,sha256
-service_capacity_modeling/interface.py,sha256=
+service_capacity_modeling/enum_utils.py,sha256=50Rw2kgYoJYCrybSbo9WaPPCWxlF5CyPCQtHxQ3kB18,5229
+service_capacity_modeling/interface.py,sha256=mgX2qUsYf_rVxbYg1rFjRUH-QY11Z87mke4nWa1LwhM,42327
 service_capacity_modeling/stats.py,sha256=LCNUcQPfwF5hhIZwsfAsDe4ZbnuhDnl3vQHKfpK61Xc,6142
 service_capacity_modeling/hardware/__init__.py,sha256=P5ostvoSOMUqPODtepeFYb4qfTVH0E73mMFraP49rYU,9196
 service_capacity_modeling/hardware/profiles/__init__.py,sha256=7-y3JbCBkgzaAjFla2RIymREcImdZ51HTl3yn3vzoGw,1602
@@ -53,17 +53,17 @@ service_capacity_modeling/models/utils.py,sha256=WosEEg4o1_WSbTb5mL-M1v8JuWJgvS2
 service_capacity_modeling/models/org/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 service_capacity_modeling/models/org/netflix/__init__.py,sha256=2Ld2NPxiO3vbYtOMqHtrV4f1nEH390Hoxqo3I5NbBDI,2553
 service_capacity_modeling/models/org/netflix/aurora.py,sha256=Js33ZjxCtt34HiDPsWRT9mjKCAsnnCo9du15QArVFMo,13073
-service_capacity_modeling/models/org/netflix/cassandra.py,sha256=
+service_capacity_modeling/models/org/netflix/cassandra.py,sha256=MUILIhdxlqIxOwMbFJvy7luRYqBxraIkIVU_ZKnjxOo,39627
 service_capacity_modeling/models/org/netflix/control.py,sha256=wkJnqG7Nn7kQNcKEMzl9LFlA9xTAji2wYxL011VAKPI,5762
-service_capacity_modeling/models/org/netflix/counter.py,sha256=
+service_capacity_modeling/models/org/netflix/counter.py,sha256=kTDL7dCnkn-XU27_Z1VBc4CCLCPoOqJZe9WgcENHHd4,10517
 service_capacity_modeling/models/org/netflix/crdb.py,sha256=iW7tyG8jpXhHIdXrw3DPYSHRAknPN42MlCRLJO4o9C8,20826
 service_capacity_modeling/models/org/netflix/ddb.py,sha256=9qRiuTqWev9zbYFFzewyowU7M41uALsuLklYx20yAXw,26502
 service_capacity_modeling/models/org/netflix/elasticsearch.py,sha256=zPrC6b2LNrAh3IWE3HCMUEYASacjYbHChbO4WZSMma4,25234
 service_capacity_modeling/models/org/netflix/entity.py,sha256=VHgEwnGtJAKlhvbE2kTif75OZmIsjjjoZrT6kb1LTgA,8750
-service_capacity_modeling/models/org/netflix/evcache.py,sha256=
+service_capacity_modeling/models/org/netflix/evcache.py,sha256=BDVRWely3F_3Ecb3Um3dQ024_I6XgvagpRJ6zdP5E18,25687
 service_capacity_modeling/models/org/netflix/graphkv.py,sha256=7ncEhx9lLsN_vGIKNHkvWfDdKffG7cYe91Wr-DB7IjU,8659
 service_capacity_modeling/models/org/netflix/iso_date_math.py,sha256=oC5sgIXDqwOp6-5z2bdTkm-bJLlnzhqcONI_tspHjac,1137
-service_capacity_modeling/models/org/netflix/kafka.py,sha256=
+service_capacity_modeling/models/org/netflix/kafka.py,sha256=1Ut1yBHgMCdH5chByuqBSmrZF-XpKdvFxAhRHaRV9nQ,25547
 service_capacity_modeling/models/org/netflix/key_value.py,sha256=WH8NblHqHwnAAumB2Zz1Qd4NBFWDQEQ1rpBcP3fVVQk,9409
 service_capacity_modeling/models/org/netflix/postgres.py,sha256=LBxDqkc-lYxDBu2VwNLuf2Q4o4hU3jPwu4YSt33Oe-8,4128
 service_capacity_modeling/models/org/netflix/rds.py,sha256=8GVmpMhTisZPdT-mP1Sx5U7VAF32lnTI27iYPfGg9CY,10930
@@ -77,9 +77,9 @@ service_capacity_modeling/tools/auto_shape.py,sha256=Jx9H2ay9-H_kUDjtB141owQNxGF
 service_capacity_modeling/tools/fetch_pricing.py,sha256=Qp-XMymkY1dvtyS51RufmEpfgOHv-IQ-XyzS8wp2-qM,4021
 service_capacity_modeling/tools/generate_missing.py,sha256=F7YqvMJAV4nZc20GNrlIsnQSF8_77sLgwYZqc5k4LDg,3099
 service_capacity_modeling/tools/instance_families.py,sha256=e5RuYkCLUITvsAazDH12B6KjX_PaBsv6Ne3mj0HK_sQ,9223
-service_capacity_modeling-0.3.
-service_capacity_modeling-0.3.
-service_capacity_modeling-0.3.
-service_capacity_modeling-0.3.
-service_capacity_modeling-0.3.
-service_capacity_modeling-0.3.
+service_capacity_modeling-0.3.90.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
+service_capacity_modeling-0.3.90.dist-info/METADATA,sha256=QAmQcpV4CC-6rKvYKnmzrb4OjpW7sCQdaFAdB5u9TVo,10366
+service_capacity_modeling-0.3.90.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+service_capacity_modeling-0.3.90.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
+service_capacity_modeling-0.3.90.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
+service_capacity_modeling-0.3.90.dist-info/RECORD,,
{service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/WHEEL RENAMED
File without changes

{service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/entry_points.txt RENAMED
File without changes

{service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/licenses/LICENSE RENAMED
File without changes

{service_capacity_modeling-0.3.88.dist-info → service_capacity_modeling-0.3.90.dist-info}/top_level.txt RENAMED
File without changes