service-capacity-modeling 0.3.85__py3-none-any.whl → 0.3.87__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- /dev/null
+++ b/service_capacity_modeling/enum_utils.py
@@ -0,0 +1,113 @@
+ """Utilities for working with enums, particularly for adding
+ runtime-accessible docstrings.
+
+ """
+
+ import ast
+ import inspect
+ from enum import Enum
+ from functools import partial
+ from operator import is_
+ from typing import Any
+ from typing import cast
+ from typing import TypeVar
+
+ from pydantic.json_schema import JsonSchemaValue
+ from pydantic_core import CoreSchema
+
+
+ E = TypeVar("E", bound=Enum)
+
+
+ def enum_docstrings(enum: type[E]) -> type[E]:
+     """Attach docstrings to enum members at runtime
+
+     This decorator enables per-member docstrings that are accessible at runtime via
+     the __doc__ attribute. Docstrings should be string literals that appear directly
+     below the enum member assignment, following PEP 257 conventions.
+
+     This approach provides both:
+     - IDE/tool support (VSCode, PyCharm, Sphinx recognize the PEP 257 pattern)
+     - Runtime access via member.__doc__
+
+     Example:
+         @enum_docstrings
+         class SomeEnum(Enum):
+             \"\"\"Docstring for the SomeEnum enum\"\"\"
+
+             foo_member = "foo_value"
+             \"\"\"Docstring for the foo_member enum member\"\"\"
+
+             bar_member = "bar_value"
+             \"\"\"Docstring for the bar_member enum member\"\"\"
+
+         # Now accessible at runtime:
+         SomeEnum.foo_member.__doc__  # 'Docstring for the foo_member enum member'
+
+     Implementation:
+         This decorator parses the source code AST to extract docstrings that appear
+         after member assignments and attaches them to each member's __doc__ attribute.
+         If source code is unavailable (e.g., in compiled bytecode), the enum is
+         returned unchanged and members will inherit the class docstring.
+
+     Credit:
+         Based on Martijn Pieters' StackOverflow answer:
+         https://stackoverflow.com/a/79229811
+
+     See also:
+         https://stackoverflow.com/questions/19330460/how-do-i-put-docstrings-on-enums
+     """
+     try:
+         mod = ast.parse(inspect.getsource(enum))
+     except OSError:
+         # no source code available (e.g., compiled bytecode)
+         return enum
+
+     if mod.body and isinstance(class_def := mod.body[0], ast.ClassDef):
+         # An enum member docstring is unassigned if it is the exact same object
+         # as enum.__doc__ (members inherit class docstring by default)
+         unassigned = partial(is_, enum.__doc__)
+         names = enum.__members__.keys()
+         member: E | None = None
+
+         for node in class_def.body:
+             match node:
+                 case ast.Assign(targets=[ast.Name(id=name)]) if name in names:
+                     # Enum member assignment, look for a docstring next
+                     member = enum[name]
+                     continue
+
+                 case ast.Expr(value=ast.Constant(value=str() as docstring)) if (
+                     member and unassigned(member.__doc__)
+                 ):
+                     # Docstring immediately following a member assignment
+                     member.__doc__ = docstring
+
+                 case _:
+                     pass
+
+             member = None
+
+     # Add Pydantic JSON schema support for member docstrings
+     def __get_pydantic_json_schema__(
+         cls: type[E],
+         core_schema: CoreSchema,
+         handler: Any,
+     ) -> JsonSchemaValue:
+         """Generate JSON schema with per-member descriptions using oneOf"""
+         json_schema = cast(JsonSchemaValue, handler(core_schema))
+         json_schema["oneOf"] = [
+             {
+                 "const": member.value,
+                 "title": member.name,
+                 "description": member.__doc__,
+             }
+             for member in cls
+         ]
+         return json_schema
+
+     setattr(
+         enum, "__get_pydantic_json_schema__", classmethod(__get_pydantic_json_schema__)
+     )
+
+     return enum
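
To see what the decorator buys you, here is a quick usage sketch (the `Tier` enum is hypothetical; assumes pydantic v2, which this module imports). Because the decorator relies on `inspect.getsource`, the enum must live in a real module file; otherwise the `OSError` fallback above returns it unchanged:

```python
from enum import Enum

from pydantic import BaseModel

from service_capacity_modeling.enum_utils import enum_docstrings


@enum_docstrings
class Tier(str, Enum):  # hypothetical enum for illustration
    """Service criticality tier"""

    critical = "critical"
    """Outages page a human immediately"""

    best_effort = "best-effort"
    """Outages can wait for business hours"""


print(Tier.critical.__doc__)  # -> 'Outages page a human immediately'


class Desires(BaseModel):
    tier: Tier = Tier.best_effort


# The __get_pydantic_json_schema__ hook installed by the decorator adds a
# oneOf entry (const/title/description) per member, which shows up under
# $defs in the model's generated JSON schema.
schema = Desires.model_json_schema()
```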
--- a/service_capacity_modeling/interface.py
+++ b/service_capacity_modeling/interface.py
@@ -21,6 +21,8 @@ from pydantic import computed_field
  from pydantic import ConfigDict
  from pydantic import Field

+ from service_capacity_modeling.enum_utils import enum_docstrings
+
  GIB_IN_BYTES = 1024 * 1024 * 1024
  MIB_IN_BYTES = 1024 * 1024
  MEGABIT_IN_BYTES = (1000 * 1000) / 8
@@ -43,7 +45,14 @@ class ExcludeUnsetModel(BaseModel):
  ###############################################################################


+ @enum_docstrings
  class IntervalModel(str, Enum):
+     """Statistical distribution models for approximating intervals
+
+     When we have uncertainty intervals (low, mid, high), we need to choose
+     a probability distribution to model that uncertainty for simulation purposes.
+     """
+
      def __str__(self) -> str:
          return str(self.value)

@@ -51,20 +60,29 @@ class IntervalModel(str, Enum):
          return f"D({self.value})"

      gamma = "gamma"
+     """Gamma distribution - used for modeling right-skewed data"""
+
      beta = "beta"
+     """Beta distribution - bounded to [0,1], good for modeling proportions
+     and probabilities"""


  class Interval(ExcludeUnsetModel):
+     """Represents an uncertainty interval with confidence bounds"""
+
      low: float
      mid: float
      high: float
-     # How confident are we of this interval
+
      confidence: float = 1.0
-     # How to approximate this interval (e.g. with a beta distribution)
+     """How confident are we of this interval (0.0 to 1.0)"""
+
      model_with: IntervalModel = IntervalModel.beta
-     # If we should allow simulation of this interval, some models might not
-     # be able to simulate or some properties might not want to
+     """How to approximate this interval for simulation (e.g. beta distribution)"""
+
      allow_simulate: bool = True
+     """Whether to allow simulation of this interval. Some models might not support
+     simulation or some properties might not want probabilistic treatment."""

      minimum_value: Optional[float] = None
      maximum_value: Optional[float] = None
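
A minimal sketch of how these fields fit together when declaring an uncertainty interval (the values are illustrative, not package defaults):

```python
from service_capacity_modeling.interface import Interval, IntervalModel

read_latency_ms = Interval(
    low=0.4,   # optimistic bound
    mid=1.0,   # most likely value
    high=4.0,  # pessimistic bound
    confidence=0.9,                 # 90% confident the truth lies in [low, high]
    model_with=IntervalModel.beta,  # approximate with a beta distribution
    allow_simulate=True,            # let the planner sample from it
)
```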
@@ -204,13 +222,29 @@ class Lifecycle(str, Enum):
      end_of_life = "end-of-life"


+ @enum_docstrings
  class DriveType(str, Enum):
-     """Represents the type of drive"""
+     """Represents the type and attachment model of storage drives
+
+     Drives can be either local (ephemeral, instance-attached) or network-attached
+     (persistent, e.g., EBS), and can use SSD or HDD storage media.
+     """

      local_ssd = "local-ssd"
+     """Local SSD storage - ephemeral, physically attached to the instance,
+     highest performance"""
+
      local_hdd = "local-hdd"
+     """Local HDD storage - ephemeral, physically attached to the instance,
+     lower cost than SSD"""
+
      attached_ssd = "attached-ssd"
+     """Network-attached SSD (e.g., EBS gp3) - persistent, survives
+     instance termination"""
+
      attached_hdd = "attached-hhd"
+     """Network-attached HDD (e.g., EBS st1) - persistent, cost-optimized
+     """


  class Drive(ExcludeUnsetModel):
@@ -225,37 +259,52 @@ class Drive(ExcludeUnsetModel):
      read_io_per_s: Optional[int] = None
      write_io_per_s: Optional[int] = None
      throughput: Optional[int] = None
-     # If this drive has single tenant IO capacity, for example a single
-     # physical drive versus a virtualised drive
+
      single_tenant: bool = True
-     # If this drive can scale, how large can it scale to
+     """Whether this drive has single-tenant IO capacity (e.g. a physical
+     drive vs a virtualized drive)"""
+
      max_scale_size_gib: int = 0
-     # If this drive can scale IO, how large can it scale to
+     """Maximum size this drive can scale to (in GiB)"""
+
      max_scale_io_per_s: int = 0
-     # How large is an "IO" against this device
+     """Maximum IOPS this drive can scale to"""
+
      block_size_kib: int = 4
-     # When sequential how much IO is grouped into a single "IO"
-     # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html
-     # Some cloud drives can group sequential ops together and DBs take advantage
+     """Size of a single IO operation against this device (in KiB)"""
+
      group_size_kib: int = 4
+     """When sequential, how much IO is grouped into a single operation
+     (in KiB). Some cloud drives (e.g. EBS) can group sequential ops
+     together and DBs take advantage. See:
+     https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html
+     """

      lifecycle: Lifecycle = Lifecycle.stable
      compatible_families: List[str] = []

      annual_cost_per_gib: float = 0
-     # Tuples of [max_size, annual cost]
-     # [32000, 0.78], ...
+
      annual_cost_per_read_io: List[Tuple[float, float]] = []
+     """Tiered pricing for read IOPS: a list of (max_iops, annual_cost)
+     tuples. Example: [(32000, 0.78), (160000, 0.384)]"""
+
      annual_cost_per_write_io: List[Tuple[float, float]] = []
+     """Tiered pricing for write IOPS: a list of (max_iops, annual_cost)
+     tuples. Example for io2: [(32000, 0.78), (64000, 0.552), (256000, 0.432)]
+     Note: gp3 doesn't distinguish read/write IOPS, so this would be empty for gp3."""

-     # These defaults are assuming a cloud SSD like a gp2 volume
-     # If you disagree please change them in your hardware description
      read_io_latency_ms: FixedInterval = FixedInterval(
          low=0.8, mid=1, high=2, confidence=0.9
      )
+     """Default read latency assumes a cloud SSD (e.g. EBS gp2).
+     Override in your hardware description if different."""
      write_io_latency_ms: FixedInterval = FixedInterval(
          low=0.6, mid=2, high=3, confidence=0.9
      )
+     """Default write latency assumes a cloud SSD (e.g. EBS gp2).
+     Writes are typically faster than reads due to write buffering.
+     Override in your hardware description if different."""

      @property
      def rand_io_size_kib(self) -> int:
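
To make the tier format concrete, here is a sketch of pricing IOPS against `(max_iops, annual_cost)` tuples. The `annual_io_cost` helper and its marginal-pricing interpretation (each tier's rate applies only to the IOPS within that band) are illustrative assumptions, not the library's own pricing routine:

```python
from typing import List, Tuple


def annual_io_cost(io_per_s: float, tiers: List[Tuple[float, float]]) -> float:
    """Hypothetical helper: price IOPS marginally across tiered rates."""
    cost, floor = 0.0, 0.0
    for max_iops, rate in tiers:
        band = min(io_per_s, max_iops) - floor  # IOPS that fall in this tier
        if band <= 0:
            break
        cost += band * rate
        floor = max_iops
    return cost


# Using the io2-style tiers from the docstring above:
tiers = [(32000, 0.78), (64000, 0.552), (256000, 0.432)]
annual_io_cost(48000, tiers)  # 32000 * 0.78 + 16000 * 0.552 = 33792.0
```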
@@ -316,21 +365,28 @@ class Drive(ExcludeUnsetModel):
          return Drive(name="managed")


+ @enum_docstrings
  class Platform(str, Enum):
-     """Represents the platform of the hardware
+     """Represents the CPU architecture or managed service platform

-     For example a particular hardware type might offer x86_64, arm, or be a managed
-     instance type that only works with managed RDBMS like Aurora Postgres.
+     Hardware can run on different CPU architectures (x86_64, ARM) or be a fully
+     managed service platform (e.g., Aurora). The platform choice affects performance
+     characteristics, cost, and compatibility.
      """

-     # Most Intel and AMD instance types
      amd64 = "amd64"
-     # Graviton and other ARM based instance types
+     """x86-64 architecture - most Intel and AMD instance types (e.g., m5,
+     c5, r5)"""
+
      arm64 = "arm64"
-     # Special purpose aurora type
+     """ARM64 architecture - Graviton and other ARM-based instances (e.g.,
+     m6g, c7g)"""
+
      aurora_mysql = "Aurora MySQL"
-     # Special purpose aurora type
+     """AWS Aurora MySQL-compatible managed database platform"""
+
      aurora_postgres = "Aurora PostgreSQL"
+     """AWS Aurora PostgreSQL-compatible managed database platform"""


  class Instance(ExcludeUnsetModel):
@@ -378,8 +434,11 @@ class Instance(ExcludeUnsetModel):
      drive: Optional[Drive] = None
      annual_cost: float = 0
      lifecycle: Lifecycle = Lifecycle.stable
-     # Typically hardware has a single platform, but sometimes they can act in multiple
+
      platforms: List[Platform] = [Platform.amd64]
+     """CPU platforms this instance supports. Typically hardware has a
+     single platform, but sometimes instances can run on multiple platforms
+     (e.g. amd64 and arm64)."""

      family_separator: str = "."

@@ -494,14 +553,20 @@ class Hardware(ExcludeUnsetModel):
      services: service type -> Service(name, params, cost, etc ...)
      """

-     # How many zones of compute exist in this region of compute
      zones_in_region: int = 3
-     # Per instance shape information e.g. cpu, ram, cpu etc ...
+     """Number of availability zones of compute in this region"""
+
      instances: Dict[str, Instance] = {}
-     # Per drive type information and cost
+     """Instance shapes available (e.g. instance type -> Instance with cpu,
+     ram, cost, etc.)"""
+
      drives: Dict[str, Drive] = {}
-     # Per service information and cost
+     """Drive types available (e.g. EBS type -> Drive with cost per
+     GiB/year, etc.)"""
+
      services: Dict[str, Service] = {}
+     """Managed services available (e.g. service name -> Service with
+     params, cost, etc.)"""


  class GlobalHardware(ExcludeUnsetModel):
@@ -551,49 +616,68 @@ class Pricing(ExcludeUnsetModel):
  ###############################################################################


+ @enum_docstrings
  class AccessPattern(str, Enum):
+     """The access pattern determines capacity planning priorities: latency-sensitive
+     services target low P99 latency, while throughput-oriented services optimize
+     for maximum requests per second.
+     """
+
      latency = "latency"
+     """Latency-sensitive - optimize for low P99 latency, typical for
+     user-facing services"""
+
      throughput = "throughput"
+     """Throughput-oriented - optimize for maximum RPS/bandwidth, typical
+     for batch processing"""


+ @enum_docstrings
  class AccessConsistency(str, Enum):
-     """See https://jepsen.io/consistency
-
+     """
      Generally speaking consistency is expensive, so models need to know what
      kind of consistency will be required in order to estimate CPU usage
      within a factor of 4-5x correctly.
+
+     See: https://jepsen.io/consistency for detailed consistency model definitions.
+
+     Ordered from weakest (cheapest) to strongest (most expensive) consistency:
+     - never, best_effort, eventual, read_your_writes (single-item consistency)
+     - linearizable, linearizable_stale (single-item with ordering)
+     - serializable, serializable_stale (multi-item transactional)
      """

-     # You cannot read writes ever
      never = "never"
+     """No read guarantees - writes may never be visible (e.g.,
+     fire-and-forget logging)"""

-     #
-     # Single item consistency (most services)
-     #
-
-     # Best Effort: we might lose writes or reads might be stale or missing.
-     # Most caches offer this level of consistency.
-     # Eventual: We will eventually reflect the latest successful write but
-     # there is some (often large) time bound on that eventuality.
-     # Read-Your-Writes: The first "consistent" offering.
      best_effort = "best-effort"
+     """Best effort - writes/reads may be lost or stale, no guarantees
+     (e.g., most caches)"""
+
      eventual = "eventual"
+     """Eventual consistency - writes eventually visible with unbounded time
+     delay (e.g., DNS, S3)"""
+
      read_your_writes = "read-your-writes"
-     # Fully lineralizable, writes and reads
+     """Read-your-writes - clients see their own writes immediately (e.g.,
+     DynamoDB consistent reads)"""
+
      linearizable = "linearizable"
-     # Writes are linerizable but stale reads are possible (e.g. ZK)
-     linearizable_stale = "linearizable-stale"
+     """Linearizable - all operations appear atomic and in real-time order
+     (e.g., etcd, Consul)"""

-     #
-     # Multiple item consistency (often "transactional" or "acid" services)
-     #
+     linearizable_stale = "linearizable-stale"
+     """Linearizable writes, stale reads allowed - writes ordered, reads may
+     lag (e.g., ZooKeeper)"""

-     # All operations are serializable.
-     # (e.g. CRDB in default settings)
      serializable = "serializable"
-     # Writes are serializable but stale reads are possible
-     # (e.g. CRDB with stale reads enabled, MySQL with read replicas, etc ...)
+     """Serializable transactions - multi-item ACID transactions (e.g.,
+     CockroachDB default, Spanner)"""
+
      serializable_stale = "serializable-stale"
+     """Serializable writes, stale reads - ACID writes with lagging reads
+     (e.g., CRDB stale reads, MySQL replicas)"""


  AVG_ITEM_SIZE_BYTES: int = 1024
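
Because AccessConsistency is now decorated with @enum_docstrings, these per-member descriptions surface in generated JSON schemas. A small sketch, assuming pydantic v2:

```python
import json

from pydantic import TypeAdapter

from service_capacity_modeling.interface import AccessConsistency

schema = TypeAdapter(AccessConsistency).json_schema()
# Each member contributes a oneOf entry; "eventual" is the third member:
print(json.dumps(schema["oneOf"][2], indent=2))
# {
#   "const": "eventual",
#   "title": "eventual",
#   "description": "Eventual consistency - writes eventually visible ..."
# }
```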
@@ -712,20 +796,17 @@ class DataShape(ExcludeUnsetModel):
          ),
      )

-     # How compressible is this dataset. Note that databases might offer
-     # better or worse compression strategies that will impact this
-     # Note that the ratio here is the forward ratio, e.g.
-     # A ratio of 2 means 2:1 compression (0.5 on disk size)
-     # A ratio of 5 means 5:1 compression (0.2 on disk size)
      estimated_compression_ratio: Interval = certain_float(1)
+     """Data compression ratio (forward ratio). Examples:
+     - 2 means 2:1 compression (0.5x on-disk size)
+     - 5 means 5:1 compression (0.2x on-disk size)
+     Note: databases may have different compression strategies affecting this."""

-     # How much fixed memory must be provisioned per instance for the
-     # application (e.g. for process heap memory)
      reserved_instance_app_mem_gib: float = 2
+     """Fixed memory per instance for the application (e.g. process heap memory)"""

-     # How much fixed memory must be provisioned per instance for the
-     # system (e.g. for kernel and other system processes)
      reserved_instance_system_mem_gib: float = 1
+     """Fixed memory per instance for the system (e.g. kernel and system processes)"""

      # How durable does this dataset need to be. We want to provision
      # sufficient replication and backups of data to achieve the target
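
A quick worked example of the forward ratio described in the `estimated_compression_ratio` docstring:

```python
# 100 GiB of logical data with 5:1 compression lands at 20 GiB on disk.
dataset_gib = 100
compression_ratio = 5  # forward ratio, i.e. 5:1
on_disk_gib = dataset_gib / compression_ratio  # -> 20.0 (0.2x of logical size)
```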
@@ -769,6 +850,7 @@ class CurrentClusters(ExcludeUnsetModel):
      services: Sequence[ServiceCapacity] = []


+ @enum_docstrings
  class BufferComponent(str, Enum):
      """Represents well known buffer components such as compute and storage

@@ -777,15 +859,23 @@ class BufferComponent(str, Enum):
      the Buffers interface itself (should be str).
      """

-     # [Query Pattern] a.k.a. "Traffic" related buffers, e.g. CPU and Network
      compute = "compute"
-     # [Data Shape] a.k.a. "Dataset" related buffers, e.g. Disk and Memory
+     """[Query Pattern] a.k.a. "Traffic" related buffers, e.g. CPU and Network"""
+
      storage = "storage"
-     # Resource specific component
+     """[Data Shape] a.k.a. "Dataset" related buffers, e.g. Disk and Memory"""
+
      cpu = "cpu"
+     """Resource specific component - CPU"""
+
      network = "network"
+     """Resource specific component - Network"""
+
      disk = "disk"
+     """Resource specific component - Disk"""
+
      memory = "memory"
+     """Resource specific component - Memory"""

      @staticmethod
      def is_generic(component: str) -> bool:
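
A usage sketch of the generic vs. resource-specific split (assuming, as the docstrings above indicate, that compute/storage are the generic components; since members are str, plain strings work too):

```python
from service_capacity_modeling.interface import BufferComponent

BufferComponent.is_generic(BufferComponent.compute)  # True: generic "traffic" bucket
BufferComponent.is_generic("storage")                # True: members are plain str
BufferComponent.is_generic(BufferComponent.disk)     # False: resource-specific
```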
@@ -796,46 +886,64 @@ class BufferComponent(str, Enum):
          return not BufferComponent.is_generic(component)


+ @enum_docstrings
  class BufferIntent(str, Enum):
-     # Most buffers show "desired" buffer, this is the default
+     """Defines the intent of buffer directives for capacity planning"""
+
      desired = "desired"
-     # ratio on top of existing buffers to ensure exists. Generally combined
-     # with a different desired buffer to ensure we don't just scale needlessly
-     # This means we can scale up or down as as long as we meet the desired buffer.
+     """Most buffers express the "desired" buffer; this is the default"""
+
      scale = "scale"
+     """A ratio on top of existing buffers that must be maintained. Generally combined
+     with a different desired buffer to ensure we don't just scale needlessly. This
+     means we can scale up or down as long as we meet the desired buffer."""

-     # DEPRECATED: Use scale_up/scale_down instead
-     # Ignores model preferences, just preserve existing buffers
-     # We rarely actually want to do this since it can cause severe over provisioning
      preserve = "preserve"
+     """DEPRECATED - Use scale_up/scale_down instead. Ignores model preferences and
+     just preserves existing buffers. We rarely actually want to do this since it can
+     cause severe over-provisioning."""

-     # Scale up if necessary to meet the desired buffer.
-     # If the existing resource is over-provisioned, do not reduce the requirement.
-     # If under-provisioned, the requirement can be increased to meet the desired buffer.
-     # Example: need 20 cores but have 10 → scale up to 20 cores.
-     # Example 2: need 20 cores but have 40 → do not scale down and require at
-     # least 40 cores
      scale_up = "scale_up"
-     # Scale down if necessary to meet the desired buffer.
-     # If the existing resource is under-provisioned, do not increase the requirement.
-     # If over-provisioned, the requirement can be decreased to meet the desired buffer.
-     # Example: need 20 cores but have 10 → maintain buffer and do not scale up.
-     # Example 2: need 20 cores but have 40 → scale down to 20 cores.
+     """Scale up if necessary to meet the desired buffer. If the existing resource is
+     over-provisioned, do not reduce the requirement. If under-provisioned, the
+     requirement can be increased to meet the desired buffer.
+
+     Example: need 20 cores but have 10 → scale up to 20 cores.
+
+     Example 2: need 20 cores but have 40 → do not scale down and require
+     at least 40 cores."""
+
      scale_down = "scale_down"
+     """Scale down if necessary to meet the desired buffer. If the existing resource is
+     under-provisioned, do not increase the requirement. If over-provisioned, the
+     requirement can be decreased to meet the desired buffer.
+
+     Example: need 20 cores but have 10 → maintain buffer and do not scale up.
+
+     Example 2: need 20 cores but have 40 → scale down to 20 cores."""


  class Buffer(ExcludeUnsetModel):
-     # The value of the buffer expressed as a ratio over "normal" load e.g. 1.5x
+     """Represents a buffer (headroom) directive for capacity planning"""
+
      ratio: float = 1.0
-     # What is the intent of this buffer directive, almost always is desired
+     """The buffer value expressed as a ratio over normal load (e.g. 1.5 =
+     50% headroom)"""
+
      intent: BufferIntent = BufferIntent.desired
-     # The components of buffer this influences, almost always is "compute" (IPC success)
+     """The intent of this buffer directive (almost always 'desired')"""
+
      components: List[str] = [BufferComponent.compute]
-     # If this buffer was made up of other buffers, what contributed
+     """The capacity components this buffer influences (almost always
+     'compute' for IPC success)"""
+
      sources: Dict[str, Buffer] = {}
-     # An optional breadcrumb / context for why this buffer exists.
-     # E.g. "Background processing" or "bursty workload"
+     """If this buffer was composed from other buffers, what
+     contributed"""
+
      explanation: str = ""
+     """Optional context for why this buffer exists (e.g. 'background
+     processing', 'bursty workload')"""


  class Buffers(ExcludeUnsetModel):
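
The scale_up/scale_down semantics amount to clamping the modeled requirement against what is already provisioned. A minimal sketch restating the docstring examples (not the library's implementation):

```python
from service_capacity_modeling.interface import BufferIntent


def apply_intent(needed: float, current: float, intent: BufferIntent) -> float:
    """Hypothetical helper restating the BufferIntent docstring examples."""
    if intent == BufferIntent.scale_up:
        return max(needed, current)  # never shrink below what exists
    if intent == BufferIntent.scale_down:
        return min(needed, current)  # never grow beyond what exists
    return needed  # desired: follow the modeled need


apply_intent(20, 10, BufferIntent.scale_up)    # 20 -> scale up
apply_intent(20, 40, BufferIntent.scale_up)    # 40 -> keep at least 40
apply_intent(20, 40, BufferIntent.scale_down)  # 20 -> shrink to 20
apply_intent(20, 10, BufferIntent.scale_down)  # 10 -> do not scale up
```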
--- a/service_capacity_modeling/models/org/netflix/kafka.py
+++ b/service_capacity_modeling/models/org/netflix/kafka.py
@@ -79,7 +79,6 @@ def _estimate_kafka_requirement( # pylint: disable=too-many-positional-argument
      copies_per_region: int,
      hot_retention_seconds: float,
      zones_per_region: int = 3,
-     required_zone_size: Optional[int] = None,
  ) -> Tuple[CapacityRequirement, Tuple[str, ...]]:
      """Estimate the capacity required for one zone given a regional desire

@@ -119,7 +118,6 @@ def _estimate_kafka_requirement( # pylint: disable=too-many-positional-argument
      if (
          current_zonal_capacity
          and current_zonal_capacity.cluster_instance
-         and required_zone_size is not None
          and desires.current_clusters is not None
      ):
          # zonal_requirements_from_current uses the midpoint utilization of the
@@ -287,7 +285,6 @@ def _estimate_kafka_cluster_zonal( # noqa: C901
          zones_per_region=zones_per_region,
          copies_per_region=copies_per_region,
          hot_retention_seconds=hot_retention_seconds,
-         required_zone_size=required_zone_size,
      )

      # Account for sidecars and base system memory
--- a/service_capacity_modeling-0.3.85.dist-info/METADATA
+++ b/service_capacity_modeling-0.3.87.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: service-capacity-modeling
- Version: 0.3.85
+ Version: 0.3.87
  Summary: Contains utilities for modeling capacity for pluggable workloads
  Author: Joseph Lynch
  Author-email: josephl@netflix.com
--- a/service_capacity_modeling-0.3.85.dist-info/RECORD
+++ b/service_capacity_modeling-0.3.87.dist-info/RECORD
@@ -1,6 +1,7 @@
  service_capacity_modeling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  service_capacity_modeling/capacity_planner.py,sha256=JRagEFlg3u_zB1N5GzGsKAN55JZLad6p4IF_PmL8kcg,32780
- service_capacity_modeling/interface.py,sha256=isFUhMa1tPjdOVh6B2Cpb6HkGRlGdsgs4ygAY_U_eTQ,39223
+ service_capacity_modeling/enum_utils.py,sha256=-jf9IkSg28B8ekXXel1l34FZw5iF525EttyiVft0MLg,3768
+ service_capacity_modeling/interface.py,sha256=AV7QlogbCrdjl4nMQQhouyy-2_jNmngU6081NxEC5o4,42370
  service_capacity_modeling/stats.py,sha256=LCNUcQPfwF5hhIZwsfAsDe4ZbnuhDnl3vQHKfpK61Xc,6142
  service_capacity_modeling/hardware/__init__.py,sha256=P5ostvoSOMUqPODtepeFYb4qfTVH0E73mMFraP49rYU,9196
  service_capacity_modeling/hardware/profiles/__init__.py,sha256=7-y3JbCBkgzaAjFla2RIymREcImdZ51HTl3yn3vzoGw,1602
@@ -62,7 +63,7 @@ service_capacity_modeling/models/org/netflix/entity.py,sha256=VHgEwnGtJAKlhvbE2k
  service_capacity_modeling/models/org/netflix/evcache.py,sha256=x8KwoULVpDm8UXLXEXfH8_zvoBDs8jSfOQLkaWKFcOg,25654
  service_capacity_modeling/models/org/netflix/graphkv.py,sha256=7ncEhx9lLsN_vGIKNHkvWfDdKffG7cYe91Wr-DB7IjU,8659
  service_capacity_modeling/models/org/netflix/iso_date_math.py,sha256=oC5sgIXDqwOp6-5z2bdTkm-bJLlnzhqcONI_tspHjac,1137
- service_capacity_modeling/models/org/netflix/kafka.py,sha256=LT9T189Tvn22ZfiW9VyL8TXqjCGW3DOvVVIIPiT5rnM,25650
+ service_capacity_modeling/models/org/netflix/kafka.py,sha256=JwiRsXOK5hYEr2YZL0SpHK2Z61PhbrItHXitAPdzty8,25514
  service_capacity_modeling/models/org/netflix/key_value.py,sha256=WH8NblHqHwnAAumB2Zz1Qd4NBFWDQEQ1rpBcP3fVVQk,9409
  service_capacity_modeling/models/org/netflix/postgres.py,sha256=LBxDqkc-lYxDBu2VwNLuf2Q4o4hU3jPwu4YSt33Oe-8,4128
  service_capacity_modeling/models/org/netflix/rds.py,sha256=8GVmpMhTisZPdT-mP1Sx5U7VAF32lnTI27iYPfGg9CY,10930
@@ -76,9 +77,9 @@ service_capacity_modeling/tools/auto_shape.py,sha256=Jx9H2ay9-H_kUDjtB141owQNxGF
  service_capacity_modeling/tools/fetch_pricing.py,sha256=Qp-XMymkY1dvtyS51RufmEpfgOHv-IQ-XyzS8wp2-qM,4021
  service_capacity_modeling/tools/generate_missing.py,sha256=F7YqvMJAV4nZc20GNrlIsnQSF8_77sLgwYZqc5k4LDg,3099
  service_capacity_modeling/tools/instance_families.py,sha256=e5RuYkCLUITvsAazDH12B6KjX_PaBsv6Ne3mj0HK_sQ,9223
- service_capacity_modeling-0.3.85.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
- service_capacity_modeling-0.3.85.dist-info/METADATA,sha256=iypW6C1C9FFmoK5ktOuN2kfEMcwMl0OAPdpa_q1rST8,10366
- service_capacity_modeling-0.3.85.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- service_capacity_modeling-0.3.85.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
- service_capacity_modeling-0.3.85.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
- service_capacity_modeling-0.3.85.dist-info/RECORD,,
+ service_capacity_modeling-0.3.87.dist-info/licenses/LICENSE,sha256=nl_Lt5v9VvJ-5lWJDT4ddKAG-VZ-2IaLmbzpgYDz2hU,11343
+ service_capacity_modeling-0.3.87.dist-info/METADATA,sha256=inokLEVxd6bZYZIVATdZpnvzEu-98z1teRHWVAVLEDU,10366
+ service_capacity_modeling-0.3.87.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ service_capacity_modeling-0.3.87.dist-info/entry_points.txt,sha256=ZsjzpG5SomWpT1zCE19n1uSXKH2gTI_yc33sdl0vmJg,146
+ service_capacity_modeling-0.3.87.dist-info/top_level.txt,sha256=H8XjTCLgR3enHq5t3bIbxt9SeUkUT8HT_SDv2dgIT_A,26
+ service_capacity_modeling-0.3.87.dist-info/RECORD,,