cribl-control-plane 0.2.1rc7__py3-none-any.whl → 0.3.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane has been flagged as possibly problematic.

Files changed (179)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/errors/__init__.py +5 -8
  3. cribl_control_plane/errors/{healthserverstatus_error.py → healthstatus_error.py} +9 -10
  4. cribl_control_plane/groups_sdk.py +28 -52
  5. cribl_control_plane/health.py +16 -22
  6. cribl_control_plane/models/__init__.py +54 -217
  7. cribl_control_plane/models/appmode.py +14 -0
  8. cribl_control_plane/models/authtoken.py +1 -5
  9. cribl_control_plane/models/cacheconnection.py +0 -20
  10. cribl_control_plane/models/configgroup.py +7 -55
  11. cribl_control_plane/models/configgroupcloud.py +1 -11
  12. cribl_control_plane/models/createconfiggroupbyproductop.py +5 -17
  13. cribl_control_plane/models/createroutesappendbyidop.py +2 -2
  14. cribl_control_plane/models/createversionundoop.py +3 -3
  15. cribl_control_plane/models/cribllakedataset.py +1 -11
  16. cribl_control_plane/models/cribllakedatasetupdate.py +1 -11
  17. cribl_control_plane/models/datasetmetadata.py +1 -11
  18. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +0 -11
  19. cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
  20. cribl_control_plane/models/distributedsummary.py +0 -6
  21. cribl_control_plane/models/error.py +16 -0
  22. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +0 -20
  23. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +0 -20
  24. cribl_control_plane/models/getconfiggroupbyproductandidop.py +0 -11
  25. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +0 -11
  26. cribl_control_plane/models/gethealthinfoop.py +17 -0
  27. cribl_control_plane/models/getsummaryop.py +0 -11
  28. cribl_control_plane/models/hbcriblinfo.py +3 -24
  29. cribl_control_plane/models/{healthserverstatus.py → healthstatus.py} +8 -27
  30. cribl_control_plane/models/heartbeatmetadata.py +0 -3
  31. cribl_control_plane/models/input.py +78 -80
  32. cribl_control_plane/models/inputappscope.py +17 -80
  33. cribl_control_plane/models/inputazureblob.py +1 -33
  34. cribl_control_plane/models/inputcollection.py +1 -24
  35. cribl_control_plane/models/inputconfluentcloud.py +18 -195
  36. cribl_control_plane/models/inputcribl.py +1 -24
  37. cribl_control_plane/models/inputcriblhttp.py +17 -62
  38. cribl_control_plane/models/inputcribllakehttp.py +17 -62
  39. cribl_control_plane/models/inputcriblmetrics.py +1 -24
  40. cribl_control_plane/models/inputcribltcp.py +17 -62
  41. cribl_control_plane/models/inputcrowdstrike.py +1 -54
  42. cribl_control_plane/models/inputdatadogagent.py +17 -62
  43. cribl_control_plane/models/inputdatagen.py +1 -24
  44. cribl_control_plane/models/inputedgeprometheus.py +34 -147
  45. cribl_control_plane/models/inputelastic.py +27 -119
  46. cribl_control_plane/models/inputeventhub.py +1 -182
  47. cribl_control_plane/models/inputexec.py +1 -33
  48. cribl_control_plane/models/inputfile.py +3 -42
  49. cribl_control_plane/models/inputfirehose.py +17 -62
  50. cribl_control_plane/models/inputgooglepubsub.py +1 -36
  51. cribl_control_plane/models/inputgrafana.py +32 -157
  52. cribl_control_plane/models/inputhttp.py +17 -62
  53. cribl_control_plane/models/inputhttpraw.py +17 -62
  54. cribl_control_plane/models/inputjournalfiles.py +1 -24
  55. cribl_control_plane/models/inputkafka.py +17 -189
  56. cribl_control_plane/models/inputkinesis.py +1 -80
  57. cribl_control_plane/models/inputkubeevents.py +1 -24
  58. cribl_control_plane/models/inputkubelogs.py +1 -33
  59. cribl_control_plane/models/inputkubemetrics.py +1 -33
  60. cribl_control_plane/models/inputloki.py +17 -71
  61. cribl_control_plane/models/inputmetrics.py +17 -62
  62. cribl_control_plane/models/inputmodeldriventelemetry.py +17 -62
  63. cribl_control_plane/models/inputmsk.py +18 -81
  64. cribl_control_plane/models/inputnetflow.py +1 -24
  65. cribl_control_plane/models/inputoffice365mgmt.py +1 -67
  66. cribl_control_plane/models/inputoffice365msgtrace.py +1 -67
  67. cribl_control_plane/models/inputoffice365service.py +1 -67
  68. cribl_control_plane/models/inputopentelemetry.py +16 -92
  69. cribl_control_plane/models/inputprometheus.py +34 -138
  70. cribl_control_plane/models/inputprometheusrw.py +17 -71
  71. cribl_control_plane/models/inputrawudp.py +1 -24
  72. cribl_control_plane/models/inputs3.py +1 -45
  73. cribl_control_plane/models/inputs3inventory.py +1 -54
  74. cribl_control_plane/models/inputsecuritylake.py +1 -54
  75. cribl_control_plane/models/inputsnmp.py +1 -40
  76. cribl_control_plane/models/inputsplunk.py +17 -85
  77. cribl_control_plane/models/inputsplunkhec.py +16 -70
  78. cribl_control_plane/models/inputsplunksearch.py +1 -63
  79. cribl_control_plane/models/inputsqs.py +1 -56
  80. cribl_control_plane/models/inputsyslog.py +32 -121
  81. cribl_control_plane/models/inputsystemmetrics.py +9 -142
  82. cribl_control_plane/models/inputsystemstate.py +1 -33
  83. cribl_control_plane/models/inputtcp.py +17 -81
  84. cribl_control_plane/models/inputtcpjson.py +17 -71
  85. cribl_control_plane/models/inputwef.py +1 -71
  86. cribl_control_plane/models/inputwindowsmetrics.py +9 -129
  87. cribl_control_plane/models/inputwineventlogs.py +1 -60
  88. cribl_control_plane/models/inputwiz.py +1 -45
  89. cribl_control_plane/models/inputwizwebhook.py +17 -62
  90. cribl_control_plane/models/inputzscalerhec.py +16 -70
  91. cribl_control_plane/models/jobinfo.py +1 -4
  92. cribl_control_plane/models/jobstatus.py +3 -34
  93. cribl_control_plane/models/listconfiggroupbyproductop.py +0 -11
  94. cribl_control_plane/models/logininfo.py +3 -3
  95. cribl_control_plane/models/masterworkerentry.py +1 -11
  96. cribl_control_plane/models/nodeprovidedinfo.py +1 -11
  97. cribl_control_plane/models/nodeupgradestatus.py +0 -38
  98. cribl_control_plane/models/output.py +88 -93
  99. cribl_control_plane/models/outputazureblob.py +1 -110
  100. cribl_control_plane/models/outputazuredataexplorer.py +87 -452
  101. cribl_control_plane/models/outputazureeventhub.py +19 -281
  102. cribl_control_plane/models/outputazurelogs.py +19 -115
  103. cribl_control_plane/models/outputchronicle.py +19 -115
  104. cribl_control_plane/models/outputclickhouse.py +19 -155
  105. cribl_control_plane/models/outputcloudwatch.py +19 -106
  106. cribl_control_plane/models/outputconfluentcloud.py +38 -311
  107. cribl_control_plane/models/outputcriblhttp.py +19 -135
  108. cribl_control_plane/models/outputcribllake.py +1 -97
  109. cribl_control_plane/models/outputcribltcp.py +19 -132
  110. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +20 -129
  111. cribl_control_plane/models/outputdatadog.py +19 -159
  112. cribl_control_plane/models/outputdataset.py +19 -143
  113. cribl_control_plane/models/outputdiskspool.py +1 -11
  114. cribl_control_plane/models/outputdls3.py +1 -152
  115. cribl_control_plane/models/outputdynatracehttp.py +19 -160
  116. cribl_control_plane/models/outputdynatraceotlp.py +19 -160
  117. cribl_control_plane/models/outputelastic.py +19 -163
  118. cribl_control_plane/models/outputelasticcloud.py +19 -140
  119. cribl_control_plane/models/outputexabeam.py +1 -61
  120. cribl_control_plane/models/outputfilesystem.py +1 -87
  121. cribl_control_plane/models/outputgooglechronicle.py +20 -166
  122. cribl_control_plane/models/outputgooglecloudlogging.py +20 -131
  123. cribl_control_plane/models/outputgooglecloudstorage.py +1 -136
  124. cribl_control_plane/models/outputgooglepubsub.py +19 -106
  125. cribl_control_plane/models/outputgrafanacloud.py +37 -288
  126. cribl_control_plane/models/outputgraphite.py +19 -105
  127. cribl_control_plane/models/outputhoneycomb.py +19 -115
  128. cribl_control_plane/models/outputhumiohec.py +19 -126
  129. cribl_control_plane/models/outputinfluxdb.py +19 -130
  130. cribl_control_plane/models/outputkafka.py +34 -302
  131. cribl_control_plane/models/outputkinesis.py +19 -133
  132. cribl_control_plane/models/outputloki.py +17 -129
  133. cribl_control_plane/models/outputminio.py +1 -145
  134. cribl_control_plane/models/outputmsk.py +34 -193
  135. cribl_control_plane/models/outputnewrelic.py +19 -136
  136. cribl_control_plane/models/outputnewrelicevents.py +20 -128
  137. cribl_control_plane/models/outputopentelemetry.py +19 -178
  138. cribl_control_plane/models/outputprometheus.py +19 -115
  139. cribl_control_plane/models/outputring.py +1 -31
  140. cribl_control_plane/models/outputs3.py +1 -152
  141. cribl_control_plane/models/outputsecuritylake.py +1 -114
  142. cribl_control_plane/models/outputsentinel.py +19 -135
  143. cribl_control_plane/models/outputsentineloneaisiem.py +20 -134
  144. cribl_control_plane/models/outputservicenow.py +19 -168
  145. cribl_control_plane/models/outputsignalfx.py +19 -115
  146. cribl_control_plane/models/outputsns.py +17 -113
  147. cribl_control_plane/models/outputsplunk.py +19 -153
  148. cribl_control_plane/models/outputsplunkhec.py +19 -208
  149. cribl_control_plane/models/outputsplunklb.py +19 -182
  150. cribl_control_plane/models/outputsqs.py +17 -124
  151. cribl_control_plane/models/outputstatsd.py +19 -105
  152. cribl_control_plane/models/outputstatsdext.py +19 -105
  153. cribl_control_plane/models/outputsumologic.py +19 -117
  154. cribl_control_plane/models/outputsyslog.py +96 -259
  155. cribl_control_plane/models/outputtcpjson.py +19 -141
  156. cribl_control_plane/models/outputwavefront.py +19 -115
  157. cribl_control_plane/models/outputwebhook.py +19 -161
  158. cribl_control_plane/models/outputxsiam.py +17 -113
  159. cribl_control_plane/models/packinfo.py +5 -8
  160. cribl_control_plane/models/packinstallinfo.py +5 -8
  161. cribl_control_plane/models/resourcepolicy.py +0 -11
  162. cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
  163. cribl_control_plane/models/routeconf.py +4 -3
  164. cribl_control_plane/models/runnablejobcollection.py +9 -72
  165. cribl_control_plane/models/runnablejobexecutor.py +9 -32
  166. cribl_control_plane/models/runnablejobscheduledsearch.py +9 -23
  167. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +0 -11
  168. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +0 -11
  169. cribl_control_plane/packs.py +7 -202
  170. cribl_control_plane/routes_sdk.py +6 -6
  171. cribl_control_plane/tokens.py +15 -23
  172. {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/METADATA +9 -50
  173. cribl_control_plane-0.3.0a1.dist-info/RECORD +330 -0
  174. cribl_control_plane/models/groupcreaterequest.py +0 -171
  175. cribl_control_plane/models/outpostnodeinfo.py +0 -16
  176. cribl_control_plane/models/outputdatabricks.py +0 -482
  177. cribl_control_plane/models/updatepacksop.py +0 -25
  178. cribl_control_plane-0.2.1rc7.dist-info/RECORD +0 -331
  179. {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputsqs.py
@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,20 +18,15 @@ class OutputSqsType(str, Enum):
 class OutputSqsQueueType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The queue type used (or created). Defaults to Standard."""
 
-    # Standard
     STANDARD = "standard"
-    # FIFO
     FIFO = "fifo"
 
 
 class OutputSqsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
-    # Auto
     AUTO = "auto"
-    # Manual
     MANUAL = "manual"
-    # Secret Key pair
     SECRET = "secret"
 
 
@@ -46,43 +40,33 @@ class OutputSqsSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputSqsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"
 
 
-class OutputSqsMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputSqsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
 class OutputSqsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"
 
 
+class OutputSqsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputSqsPqControlsTypedDict(TypedDict):
     pass
 
@@ -148,16 +132,6 @@ class OutputSqsTypedDict(TypedDict):
     aws_api_key: NotRequired[str]
     aws_secret: NotRequired[str]
     r"""Select or create a stored secret that references your access key and secret key"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputSqsMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -168,6 +142,8 @@ class OutputSqsTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputSqsQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputSqsMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputSqsPqControlsTypedDict]
 
 
@@ -307,32 +283,6 @@ class OutputSqs(BaseModel):
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[Optional[OutputSqsMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSqsMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -363,69 +313,12 @@ class OutputSqs(BaseModel):
     ] = OutputSqsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[Optional[OutputSqsMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputSqsMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputSqsPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    @field_serializer("queue_type")
-    def serialize_queue_type(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSqsQueueType(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("aws_authentication_method")
-    def serialize_aws_authentication_method(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSqsAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("signature_version")
-    def serialize_signature_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSqsSignatureVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSqsBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSqsMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSqsCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputSqsQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value
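
Taken together, the OutputSqs hunks above drop the persistent-queue tuning fields (pqStrictOrdering, pqRatePerSec, pqMaxBufferSize, pqMaxBackpressureSec) and all of the field_serializer hooks, and relocate pqMode after pqOnBackpressure while keeping it an open enum validated with PlainValidator(validate_open_enum(False)). The following is only a minimal sketch of what construction might look like against 0.3.0a1; it assumes queueName and type are the model's required fields and that "sqs" is the type value, none of which is shown in the truncated hunks.

# Hypothetical sketch against cribl-control-plane 0.3.0a1, not documented SDK usage.
# Importing from the concrete module rather than relying on what models/__init__.py
# re-exports after its +54 -217 change.
from cribl_control_plane.models.outputsqs import OutputSqs

dest = OutputSqs.model_validate(
    {
        "type": "sqs",               # assumed discriminator value
        "queueName": "my-queue",     # assumed required field
        "pqOnBackpressure": "drop",  # OutputSqsQueueFullBehavior.DROP
        "pqMode": "backpressure",    # open enum; known values resolve to OutputSqsMode members
        # pqStrictOrdering, pqRatePerSec, pqMaxBufferSize, and pqMaxBackpressureSec
        # no longer exist on the 0.3.0a1 model.
    }
)

print(dest.pq_mode)
print(dest.model_dump(by_alias=True, exclude_none=True))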
cribl_control_plane/models/outputstatsd.py
@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,52 +18,40 @@ class OutputStatsdType(str, Enum):
 class OutputStatsdDestinationProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Protocol to use when communicating with the destination."""
 
-    # UDP
     UDP = "udp"
-    # TCP
     TCP = "tcp"
 
 
 class OutputStatsdBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"
 
 
-class OutputStatsdMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputStatsdCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
 class OutputStatsdQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"
 
 
+class OutputStatsdMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputStatsdPqControlsTypedDict(TypedDict):
     pass
 
@@ -106,16 +93,6 @@ class OutputStatsdTypedDict(TypedDict):
     r"""Amount of time (milliseconds) to wait for a write to complete before assuming connection is dead"""
     on_backpressure: NotRequired[OutputStatsdBackpressureBehavior]
     r"""How to handle events when all receivers are exerting backpressure"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputStatsdMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -126,6 +103,8 @@ class OutputStatsdTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputStatsdQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputStatsdMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputStatsdPqControlsTypedDict]
 
 
@@ -200,34 +179,6 @@ class OutputStatsd(BaseModel):
     ] = OutputStatsdBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputStatsdMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputStatsdMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -258,51 +209,14 @@ class OutputStatsd(BaseModel):
     ] = OutputStatsdQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputStatsdMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputStatsdMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputStatsdPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    @field_serializer("protocol")
-    def serialize_protocol(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputStatsdDestinationProtocol(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputStatsdBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputStatsdMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputStatsdCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputStatsdQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value
cribl_control_plane/models/outputstatsdext.py
@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,52 +18,40 @@ class OutputStatsdExtType(str, Enum):
 class OutputStatsdExtDestinationProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Protocol to use when communicating with the destination."""
 
-    # UDP
     UDP = "udp"
-    # TCP
     TCP = "tcp"
 
 
 class OutputStatsdExtBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"
 
 
-class OutputStatsdExtMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputStatsdExtCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
 class OutputStatsdExtQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"
 
 
+class OutputStatsdExtMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputStatsdExtPqControlsTypedDict(TypedDict):
     pass
 
@@ -106,16 +93,6 @@ class OutputStatsdExtTypedDict(TypedDict):
     r"""Amount of time (milliseconds) to wait for a write to complete before assuming connection is dead"""
     on_backpressure: NotRequired[OutputStatsdExtBackpressureBehavior]
     r"""How to handle events when all receivers are exerting backpressure"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputStatsdExtMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -126,6 +103,8 @@ class OutputStatsdExtTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputStatsdExtQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputStatsdExtMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputStatsdExtPqControlsTypedDict]
 
 
@@ -200,34 +179,6 @@ class OutputStatsdExt(BaseModel):
     ] = OutputStatsdExtBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputStatsdExtMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputStatsdExtMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
        Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -259,51 +210,14 @@ class OutputStatsdExt(BaseModel):
     ] = OutputStatsdExtQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputStatsdExtMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputStatsdExtMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputStatsdExtPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    @field_serializer("protocol")
-    def serialize_protocol(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputStatsdExtDestinationProtocol(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputStatsdExtBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputStatsdExtMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputStatsdExtCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputStatsdExtQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value
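
The OutputStatsd and OutputStatsdExt diffs make the same change as the OutputSqs one: the inline member comments and the pq* tuning fields go away, the field_serializer hooks are deleted, and each *Mode enum is redefined after its *QueueFullBehavior counterpart with members in error/backpressure/always order. A small sketch that checks this shared shape, importing from the concrete modules rather than relying on whatever cribl_control_plane.models re-exports in 0.3.0a1:

# Sketch only: verifies the three PQ-mode open enums shown in the diffs above expose
# the same members in the same definition order in 0.3.0a1.
from cribl_control_plane.models.outputsqs import OutputSqsMode
from cribl_control_plane.models.outputstatsd import OutputStatsdMode
from cribl_control_plane.models.outputstatsdext import OutputStatsdExtMode

for mode_enum in (OutputSqsMode, OutputStatsdMode, OutputStatsdExtMode):
    # Enum iteration follows definition order: error, backpressure, always.
    assert [member.value for member in mode_enum] == ["error", "backpressure", "always"]
    assert mode_enum.ERROR.value == "error"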