cribl-control-plane 0.0.50__py3-none-any.whl → 0.0.50rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (176)
  1. cribl_control_plane/_version.py +3 -5
  2. cribl_control_plane/errors/healthstatus_error.py +8 -2
  3. cribl_control_plane/groups_sdk.py +4 -4
  4. cribl_control_plane/health.py +6 -2
  5. cribl_control_plane/models/__init__.py +56 -31
  6. cribl_control_plane/models/cacheconnection.py +10 -2
  7. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  8. cribl_control_plane/models/cloudprovider.py +2 -1
  9. cribl_control_plane/models/configgroup.py +24 -4
  10. cribl_control_plane/models/configgroupcloud.py +6 -2
  11. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  12. cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
  13. cribl_control_plane/models/createversionpushop.py +5 -5
  14. cribl_control_plane/models/cribllakedataset.py +8 -2
  15. cribl_control_plane/models/datasetmetadata.py +8 -2
  16. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  17. cribl_control_plane/models/error.py +16 -0
  18. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  19. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  20. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  21. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  22. cribl_control_plane/models/gethealthinfoop.py +17 -0
  23. cribl_control_plane/models/getsummaryop.py +7 -2
  24. cribl_control_plane/models/getversionshowop.py +6 -5
  25. cribl_control_plane/models/gitinfo.py +14 -3
  26. cribl_control_plane/models/gitshowresult.py +19 -0
  27. cribl_control_plane/models/hbcriblinfo.py +24 -3
  28. cribl_control_plane/models/healthstatus.py +7 -4
  29. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  30. cribl_control_plane/models/input.py +65 -63
  31. cribl_control_plane/models/inputappscope.py +34 -14
  32. cribl_control_plane/models/inputazureblob.py +17 -6
  33. cribl_control_plane/models/inputcollection.py +11 -4
  34. cribl_control_plane/models/inputconfluentcloud.py +41 -32
  35. cribl_control_plane/models/inputcribl.py +11 -4
  36. cribl_control_plane/models/inputcriblhttp.py +23 -8
  37. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  38. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  39. cribl_control_plane/models/inputcribltcp.py +23 -8
  40. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  41. cribl_control_plane/models/inputdatadogagent.py +24 -8
  42. cribl_control_plane/models/inputdatagen.py +11 -4
  43. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  44. cribl_control_plane/models/inputelastic.py +40 -14
  45. cribl_control_plane/models/inputeventhub.py +15 -6
  46. cribl_control_plane/models/inputexec.py +14 -6
  47. cribl_control_plane/models/inputfile.py +15 -6
  48. cribl_control_plane/models/inputfirehose.py +23 -8
  49. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  50. cribl_control_plane/models/inputgrafana.py +67 -24
  51. cribl_control_plane/models/inputhttp.py +23 -8
  52. cribl_control_plane/models/inputhttpraw.py +23 -8
  53. cribl_control_plane/models/inputjournalfiles.py +12 -4
  54. cribl_control_plane/models/inputkafka.py +41 -28
  55. cribl_control_plane/models/inputkinesis.py +38 -14
  56. cribl_control_plane/models/inputkubeevents.py +11 -4
  57. cribl_control_plane/models/inputkubelogs.py +16 -8
  58. cribl_control_plane/models/inputkubemetrics.py +16 -8
  59. cribl_control_plane/models/inputloki.py +29 -10
  60. cribl_control_plane/models/inputmetrics.py +23 -8
  61. cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
  62. cribl_control_plane/models/inputmsk.py +48 -30
  63. cribl_control_plane/models/inputnetflow.py +11 -4
  64. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  65. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  66. cribl_control_plane/models/inputoffice365service.py +35 -16
  67. cribl_control_plane/models/inputopentelemetry.py +38 -16
  68. cribl_control_plane/models/inputprometheus.py +50 -18
  69. cribl_control_plane/models/inputprometheusrw.py +30 -10
  70. cribl_control_plane/models/inputrawudp.py +11 -4
  71. cribl_control_plane/models/inputs3.py +21 -8
  72. cribl_control_plane/models/inputs3inventory.py +26 -10
  73. cribl_control_plane/models/inputsecuritylake.py +27 -10
  74. cribl_control_plane/models/inputsnmp.py +16 -6
  75. cribl_control_plane/models/inputsplunk.py +33 -12
  76. cribl_control_plane/models/inputsplunkhec.py +29 -10
  77. cribl_control_plane/models/inputsplunksearch.py +33 -14
  78. cribl_control_plane/models/inputsqs.py +27 -10
  79. cribl_control_plane/models/inputsyslog.py +43 -16
  80. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  81. cribl_control_plane/models/inputsystemstate.py +16 -8
  82. cribl_control_plane/models/inputtcp.py +29 -10
  83. cribl_control_plane/models/inputtcpjson.py +29 -10
  84. cribl_control_plane/models/inputwef.py +37 -14
  85. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  86. cribl_control_plane/models/inputwineventlogs.py +20 -10
  87. cribl_control_plane/models/inputwiz.py +21 -8
  88. cribl_control_plane/models/inputwizwebhook.py +23 -8
  89. cribl_control_plane/models/inputzscalerhec.py +29 -10
  90. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  91. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  92. cribl_control_plane/models/masterworkerentry.py +7 -2
  93. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  94. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  95. cribl_control_plane/models/nodeprovidedinfo.py +3 -0
  96. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  97. cribl_control_plane/models/nodeupgradestate.py +2 -1
  98. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  99. cribl_control_plane/models/output.py +84 -79
  100. cribl_control_plane/models/outputazureblob.py +48 -18
  101. cribl_control_plane/models/outputazuredataexplorer.py +73 -28
  102. cribl_control_plane/models/outputazureeventhub.py +40 -18
  103. cribl_control_plane/models/outputazurelogs.py +35 -12
  104. cribl_control_plane/models/outputclickhouse.py +55 -20
  105. cribl_control_plane/models/outputcloudwatch.py +29 -10
  106. cribl_control_plane/models/outputconfluentcloud.py +71 -44
  107. cribl_control_plane/models/outputcriblhttp.py +44 -16
  108. cribl_control_plane/models/outputcribllake.py +46 -16
  109. cribl_control_plane/models/outputcribltcp.py +45 -18
  110. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
  111. cribl_control_plane/models/outputdatabricks.py +282 -0
  112. cribl_control_plane/models/outputdatadog.py +48 -20
  113. cribl_control_plane/models/outputdataset.py +46 -18
  114. cribl_control_plane/models/outputdiskspool.py +7 -2
  115. cribl_control_plane/models/outputdls3.py +68 -24
  116. cribl_control_plane/models/outputdynatracehttp.py +53 -20
  117. cribl_control_plane/models/outputdynatraceotlp.py +55 -22
  118. cribl_control_plane/models/outputelastic.py +43 -18
  119. cribl_control_plane/models/outputelasticcloud.py +36 -12
  120. cribl_control_plane/models/outputexabeam.py +29 -10
  121. cribl_control_plane/models/outputfilesystem.py +39 -14
  122. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  123. cribl_control_plane/models/outputgooglecloudlogging.py +50 -18
  124. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  125. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  126. cribl_control_plane/models/outputgrafanacloud.py +97 -32
  127. cribl_control_plane/models/outputgraphite.py +31 -14
  128. cribl_control_plane/models/outputhoneycomb.py +35 -12
  129. cribl_control_plane/models/outputhumiohec.py +43 -16
  130. cribl_control_plane/models/outputinfluxdb.py +42 -16
  131. cribl_control_plane/models/outputkafka.py +69 -40
  132. cribl_control_plane/models/outputkinesis.py +40 -16
  133. cribl_control_plane/models/outputloki.py +41 -16
  134. cribl_control_plane/models/outputminio.py +65 -24
  135. cribl_control_plane/models/outputmsk.py +77 -42
  136. cribl_control_plane/models/outputnewrelic.py +43 -18
  137. cribl_control_plane/models/outputnewrelicevents.py +41 -14
  138. cribl_control_plane/models/outputopentelemetry.py +67 -26
  139. cribl_control_plane/models/outputprometheus.py +35 -12
  140. cribl_control_plane/models/outputring.py +19 -8
  141. cribl_control_plane/models/outputs3.py +68 -26
  142. cribl_control_plane/models/outputsecuritylake.py +52 -18
  143. cribl_control_plane/models/outputsentinel.py +45 -18
  144. cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
  145. cribl_control_plane/models/outputservicenow.py +60 -24
  146. cribl_control_plane/models/outputsignalfx.py +37 -14
  147. cribl_control_plane/models/outputsns.py +36 -14
  148. cribl_control_plane/models/outputsplunk.py +60 -24
  149. cribl_control_plane/models/outputsplunkhec.py +35 -12
  150. cribl_control_plane/models/outputsplunklb.py +77 -30
  151. cribl_control_plane/models/outputsqs.py +41 -16
  152. cribl_control_plane/models/outputstatsd.py +30 -14
  153. cribl_control_plane/models/outputstatsdext.py +29 -12
  154. cribl_control_plane/models/outputsumologic.py +35 -12
  155. cribl_control_plane/models/outputsyslog.py +58 -24
  156. cribl_control_plane/models/outputtcpjson.py +52 -20
  157. cribl_control_plane/models/outputwavefront.py +35 -12
  158. cribl_control_plane/models/outputwebhook.py +58 -22
  159. cribl_control_plane/models/outputxsiam.py +35 -14
  160. cribl_control_plane/models/productscore.py +2 -1
  161. cribl_control_plane/models/rbacresource.py +2 -1
  162. cribl_control_plane/models/resourcepolicy.py +4 -2
  163. cribl_control_plane/models/routeconf.py +3 -4
  164. cribl_control_plane/models/runnablejobcollection.py +30 -13
  165. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  166. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  167. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  168. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  169. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
  170. cribl_control_plane/models/workertypes.py +2 -1
  171. {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/METADATA +1 -1
  172. cribl_control_plane-0.0.50rc2.dist-info/RECORD +327 -0
  173. cribl_control_plane/models/appmode.py +0 -13
  174. cribl_control_plane/models/routecloneconf.py +0 -13
  175. cribl_control_plane-0.0.50.dist-info/RECORD +0 -325
  176. {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/WHEEL +0 -0
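
The dominant pattern across these files is a regeneration of the models with "open" enums: each generated Enum gains metaclass=utils.OpenEnumMeta, and each enum-typed field is wrapped in PlainValidator(validate_open_enum(...)). Judging by the diffs below, the boolean argument distinguishes integer-valued enums from string enums: the int-valued ack field uses validate_open_enum(True), while string enums use validate_open_enum(False). Here is a minimal, self-contained sketch of the pattern in plain pydantic; validate_open_enum_sketch is a stand-in for the SDK helper, and its pass-through behavior for unknown values is an assumption based on how Speakeasy-generated open enums typically work, not something this diff states.

from enum import Enum
from typing import Optional

from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


def validate_open_enum_sketch(value):
    # Coerce to the enum member when the value matches; otherwise pass the
    # raw value through, so configs produced by newer servers still load.
    try:
        return Compression(value)
    except ValueError:
        return value


class Example(BaseModel):
    compression: Annotated[
        Optional[Compression], PlainValidator(validate_open_enum_sketch)
    ] = Compression.GZIP


print(Example(compression="gzip").compression)  # Compression.GZIP
print(Example(compression="zstd").compression)  # "zstd" (unknown, kept as-is)

Under that assumption, deserialization no longer rejects enum values that the server adds after an SDK release. Representative diffs for three of the regenerated models follow.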
cribl_control_plane/models/outputkafka.py (+69 -40)

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputKafkaType(str, Enum):
     KAFKA = "kafka"


-class OutputKafkaAcknowledgments(int, Enum):
+class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments."""

     ONE = 1
@@ -20,7 +23,7 @@ class OutputKafkaAcknowledgments(int, Enum):
     MINUS_1 = -1


-class OutputKafkaRecordDataFormat(str, Enum):
+class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""

     JSON = "json"
@@ -28,7 +31,7 @@ class OutputKafkaRecordDataFormat(str, Enum):
     PROTOBUF = "protobuf"


-class OutputKafkaCompression(str, Enum):
+class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""

     NONE = "none"
@@ -37,13 +40,6 @@ class OutputKafkaCompression(str, Enum):
     LZ4 = "lz4"


-class OutputKafkaSchemaType(str, Enum):
-    r"""The schema format used to encode and decode event data"""
-
-    AVRO = "avro"
-    JSON = "json"
-
-
 class OutputKafkaAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""

@@ -63,14 +59,18 @@ class OutputKafkaAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""


-class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
+class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
+class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -130,12 +130,18 @@ class OutputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
+        Annotated[
+            Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
+        Annotated[
+            Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None

@@ -144,8 +150,6 @@ class OutputKafkaKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
-    schema_type: NotRequired[OutputKafkaSchemaType]
-    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -169,11 +173,6 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""

-    schema_type: Annotated[
-        Optional[OutputKafkaSchemaType], pydantic.Field(alias="schemaType")
-    ] = OutputKafkaSchemaType.AVRO
-    r"""The schema format used to encode and decode event data"""
-
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
@@ -203,7 +202,7 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


-class OutputKafkaSASLMechanism(str, Enum):
+class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -224,7 +223,9 @@ class OutputKafkaAuthentication(BaseModel):

     disabled: Optional[bool] = True

-    mechanism: Optional[OutputKafkaSASLMechanism] = OutputKafkaSASLMechanism.PLAIN
+    mechanism: Annotated[
+        Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
+    ] = OutputKafkaSASLMechanism.PLAIN

     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -232,14 +233,14 @@ class OutputKafkaAuthentication(BaseModel):
     r"""Enable OAuth authentication"""


-class OutputKafkaMinimumTLSVersion(str, Enum):
+class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputKafkaMaximumTLSVersion(str, Enum):
+class OutputKafkaMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -299,15 +300,23 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Optional[OutputKafkaMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[OutputKafkaMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Optional[OutputKafkaMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[OutputKafkaMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None


-class OutputKafkaBackpressureBehavior(str, Enum):
+class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -315,21 +324,21 @@ class OutputKafkaBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputKafkaPqCompressCompression(str, Enum):
+class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputKafkaQueueFullBehavior(str, Enum):
+class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputKafkaMode(str, Enum):
+class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -441,15 +450,23 @@ class OutputKafka(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    ack: Optional[OutputKafkaAcknowledgments] = OutputKafkaAcknowledgments.ONE
+    ack: Annotated[
+        Optional[OutputKafkaAcknowledgments], PlainValidator(validate_open_enum(True))
+    ] = OutputKafkaAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""

     format_: Annotated[
-        Optional[OutputKafkaRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputKafkaRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputKafkaRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""

-    compression: Optional[OutputKafkaCompression] = OutputKafkaCompression.GZIP
+    compression: Annotated[
+        Optional[OutputKafkaCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputKafkaCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""

     max_record_size_kb: Annotated[
@@ -512,7 +529,10 @@ class OutputKafka(BaseModel):
     tls: Optional[OutputKafkaTLSSettingsClientSide] = None

     on_backpressure: Annotated[
-        Optional[OutputKafkaBackpressureBehavior],
+        Annotated[
+            Optional[OutputKafkaBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKafkaBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -538,18 +558,27 @@ class OutputKafka(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputKafkaPqCompressCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputKafkaPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputKafkaPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputKafkaQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
+        Annotated[
+            Optional[OutputKafkaQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputKafkaQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[Optional[OutputKafkaMode], pydantic.Field(alias="pqMode")] = (
-        OutputKafkaMode.ERROR
-    )
+    pq_mode: Annotated[
+        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKafkaMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
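
For SDK users, the effect of the open-enum change is most visible when validating payloads that carry enum values this release does not define. A hedged sketch follows; it assumes validate_open_enum passes unknown values through rather than raising, and the field set (id, brokers, topic) is illustrative rather than a statement of which fields OutputKafka actually requires.

from cribl_control_plane.models import OutputKafka

raw = {
    "id": "my-kafka",               # illustrative values throughout
    "type": "kafka",
    "brokers": ["localhost:9092"],
    "topic": "events",
    "compression": "zstd",          # not an OutputKafkaCompression member
    "ack": 5,                       # not an OutputKafkaAcknowledgments member
}

# Under 0.0.50's closed enums this payload would fail validation; under the
# 0.0.50rc2 open enums the unknown values are expected to survive as-is.
dest = OutputKafka.model_validate(raw)
print(dest.compression, dest.ack)

Note the one outright removal in this file: OutputKafkaSchemaType and the schema_type field (alias schemaType, default avro) are gone from the schema-registry settings, so any caller that sets schemaType will need updating for 0.0.50rc2.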
cribl_control_plane/models/outputkinesis.py (+40 -16)

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputKinesisType(str, Enum):
     KINESIS = "kinesis"


-class OutputKinesisAuthenticationMethod(str, Enum):
+class OutputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -20,21 +23,21 @@ class OutputKinesisAuthenticationMethod(str, Enum):
     SECRET = "secret"


-class OutputKinesisSignatureVersion(str, Enum):
+class OutputKinesisSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing Kinesis stream requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputKinesisCompression(str, Enum):
+class OutputKinesisCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression type to use for records"""

     NONE = "none"
     GZIP = "gzip"


-class OutputKinesisBackpressureBehavior(str, Enum):
+class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -42,21 +45,21 @@ class OutputKinesisBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputKinesisPqCompressCompression(str, Enum):
+class OutputKinesisPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputKinesisQueueFullBehavior(str, Enum):
+class OutputKinesisQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputKinesisMode(str, Enum):
+class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -167,7 +170,10 @@ class OutputKinesis(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     aws_authentication_method: Annotated[
-        Optional[OutputKinesisAuthenticationMethod],
+        Annotated[
+            Optional[OutputKinesisAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputKinesisAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -180,7 +186,10 @@ class OutputKinesis(BaseModel):
     r"""Kinesis stream service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to Kinesis stream-compatible endpoint."""

     signature_version: Annotated[
-        Optional[OutputKinesisSignatureVersion],
+        Annotated[
+            Optional[OutputKinesisSignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="signatureVersion"),
     ] = OutputKinesisSignatureVersion.V4
     r"""Signature version to use for signing Kinesis stream requests"""
@@ -228,7 +237,9 @@ class OutputKinesis(BaseModel):
     ] = 1
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""

-    compression: Optional[OutputKinesisCompression] = OutputKinesisCompression.GZIP
+    compression: Annotated[
+        Optional[OutputKinesisCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputKinesisCompression.GZIP
     r"""Compression type to use for records"""

     use_list_shards: Annotated[
@@ -240,7 +251,10 @@ class OutputKinesis(BaseModel):
     r"""Batch events into a single record as NDJSON"""

     on_backpressure: Annotated[
-        Optional[OutputKinesisBackpressureBehavior],
+        Annotated[
+            Optional[OutputKinesisBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKinesisBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -266,19 +280,29 @@ class OutputKinesis(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputKinesisPqCompressCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputKinesisPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputKinesisPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputKinesisQueueFullBehavior],
+        Annotated[
+            Optional[OutputKinesisQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputKinesisQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[Optional[OutputKinesisMode], pydantic.Field(alias="pqMode")] = (
-        OutputKinesisMode.ERROR
-    )
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputKinesisMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKinesisMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
cribl_control_plane/models/outputloki.py (+41 -16)

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputLokiType(str, Enum):
     LOKI = "loki"


-class OutputLokiMessageFormat(str, Enum):
+class OutputLokiMessageFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use when sending logs to Loki (Protobuf or JSON)"""

     PROTOBUF = "protobuf"
@@ -30,7 +33,7 @@ class OutputLokiLabel(BaseModel):
     name: Optional[str] = ""


-class OutputLokiAuthenticationType(str, Enum):
+class OutputLokiAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"
     TOKEN = "token"
     TEXT_SECRET = "textSecret"
@@ -49,7 +52,7 @@ class OutputLokiExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputLokiFailedRequestLoggingMode(str, Enum):
+class OutputLokiFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -111,7 +114,7 @@ class OutputLokiTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputLokiBackpressureBehavior(str, Enum):
+class OutputLokiBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -119,21 +122,21 @@ class OutputLokiBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputLokiCompression(str, Enum):
+class OutputLokiCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputLokiQueueFullBehavior(str, Enum):
+class OutputLokiQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputLokiMode(str, Enum):
+class OutputLokiMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -259,7 +262,10 @@ class OutputLoki(BaseModel):
     r"""Name of the event field that contains the message to send. If not specified, Stream sends a JSON representation of the whole event."""

     message_format: Annotated[
-        Optional[OutputLokiMessageFormat], pydantic.Field(alias="messageFormat")
+        Annotated[
+            Optional[OutputLokiMessageFormat], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="messageFormat"),
     ] = OutputLokiMessageFormat.PROTOBUF
     r"""Format to use when sending logs to Loki (Protobuf or JSON)"""

@@ -267,7 +273,11 @@ class OutputLoki(BaseModel):
     r"""List of labels to send with logs. Labels define Loki streams, so use static labels to avoid proliferating label value combinations and streams. Can be merged and/or overridden by the event's __labels field. Example: '__labels: {host: \"cribl.io\", level: \"error\"}'"""

     auth_type: Annotated[
-        Optional[OutputLokiAuthenticationType], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputLokiAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputLokiAuthenticationType.NONE

     concurrency: Optional[float] = 1
@@ -311,7 +321,10 @@ class OutputLoki(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Optional[OutputLokiFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputLokiFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputLokiFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -343,7 +356,11 @@ class OutputLoki(BaseModel):
     r"""Add per-event HTTP headers from the __headers field to outgoing requests. Events with different headers are batched and sent separately."""

     on_backpressure: Annotated[
-        Optional[OutputLokiBackpressureBehavior], pydantic.Field(alias="onBackpressure")
+        Annotated[
+            Optional[OutputLokiBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
     ] = OutputLokiBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

@@ -388,18 +405,26 @@ class OutputLoki(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputLokiCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputLokiCompression], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputLokiCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputLokiQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
+        Annotated[
+            Optional[OutputLokiQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputLokiQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[Optional[OutputLokiMode], pydantic.Field(alias="pqMode")] = (
-        OutputLokiMode.ERROR
-    )
+    pq_mode: Annotated[
+        Annotated[Optional[OutputLokiMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputLokiMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[