cribl-control-plane 0.0.49__py3-none-any.whl → 0.1.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (173)
  1. cribl_control_plane/_version.py +4 -6
  2. cribl_control_plane/errors/healthstatus_error.py +8 -2
  3. cribl_control_plane/health.py +6 -2
  4. cribl_control_plane/models/__init__.py +68 -30
  5. cribl_control_plane/models/cacheconnection.py +10 -2
  6. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  7. cribl_control_plane/models/cloudprovider.py +2 -1
  8. cribl_control_plane/models/configgroup.py +7 -2
  9. cribl_control_plane/models/configgroupcloud.py +6 -2
  10. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  11. cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
  12. cribl_control_plane/models/createversionpushop.py +5 -5
  13. cribl_control_plane/models/cribllakedataset.py +8 -2
  14. cribl_control_plane/models/datasetmetadata.py +8 -2
  15. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  16. cribl_control_plane/models/error.py +16 -0
  17. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  18. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  19. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  20. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  21. cribl_control_plane/models/gethealthinfoop.py +17 -0
  22. cribl_control_plane/models/getsummaryop.py +7 -2
  23. cribl_control_plane/models/getversionshowop.py +6 -5
  24. cribl_control_plane/models/gitshowresult.py +19 -0
  25. cribl_control_plane/models/hbcriblinfo.py +24 -3
  26. cribl_control_plane/models/healthstatus.py +7 -4
  27. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  28. cribl_control_plane/models/inputappscope.py +34 -14
  29. cribl_control_plane/models/inputazureblob.py +17 -6
  30. cribl_control_plane/models/inputcollection.py +11 -4
  31. cribl_control_plane/models/inputconfluentcloud.py +41 -32
  32. cribl_control_plane/models/inputcribl.py +11 -4
  33. cribl_control_plane/models/inputcriblhttp.py +23 -8
  34. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  35. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  36. cribl_control_plane/models/inputcribltcp.py +23 -8
  37. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  38. cribl_control_plane/models/inputdatadogagent.py +24 -8
  39. cribl_control_plane/models/inputdatagen.py +11 -4
  40. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  41. cribl_control_plane/models/inputelastic.py +40 -14
  42. cribl_control_plane/models/inputeventhub.py +15 -6
  43. cribl_control_plane/models/inputexec.py +14 -6
  44. cribl_control_plane/models/inputfile.py +15 -6
  45. cribl_control_plane/models/inputfirehose.py +23 -8
  46. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  47. cribl_control_plane/models/inputgrafana.py +67 -24
  48. cribl_control_plane/models/inputhttp.py +23 -8
  49. cribl_control_plane/models/inputhttpraw.py +23 -8
  50. cribl_control_plane/models/inputjournalfiles.py +12 -4
  51. cribl_control_plane/models/inputkafka.py +41 -28
  52. cribl_control_plane/models/inputkinesis.py +38 -14
  53. cribl_control_plane/models/inputkubeevents.py +11 -4
  54. cribl_control_plane/models/inputkubelogs.py +16 -8
  55. cribl_control_plane/models/inputkubemetrics.py +16 -8
  56. cribl_control_plane/models/inputloki.py +29 -10
  57. cribl_control_plane/models/inputmetrics.py +23 -8
  58. cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
  59. cribl_control_plane/models/inputmsk.py +48 -30
  60. cribl_control_plane/models/inputnetflow.py +11 -4
  61. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  62. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  63. cribl_control_plane/models/inputoffice365service.py +35 -16
  64. cribl_control_plane/models/inputopentelemetry.py +38 -16
  65. cribl_control_plane/models/inputprometheus.py +50 -18
  66. cribl_control_plane/models/inputprometheusrw.py +30 -10
  67. cribl_control_plane/models/inputrawudp.py +11 -4
  68. cribl_control_plane/models/inputs3.py +21 -8
  69. cribl_control_plane/models/inputs3inventory.py +26 -10
  70. cribl_control_plane/models/inputsecuritylake.py +27 -10
  71. cribl_control_plane/models/inputsnmp.py +16 -6
  72. cribl_control_plane/models/inputsplunk.py +33 -12
  73. cribl_control_plane/models/inputsplunkhec.py +29 -10
  74. cribl_control_plane/models/inputsplunksearch.py +33 -14
  75. cribl_control_plane/models/inputsqs.py +27 -10
  76. cribl_control_plane/models/inputsyslog.py +43 -16
  77. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  78. cribl_control_plane/models/inputsystemstate.py +16 -8
  79. cribl_control_plane/models/inputtcp.py +29 -10
  80. cribl_control_plane/models/inputtcpjson.py +29 -10
  81. cribl_control_plane/models/inputwef.py +37 -14
  82. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  83. cribl_control_plane/models/inputwineventlogs.py +20 -10
  84. cribl_control_plane/models/inputwiz.py +21 -8
  85. cribl_control_plane/models/inputwizwebhook.py +23 -8
  86. cribl_control_plane/models/inputzscalerhec.py +29 -10
  87. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  88. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  89. cribl_control_plane/models/masterworkerentry.py +7 -2
  90. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  91. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  92. cribl_control_plane/models/nodeprovidedinfo.py +3 -0
  93. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  94. cribl_control_plane/models/nodeupgradestate.py +2 -1
  95. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  96. cribl_control_plane/models/output.py +3 -0
  97. cribl_control_plane/models/outputazureblob.py +48 -18
  98. cribl_control_plane/models/outputazuredataexplorer.py +73 -28
  99. cribl_control_plane/models/outputazureeventhub.py +40 -18
  100. cribl_control_plane/models/outputazurelogs.py +35 -12
  101. cribl_control_plane/models/outputclickhouse.py +55 -20
  102. cribl_control_plane/models/outputcloudwatch.py +29 -10
  103. cribl_control_plane/models/outputconfluentcloud.py +71 -44
  104. cribl_control_plane/models/outputcriblhttp.py +44 -16
  105. cribl_control_plane/models/outputcribllake.py +46 -16
  106. cribl_control_plane/models/outputcribltcp.py +45 -18
  107. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
  108. cribl_control_plane/models/outputdatabricks.py +439 -0
  109. cribl_control_plane/models/outputdatadog.py +48 -20
  110. cribl_control_plane/models/outputdataset.py +46 -18
  111. cribl_control_plane/models/outputdiskspool.py +7 -2
  112. cribl_control_plane/models/outputdls3.py +68 -24
  113. cribl_control_plane/models/outputdynatracehttp.py +53 -20
  114. cribl_control_plane/models/outputdynatraceotlp.py +55 -22
  115. cribl_control_plane/models/outputelastic.py +43 -18
  116. cribl_control_plane/models/outputelasticcloud.py +36 -12
  117. cribl_control_plane/models/outputexabeam.py +29 -10
  118. cribl_control_plane/models/outputfilesystem.py +39 -14
  119. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  120. cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
  121. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  122. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  123. cribl_control_plane/models/outputgrafanacloud.py +97 -32
  124. cribl_control_plane/models/outputgraphite.py +31 -14
  125. cribl_control_plane/models/outputhoneycomb.py +35 -12
  126. cribl_control_plane/models/outputhumiohec.py +43 -16
  127. cribl_control_plane/models/outputinfluxdb.py +42 -16
  128. cribl_control_plane/models/outputkafka.py +69 -40
  129. cribl_control_plane/models/outputkinesis.py +40 -16
  130. cribl_control_plane/models/outputloki.py +41 -16
  131. cribl_control_plane/models/outputminio.py +65 -24
  132. cribl_control_plane/models/outputmsk.py +77 -42
  133. cribl_control_plane/models/outputnewrelic.py +43 -18
  134. cribl_control_plane/models/outputnewrelicevents.py +41 -14
  135. cribl_control_plane/models/outputopentelemetry.py +67 -26
  136. cribl_control_plane/models/outputprometheus.py +35 -12
  137. cribl_control_plane/models/outputring.py +19 -8
  138. cribl_control_plane/models/outputs3.py +68 -26
  139. cribl_control_plane/models/outputsecuritylake.py +52 -18
  140. cribl_control_plane/models/outputsentinel.py +45 -18
  141. cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
  142. cribl_control_plane/models/outputservicenow.py +60 -24
  143. cribl_control_plane/models/outputsignalfx.py +37 -14
  144. cribl_control_plane/models/outputsns.py +36 -14
  145. cribl_control_plane/models/outputsplunk.py +60 -24
  146. cribl_control_plane/models/outputsplunkhec.py +35 -12
  147. cribl_control_plane/models/outputsplunklb.py +77 -30
  148. cribl_control_plane/models/outputsqs.py +41 -16
  149. cribl_control_plane/models/outputstatsd.py +30 -14
  150. cribl_control_plane/models/outputstatsdext.py +29 -12
  151. cribl_control_plane/models/outputsumologic.py +35 -12
  152. cribl_control_plane/models/outputsyslog.py +58 -24
  153. cribl_control_plane/models/outputtcpjson.py +52 -20
  154. cribl_control_plane/models/outputwavefront.py +35 -12
  155. cribl_control_plane/models/outputwebhook.py +58 -22
  156. cribl_control_plane/models/outputxsiam.py +35 -14
  157. cribl_control_plane/models/productscore.py +2 -1
  158. cribl_control_plane/models/rbacresource.py +2 -1
  159. cribl_control_plane/models/resourcepolicy.py +4 -2
  160. cribl_control_plane/models/routeconf.py +3 -4
  161. cribl_control_plane/models/runnablejobcollection.py +30 -13
  162. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  163. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  164. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  165. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  166. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
  167. cribl_control_plane/models/workertypes.py +2 -1
  168. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0b1.dist-info}/METADATA +1 -1
  169. cribl_control_plane-0.1.0b1.dist-info/RECORD +327 -0
  170. cribl_control_plane/models/appmode.py +0 -13
  171. cribl_control_plane/models/routecloneconf.py +0 -13
  172. cribl_control_plane-0.0.49.dist-info/RECORD +0 -325
  173. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0b1.dist-info}/WHEEL +0 -0

cribl_control_plane/models/outputmsk.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputMskType(str, Enum):
     MSK = "msk"
 
 
-class OutputMskAcknowledgments(int, Enum):
+class OutputMskAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments."""
 
     ONE = 1
@@ -20,7 +23,7 @@ class OutputMskAcknowledgments(int, Enum):
     MINUS_1 = -1
 
 
-class OutputMskRecordDataFormat(str, Enum):
+class OutputMskRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""
 
     JSON = "json"
@@ -28,7 +31,7 @@ class OutputMskRecordDataFormat(str, Enum):
     PROTOBUF = "protobuf"
 
 
-class OutputMskCompression(str, Enum):
+class OutputMskCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""
 
     NONE = "none"
@@ -37,13 +40,6 @@ class OutputMskCompression(str, Enum):
     LZ4 = "lz4"
 
 
-class OutputMskSchemaType(str, Enum):
-    r"""The schema format used to encode and decode event data"""
-
-    AVRO = "avro"
-    JSON = "json"
-
-
 class OutputMskAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -63,14 +59,18 @@ class OutputMskAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""
 
 
-class OutputMskKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
+class OutputMskKafkaSchemaRegistryMinimumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputMskKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
+class OutputMskKafkaSchemaRegistryMaximumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -130,12 +130,18 @@ class OutputMskKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputMskKafkaSchemaRegistryMinimumTLSVersion],
+        Annotated[
+            Optional[OutputMskKafkaSchemaRegistryMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputMskKafkaSchemaRegistryMaximumTLSVersion],
+        Annotated[
+            Optional[OutputMskKafkaSchemaRegistryMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
        pydantic.Field(alias="maxVersion"),
     ] = None
 
@@ -144,8 +150,6 @@ class OutputMskKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
-    schema_type: NotRequired[OutputMskSchemaType]
-    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -169,11 +173,6 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
-    schema_type: Annotated[
-        Optional[OutputMskSchemaType], pydantic.Field(alias="schemaType")
-    ] = OutputMskSchemaType.AVRO
-    r"""The schema format used to encode and decode event data"""
-
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
@@ -203,7 +202,7 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
 
 
-class OutputMskAuthenticationMethod(str, Enum):
+class OutputMskAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -211,21 +210,21 @@ class OutputMskAuthenticationMethod(str, Enum):
     SECRET = "secret"
 
 
-class OutputMskSignatureVersion(str, Enum):
+class OutputMskSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing MSK cluster requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputMskMinimumTLSVersion(str, Enum):
+class OutputMskMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputMskMaximumTLSVersion(str, Enum):
+class OutputMskMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -285,15 +284,23 @@ class OutputMskTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputMskMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[OutputMskMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputMskMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[OutputMskMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None
 
 
-class OutputMskBackpressureBehavior(str, Enum):
+class OutputMskBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -301,21 +308,21 @@ class OutputMskBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputMskPqCompressCompression(str, Enum):
+class OutputMskPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputMskQueueFullBehavior(str, Enum):
+class OutputMskQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputMskMode(str, Enum):
+class OutputMskMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -452,15 +459,23 @@ class OutputMsk(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    ack: Optional[OutputMskAcknowledgments] = OutputMskAcknowledgments.ONE
+    ack: Annotated[
+        Optional[OutputMskAcknowledgments], PlainValidator(validate_open_enum(True))
+    ] = OutputMskAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""
 
     format_: Annotated[
-        Optional[OutputMskRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputMskRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputMskRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""
 
-    compression: Optional[OutputMskCompression] = OutputMskCompression.GZIP
+    compression: Annotated[
+        Optional[OutputMskCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputMskCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""
 
     max_record_size_kb: Annotated[
@@ -518,7 +533,10 @@ class OutputMsk(BaseModel):
     r"""Specifies a time window during which @{product} can reauthenticate if needed. Creates the window measuring backward from the moment when credentials are set to expire."""
 
     aws_authentication_method: Annotated[
-        Optional[OutputMskAuthenticationMethod],
+        Annotated[
+            Optional[OutputMskAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputMskAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -531,7 +549,11 @@ class OutputMsk(BaseModel):
     r"""MSK cluster service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to MSK cluster-compatible endpoint."""
 
     signature_version: Annotated[
-        Optional[OutputMskSignatureVersion], pydantic.Field(alias="signatureVersion")
+        Annotated[
+            Optional[OutputMskSignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="signatureVersion"),
     ] = OutputMskSignatureVersion.V4
     r"""Signature version to use for signing MSK cluster requests"""
 
@@ -568,7 +590,11 @@ class OutputMsk(BaseModel):
     tls: Optional[OutputMskTLSSettingsClientSide] = None
 
     on_backpressure: Annotated[
-        Optional[OutputMskBackpressureBehavior], pydantic.Field(alias="onBackpressure")
+        Annotated[
+            Optional[OutputMskBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
     ] = OutputMskBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
@@ -598,18 +624,27 @@ class OutputMsk(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputMskPqCompressCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputMskPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputMskPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputMskQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
+        Annotated[
+            Optional[OutputMskQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputMskQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[Optional[OutputMskMode], pydantic.Field(alias="pqMode")] = (
-        OutputMskMode.ERROR
-    )
+    pq_mode: Annotated[
+        Annotated[Optional[OutputMskMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputMskMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
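
The dominant pattern in this release, visible above and repeated across most of the 173 changed model files, is the conversion of closed enums to open enums: each generated `Enum` gains the `utils.OpenEnumMeta` metaclass, and each enum-typed model field is wrapped in `PlainValidator(validate_open_enum(...))`. The diff does not include `cribl_control_plane/utils`, so the following is only a minimal sketch of what an open-enum metaclass plausibly does: known values resolve to enum members, while unknown values pass through instead of raising, so payloads containing enum values introduced server-side after the SDK was generated can still be deserialized.

```python
# Minimal sketch of an open-enum metaclass; an approximation, not the
# actual cribl_control_plane.utils.OpenEnumMeta implementation.
from enum import Enum, EnumMeta


class OpenEnumMeta(EnumMeta):
    def __call__(cls, value, *args, **kwargs):
        try:
            # Known member: normal enum lookup.
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            # Unknown member: preserve the raw value instead of failing.
            return value


class Compression(str, Enum, metaclass=OpenEnumMeta):
    NONE = "none"
    GZIP = "gzip"
    SNAPPY = "snappy"
    LZ4 = "lz4"


print(Compression("gzip"))  # Compression.GZIP -- declared member
print(Compression("zstd"))  # 'zstd' -- undeclared value passes through
```

On the pydantic side, `PlainValidator(validate_open_enum(False))` replaces the strict enum check during field validation, which is presumably what lets the same tolerance apply when models like `OutputMsk` are parsed from API responses.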

cribl_control_plane/models/outputnewrelic.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputNewrelicType(str, Enum):
     NEWRELIC = "newrelic"
 
 
-class OutputNewrelicRegion(str, Enum):
+class OutputNewrelicRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Which New Relic region endpoint to use."""
 
     US = "US"
@@ -20,7 +23,7 @@ class OutputNewrelicRegion(str, Enum):
     CUSTOM = "Custom"
 
 
-class FieldName(str, Enum):
+class FieldName(str, Enum, metaclass=utils.OpenEnumMeta):
     SERVICE = "service"
     HOSTNAME = "hostname"
     TIMESTAMP = "timestamp"
@@ -34,7 +37,7 @@ class OutputNewrelicMetadatumTypedDict(TypedDict):
 
 
 class OutputNewrelicMetadatum(BaseModel):
-    name: FieldName
+    name: Annotated[FieldName, PlainValidator(validate_open_enum(False))]
 
     value: str
     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
@@ -51,7 +54,7 @@ class OutputNewrelicExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputNewrelicFailedRequestLoggingMode(str, Enum):
+class OutputNewrelicFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -113,7 +116,7 @@ class OutputNewrelicTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputNewrelicBackpressureBehavior(str, Enum):
+class OutputNewrelicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -121,28 +124,28 @@ class OutputNewrelicBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputNewrelicAuthenticationMethod(str, Enum):
+class OutputNewrelicAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter API key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputNewrelicCompression(str, Enum):
+class OutputNewrelicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputNewrelicQueueFullBehavior(str, Enum):
+class OutputNewrelicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputNewrelicMode(str, Enum):
+class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -257,7 +260,9 @@ class OutputNewrelic(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    region: Optional[OutputNewrelicRegion] = OutputNewrelicRegion.US
+    region: Annotated[
+        Optional[OutputNewrelicRegion], PlainValidator(validate_open_enum(False))
+    ] = OutputNewrelicRegion.US
     r"""Which New Relic region endpoint to use."""
 
     log_type: Annotated[Optional[str], pydantic.Field(alias="logType")] = ""
@@ -313,7 +318,10 @@ class OutputNewrelic(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputNewrelicFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputNewrelicFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputNewrelicFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -340,13 +348,20 @@ class OutputNewrelic(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputNewrelicBackpressureBehavior],
+        Annotated[
+            Optional[OutputNewrelicBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputNewrelicBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputNewrelicAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputNewrelicAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputNewrelicAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""
 
@@ -373,19 +388,29 @@ class OutputNewrelic(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputNewrelicCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputNewrelicCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputNewrelicCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputNewrelicQueueFullBehavior],
+        Annotated[
+            Optional[OutputNewrelicQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputNewrelicQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[Optional[OutputNewrelicMode], pydantic.Field(alias="pqMode")] = (
-        OutputNewrelicMode.ERROR
-    )
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputNewrelicMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputNewrelicMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
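
The same conversion applies here, including to the required `name` field on `OutputNewrelicMetadatum`, which is now an open `FieldName`. A hypothetical usage sketch of what this should mean for SDK callers, assuming the pass-through semantics outlined above (the actual behavior depends on `validate_open_enum`, which this diff does not show):

```python
# Hypothetical example: "customField" is not a declared FieldName member
# (service, hostname, timestamp, ...). Under the previous closed enum this
# would raise a pydantic ValidationError; with the open-enum validator it
# should now be accepted and carried through as a plain string.
from cribl_control_plane.models import OutputNewrelicMetadatum

metadatum = OutputNewrelicMetadatum(name="customField", value="'payments'")
print(metadatum.name)  # expected: 'customField'
```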

cribl_control_plane/models/outputnewrelicevents.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputNewrelicEventsType(str, Enum):
     NEWRELIC_EVENTS = "newrelic_events"
 
 
-class OutputNewrelicEventsRegion(str, Enum):
+class OutputNewrelicEventsRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Which New Relic region endpoint to use."""
 
     US = "US"
@@ -31,7 +34,9 @@ class OutputNewrelicEventsExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputNewrelicEventsFailedRequestLoggingMode(str, Enum):
+class OutputNewrelicEventsFailedRequestLoggingMode(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -93,7 +98,7 @@ class OutputNewrelicEventsTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputNewrelicEventsBackpressureBehavior(str, Enum):
+class OutputNewrelicEventsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -101,28 +106,28 @@ class OutputNewrelicEventsBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputNewrelicEventsAuthenticationMethod(str, Enum):
+class OutputNewrelicEventsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter API key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputNewrelicEventsCompression(str, Enum):
+class OutputNewrelicEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputNewrelicEventsQueueFullBehavior(str, Enum):
+class OutputNewrelicEventsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputNewrelicEventsMode(str, Enum):
+class OutputNewrelicEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -243,7 +248,9 @@ class OutputNewrelicEvents(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    region: Optional[OutputNewrelicEventsRegion] = OutputNewrelicEventsRegion.US
+    region: Annotated[
+        Optional[OutputNewrelicEventsRegion], PlainValidator(validate_open_enum(False))
+    ] = OutputNewrelicEventsRegion.US
     r"""Which New Relic region endpoint to use."""
 
     concurrency: Optional[float] = 5
@@ -290,7 +297,10 @@ class OutputNewrelicEvents(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputNewrelicEventsFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputNewrelicEventsFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputNewrelicEventsFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -317,13 +327,19 @@ class OutputNewrelicEvents(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputNewrelicEventsBackpressureBehavior],
+        Annotated[
+            Optional[OutputNewrelicEventsBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputNewrelicEventsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputNewrelicEventsAuthenticationMethod],
+        Annotated[
+            Optional[OutputNewrelicEventsAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="authType"),
     ] = OutputNewrelicEventsAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""
@@ -346,18 +362,29 @@ class OutputNewrelicEvents(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputNewrelicEventsCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputNewrelicEventsCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputNewrelicEventsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputNewrelicEventsQueueFullBehavior],
+        Annotated[
+            Optional[OutputNewrelicEventsQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputNewrelicEventsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputNewrelicEventsMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputNewrelicEventsMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
    ] = OutputNewrelicEventsMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
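
One last detail: nearly every wrapper in these diffs is `validate_open_enum(False)`; the sole `validate_open_enum(True)` sits on the int-valued `OutputMskAcknowledgments` field (`ack`). That usage suggests the boolean switches on integer coercion before the open-enum lookup, roughly along these lines (a sketch inferred from usage, not the actual `cribl_control_plane.utils` code):

```python
# Inferred sketch: the is_int flag normalizes numeric strings before the
# enum lookup, so int-valued open enums like OutputMskAcknowledgments
# (1, 0, -1) can accept values such as "-1" arriving from JSON payloads.
def validate_open_enum(is_int: bool):
    def validate(value):
        if value is None:
            return None
        if is_int and isinstance(value, str):
            value = int(value)
        return value

    return validate


assert validate_open_enum(True)("-1") == -1         # coerced for int enums
assert validate_open_enum(False)("zstd") == "zstd"  # strings pass through
```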