cribl-control-plane 0.0.49__py3-none-any.whl → 0.1.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of cribl-control-plane might be problematic.

Files changed (173)
  1. cribl_control_plane/_version.py +4 -6
  2. cribl_control_plane/errors/healthstatus_error.py +8 -2
  3. cribl_control_plane/health.py +6 -2
  4. cribl_control_plane/models/__init__.py +68 -30
  5. cribl_control_plane/models/cacheconnection.py +10 -2
  6. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  7. cribl_control_plane/models/cloudprovider.py +2 -1
  8. cribl_control_plane/models/configgroup.py +7 -2
  9. cribl_control_plane/models/configgroupcloud.py +6 -2
  10. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  11. cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
  12. cribl_control_plane/models/createversionpushop.py +5 -5
  13. cribl_control_plane/models/cribllakedataset.py +8 -2
  14. cribl_control_plane/models/datasetmetadata.py +8 -2
  15. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  16. cribl_control_plane/models/error.py +16 -0
  17. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  18. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  19. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  20. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  21. cribl_control_plane/models/gethealthinfoop.py +17 -0
  22. cribl_control_plane/models/getsummaryop.py +7 -2
  23. cribl_control_plane/models/getversionshowop.py +6 -5
  24. cribl_control_plane/models/gitshowresult.py +19 -0
  25. cribl_control_plane/models/hbcriblinfo.py +24 -3
  26. cribl_control_plane/models/healthstatus.py +7 -4
  27. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  28. cribl_control_plane/models/inputappscope.py +34 -14
  29. cribl_control_plane/models/inputazureblob.py +17 -6
  30. cribl_control_plane/models/inputcollection.py +11 -4
  31. cribl_control_plane/models/inputconfluentcloud.py +41 -32
  32. cribl_control_plane/models/inputcribl.py +11 -4
  33. cribl_control_plane/models/inputcriblhttp.py +23 -8
  34. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  35. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  36. cribl_control_plane/models/inputcribltcp.py +23 -8
  37. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  38. cribl_control_plane/models/inputdatadogagent.py +24 -8
  39. cribl_control_plane/models/inputdatagen.py +11 -4
  40. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  41. cribl_control_plane/models/inputelastic.py +40 -14
  42. cribl_control_plane/models/inputeventhub.py +15 -6
  43. cribl_control_plane/models/inputexec.py +14 -6
  44. cribl_control_plane/models/inputfile.py +15 -6
  45. cribl_control_plane/models/inputfirehose.py +23 -8
  46. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  47. cribl_control_plane/models/inputgrafana.py +67 -24
  48. cribl_control_plane/models/inputhttp.py +23 -8
  49. cribl_control_plane/models/inputhttpraw.py +23 -8
  50. cribl_control_plane/models/inputjournalfiles.py +12 -4
  51. cribl_control_plane/models/inputkafka.py +41 -28
  52. cribl_control_plane/models/inputkinesis.py +38 -14
  53. cribl_control_plane/models/inputkubeevents.py +11 -4
  54. cribl_control_plane/models/inputkubelogs.py +16 -8
  55. cribl_control_plane/models/inputkubemetrics.py +16 -8
  56. cribl_control_plane/models/inputloki.py +29 -10
  57. cribl_control_plane/models/inputmetrics.py +23 -8
  58. cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
  59. cribl_control_plane/models/inputmsk.py +48 -30
  60. cribl_control_plane/models/inputnetflow.py +11 -4
  61. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  62. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  63. cribl_control_plane/models/inputoffice365service.py +35 -16
  64. cribl_control_plane/models/inputopentelemetry.py +38 -16
  65. cribl_control_plane/models/inputprometheus.py +50 -18
  66. cribl_control_plane/models/inputprometheusrw.py +30 -10
  67. cribl_control_plane/models/inputrawudp.py +11 -4
  68. cribl_control_plane/models/inputs3.py +21 -8
  69. cribl_control_plane/models/inputs3inventory.py +26 -10
  70. cribl_control_plane/models/inputsecuritylake.py +27 -10
  71. cribl_control_plane/models/inputsnmp.py +16 -6
  72. cribl_control_plane/models/inputsplunk.py +33 -12
  73. cribl_control_plane/models/inputsplunkhec.py +29 -10
  74. cribl_control_plane/models/inputsplunksearch.py +33 -14
  75. cribl_control_plane/models/inputsqs.py +27 -10
  76. cribl_control_plane/models/inputsyslog.py +43 -16
  77. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  78. cribl_control_plane/models/inputsystemstate.py +16 -8
  79. cribl_control_plane/models/inputtcp.py +29 -10
  80. cribl_control_plane/models/inputtcpjson.py +29 -10
  81. cribl_control_plane/models/inputwef.py +37 -14
  82. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  83. cribl_control_plane/models/inputwineventlogs.py +20 -10
  84. cribl_control_plane/models/inputwiz.py +21 -8
  85. cribl_control_plane/models/inputwizwebhook.py +23 -8
  86. cribl_control_plane/models/inputzscalerhec.py +29 -10
  87. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  88. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  89. cribl_control_plane/models/masterworkerentry.py +7 -2
  90. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  91. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  92. cribl_control_plane/models/nodeprovidedinfo.py +3 -0
  93. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  94. cribl_control_plane/models/nodeupgradestate.py +2 -1
  95. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  96. cribl_control_plane/models/output.py +3 -0
  97. cribl_control_plane/models/outputazureblob.py +48 -18
  98. cribl_control_plane/models/outputazuredataexplorer.py +73 -28
  99. cribl_control_plane/models/outputazureeventhub.py +40 -18
  100. cribl_control_plane/models/outputazurelogs.py +35 -12
  101. cribl_control_plane/models/outputclickhouse.py +55 -20
  102. cribl_control_plane/models/outputcloudwatch.py +29 -10
  103. cribl_control_plane/models/outputconfluentcloud.py +71 -44
  104. cribl_control_plane/models/outputcriblhttp.py +44 -16
  105. cribl_control_plane/models/outputcribllake.py +46 -16
  106. cribl_control_plane/models/outputcribltcp.py +45 -18
  107. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
  108. cribl_control_plane/models/outputdatabricks.py +439 -0
  109. cribl_control_plane/models/outputdatadog.py +48 -20
  110. cribl_control_plane/models/outputdataset.py +46 -18
  111. cribl_control_plane/models/outputdiskspool.py +7 -2
  112. cribl_control_plane/models/outputdls3.py +68 -24
  113. cribl_control_plane/models/outputdynatracehttp.py +53 -20
  114. cribl_control_plane/models/outputdynatraceotlp.py +55 -22
  115. cribl_control_plane/models/outputelastic.py +43 -18
  116. cribl_control_plane/models/outputelasticcloud.py +36 -12
  117. cribl_control_plane/models/outputexabeam.py +29 -10
  118. cribl_control_plane/models/outputfilesystem.py +39 -14
  119. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  120. cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
  121. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  122. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  123. cribl_control_plane/models/outputgrafanacloud.py +97 -32
  124. cribl_control_plane/models/outputgraphite.py +31 -14
  125. cribl_control_plane/models/outputhoneycomb.py +35 -12
  126. cribl_control_plane/models/outputhumiohec.py +43 -16
  127. cribl_control_plane/models/outputinfluxdb.py +42 -16
  128. cribl_control_plane/models/outputkafka.py +69 -40
  129. cribl_control_plane/models/outputkinesis.py +40 -16
  130. cribl_control_plane/models/outputloki.py +41 -16
  131. cribl_control_plane/models/outputminio.py +65 -24
  132. cribl_control_plane/models/outputmsk.py +77 -42
  133. cribl_control_plane/models/outputnewrelic.py +43 -18
  134. cribl_control_plane/models/outputnewrelicevents.py +41 -14
  135. cribl_control_plane/models/outputopentelemetry.py +67 -26
  136. cribl_control_plane/models/outputprometheus.py +35 -12
  137. cribl_control_plane/models/outputring.py +19 -8
  138. cribl_control_plane/models/outputs3.py +68 -26
  139. cribl_control_plane/models/outputsecuritylake.py +52 -18
  140. cribl_control_plane/models/outputsentinel.py +45 -18
  141. cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
  142. cribl_control_plane/models/outputservicenow.py +60 -24
  143. cribl_control_plane/models/outputsignalfx.py +37 -14
  144. cribl_control_plane/models/outputsns.py +36 -14
  145. cribl_control_plane/models/outputsplunk.py +60 -24
  146. cribl_control_plane/models/outputsplunkhec.py +35 -12
  147. cribl_control_plane/models/outputsplunklb.py +77 -30
  148. cribl_control_plane/models/outputsqs.py +41 -16
  149. cribl_control_plane/models/outputstatsd.py +30 -14
  150. cribl_control_plane/models/outputstatsdext.py +29 -12
  151. cribl_control_plane/models/outputsumologic.py +35 -12
  152. cribl_control_plane/models/outputsyslog.py +58 -24
  153. cribl_control_plane/models/outputtcpjson.py +52 -20
  154. cribl_control_plane/models/outputwavefront.py +35 -12
  155. cribl_control_plane/models/outputwebhook.py +58 -22
  156. cribl_control_plane/models/outputxsiam.py +35 -14
  157. cribl_control_plane/models/productscore.py +2 -1
  158. cribl_control_plane/models/rbacresource.py +2 -1
  159. cribl_control_plane/models/resourcepolicy.py +4 -2
  160. cribl_control_plane/models/routeconf.py +3 -4
  161. cribl_control_plane/models/runnablejobcollection.py +30 -13
  162. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  163. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  164. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  165. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  166. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
  167. cribl_control_plane/models/workertypes.py +2 -1
  168. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0b1.dist-info}/METADATA +1 -1
  169. cribl_control_plane-0.1.0b1.dist-info/RECORD +327 -0
  170. cribl_control_plane/models/appmode.py +0 -13
  171. cribl_control_plane/models/routecloneconf.py +0 -13
  172. cribl_control_plane-0.0.49.dist-info/RECORD +0 -325
  173. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0b1.dist-info}/WHEEL +0 -0
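The three representative diffs below (outputhumiohec.py, outputinfluxdb.py, and outputkafka.py) show the change that repeats across most of the model files listed above: each closed Enum becomes an open enum via metaclass=utils.OpenEnumMeta, and each enum-typed field gains an inner Annotated[...] carrying PlainValidator(validate_open_enum(...)), called with False for string enums and with True for the one integer enum (OutputKafkaAcknowledgments). The sketch below illustrates the intended effect; OpenEnumMeta and validate_open_enum here are hypothetical stand-ins for the helpers in cribl_control_plane.utils, not the shipped implementations, which may differ in detail.

# Self-contained sketch of the open-enum pattern; OpenEnumMeta and
# validate_open_enum are hypothetical stand-ins for the helpers in
# cribl_control_plane.utils, not the shipped implementations.
from enum import Enum, EnumMeta
from typing import Optional

from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class OpenEnumMeta(EnumMeta):
    """Let lookup of a value outside the enum fall through instead of raising."""

    def __call__(cls, value, *args, **kwargs):
        try:
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            return value  # unrecognized member: pass the raw value through


def validate_open_enum(is_int: bool):
    """Build a validator that accepts values outside the declared enum."""

    def validate(value):
        return int(value) if is_int else value

    return validate


class Compression(str, Enum, metaclass=OpenEnumMeta):
    NONE = "none"
    GZIP = "gzip"


class SomeOutput(BaseModel):
    compression: Annotated[
        Optional[Compression], PlainValidator(validate_open_enum(False))
    ] = Compression.NONE


# A value the enum does not define no longer raises a ValidationError:
print(SomeOutput(compression="zstd").compression)  # -> "zstd"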
cribl_control_plane/models/outputhumiohec.py +43 -16

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -23,7 +26,7 @@ class OutputHumioHecExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputHumioHecFailedRequestLoggingMode(str, Enum):
+class OutputHumioHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -31,14 +34,14 @@ class OutputHumioHecFailedRequestLoggingMode(str, Enum):
     NONE = "none"


-class OutputHumioHecRequestFormat(str, Enum):
+class OutputHumioHecRequestFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""

     JSON = "JSON"
     RAW = "raw"


-class OutputHumioHecAuthenticationMethod(str, Enum):
+class OutputHumioHecAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -99,7 +102,7 @@ class OutputHumioHecTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputHumioHecBackpressureBehavior(str, Enum):
+class OutputHumioHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -107,21 +110,21 @@ class OutputHumioHecBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputHumioHecCompression(str, Enum):
+class OutputHumioHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputHumioHecQueueFullBehavior(str, Enum):
+class OutputHumioHecQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputHumioHecMode(str, Enum):
+class OutputHumioHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -276,7 +279,10 @@ class OutputHumioHec(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Optional[OutputHumioHecFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputHumioHecFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputHumioHecFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -287,12 +293,20 @@ class OutputHumioHec(BaseModel):
     r"""List of headers that are safe to log in plain text"""

     format_: Annotated[
-        Optional[OutputHumioHecRequestFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputHumioHecRequestFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputHumioHecRequestFormat.JSON
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""

     auth_type: Annotated[
-        Optional[OutputHumioHecAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputHumioHecAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputHumioHecAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -313,7 +327,10 @@ class OutputHumioHec(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Optional[OutputHumioHecBackpressureBehavior],
+        Annotated[
+            Optional[OutputHumioHecBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputHumioHecBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -340,19 +357,29 @@ class OutputHumioHec(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputHumioHecCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputHumioHecCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputHumioHecCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputHumioHecQueueFullBehavior],
+        Annotated[
+            Optional[OutputHumioHecQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputHumioHecQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[Optional[OutputHumioHecMode], pydantic.Field(alias="pqMode")] = (
-        OutputHumioHecMode.ERROR
-    )
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputHumioHecMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputHumioHecMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
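The practical effect shows up when deserializing server responses. A hedged round-trip against the regenerated model follows; the pqMode alias comes from the diff above, but the other keys and values are illustrative guesses rather than the model's actual required fields.

# Hypothetical round-trip; the "pqMode" alias is taken from the diff above,
# but the other keys and values are illustrative and may not match the real
# required fields of OutputHumioHec.
from cribl_control_plane.models import OutputHumioHec

payload = {
    "id": "my-humio-destination",
    "type": "humio_hec",
    "url": "https://cloud.humio.com/api/v1/ingest/hec",
    "pqMode": "smart",  # hypothetical value, not defined in OutputHumioHecMode
}

out = OutputHumioHec.model_validate(payload)
# 0.0.49 raised a pydantic ValidationError on the unknown "smart";
# 0.1.0b1 passes the raw string through instead:
print(out.pq_mode)  # -> "smart"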
cribl_control_plane/models/outputinfluxdb.py +42 -16

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputInfluxdbType(str, Enum):
     INFLUXDB = "influxdb"


-class TimestampPrecision(str, Enum):
+class TimestampPrecision(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Sets the precision for the supplied Unix time values. Defaults to milliseconds."""

     NS = "ns"
@@ -34,7 +37,7 @@ class OutputInfluxdbExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputInfluxdbFailedRequestLoggingMode(str, Enum):
+class OutputInfluxdbFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -96,7 +99,7 @@ class OutputInfluxdbTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputInfluxdbBackpressureBehavior(str, Enum):
+class OutputInfluxdbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -104,7 +107,7 @@ class OutputInfluxdbBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputInfluxdbAuthenticationType(str, Enum):
+class OutputInfluxdbAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""InfluxDB authentication type"""

     NONE = "none"
@@ -115,21 +118,21 @@ class OutputInfluxdbAuthenticationType(str, Enum):
     OAUTH = "oauth"


-class OutputInfluxdbCompression(str, Enum):
+class OutputInfluxdbCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputInfluxdbQueueFullBehavior(str, Enum):
+class OutputInfluxdbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputInfluxdbMode(str, Enum):
+class OutputInfluxdbMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -306,7 +309,10 @@ class OutputInfluxdb(BaseModel):
     r"""The v2 API can be enabled with InfluxDB versions 1.8 and later."""

     timestamp_precision: Annotated[
-        Optional[TimestampPrecision], pydantic.Field(alias="timestampPrecision")
+        Annotated[
+            Optional[TimestampPrecision], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="timestampPrecision"),
     ] = TimestampPrecision.MS
     r"""Sets the precision for the supplied Unix time values. Defaults to milliseconds."""

@@ -364,7 +370,10 @@ class OutputInfluxdb(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Optional[OutputInfluxdbFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputInfluxdbFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputInfluxdbFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -391,13 +400,20 @@ class OutputInfluxdb(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Optional[OutputInfluxdbBackpressureBehavior],
+        Annotated[
+            Optional[OutputInfluxdbBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputInfluxdbBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Optional[OutputInfluxdbAuthenticationType], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputInfluxdbAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputInfluxdbAuthenticationType.NONE
     r"""InfluxDB authentication type"""

@@ -426,19 +442,29 @@ class OutputInfluxdb(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputInfluxdbCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputInfluxdbCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputInfluxdbCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputInfluxdbQueueFullBehavior],
+        Annotated[
+            Optional[OutputInfluxdbQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputInfluxdbQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[Optional[OutputInfluxdbMode], pydantic.Field(alias="pqMode")] = (
-        OutputInfluxdbMode.ERROR
-    )
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputInfluxdbMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputInfluxdbMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
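The doubly nested Annotated that recurs in these hunks is how the new validator and the existing field alias coexist: the inner Annotated binds PlainValidator to the enum type, the outer one carries the pydantic.Field metadata, and typing flattens the nesting into a single annotated type. A minimal stand-alone equivalent, with stand-in names rather than the SDK's:

# Stand-alone illustration; Precision and accept_anything are stand-ins,
# not names from the SDK.
from enum import Enum
from typing import Optional

import pydantic
from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Precision(str, Enum):  # stand-in for TimestampPrecision
    NS = "ns"
    MS = "ms"


def accept_anything(value: object) -> object:
    """Stand-in for validate_open_enum(False): pass any value through."""
    return value


class Sketch(BaseModel):
    # Inner Annotated: type plus validator. Outer Annotated: alias metadata.
    # typing flattens Annotated[Annotated[T, v], f] into Annotated[T, v, f].
    timestamp_precision: Annotated[
        Annotated[Optional[Precision], PlainValidator(accept_anything)],
        pydantic.Field(alias="timestampPrecision"),
    ] = Precision.MS


print(Sketch(timestampPrecision="us").timestamp_precision)  # -> "us"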
cribl_control_plane/models/outputkafka.py +69 -40

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputKafkaType(str, Enum):
     KAFKA = "kafka"


-class OutputKafkaAcknowledgments(int, Enum):
+class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments."""

     ONE = 1
@@ -20,7 +23,7 @@ class OutputKafkaAcknowledgments(int, Enum):
     MINUS_1 = -1


-class OutputKafkaRecordDataFormat(str, Enum):
+class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""

     JSON = "json"
@@ -28,7 +31,7 @@ class OutputKafkaRecordDataFormat(str, Enum):
     PROTOBUF = "protobuf"


-class OutputKafkaCompression(str, Enum):
+class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""

     NONE = "none"
@@ -37,13 +40,6 @@ class OutputKafkaCompression(str, Enum):
     LZ4 = "lz4"


-class OutputKafkaSchemaType(str, Enum):
-    r"""The schema format used to encode and decode event data"""
-
-    AVRO = "avro"
-    JSON = "json"
-
-
 class OutputKafkaAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""

@@ -63,14 +59,18 @@ class OutputKafkaAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""


-class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
+class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
+class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -130,12 +130,18 @@ class OutputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
+        Annotated[
+            Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
+        Annotated[
+            Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None

@@ -144,8 +150,6 @@ class OutputKafkaKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
-    schema_type: NotRequired[OutputKafkaSchemaType]
-    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -169,11 +173,6 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""

-    schema_type: Annotated[
-        Optional[OutputKafkaSchemaType], pydantic.Field(alias="schemaType")
-    ] = OutputKafkaSchemaType.AVRO
-    r"""The schema format used to encode and decode event data"""
-
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
@@ -203,7 +202,7 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


-class OutputKafkaSASLMechanism(str, Enum):
+class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -224,7 +223,9 @@ class OutputKafkaAuthentication(BaseModel):

     disabled: Optional[bool] = True

-    mechanism: Optional[OutputKafkaSASLMechanism] = OutputKafkaSASLMechanism.PLAIN
+    mechanism: Annotated[
+        Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
+    ] = OutputKafkaSASLMechanism.PLAIN

     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -232,14 +233,14 @@ class OutputKafkaAuthentication(BaseModel):
     r"""Enable OAuth authentication"""


-class OutputKafkaMinimumTLSVersion(str, Enum):
+class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputKafkaMaximumTLSVersion(str, Enum):
+class OutputKafkaMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -299,15 +300,23 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Optional[OutputKafkaMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[OutputKafkaMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Optional[OutputKafkaMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[OutputKafkaMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None


-class OutputKafkaBackpressureBehavior(str, Enum):
+class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -315,21 +324,21 @@ class OutputKafkaBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputKafkaPqCompressCompression(str, Enum):
+class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputKafkaQueueFullBehavior(str, Enum):
+class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputKafkaMode(str, Enum):
+class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -441,15 +450,23 @@ class OutputKafka(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    ack: Optional[OutputKafkaAcknowledgments] = OutputKafkaAcknowledgments.ONE
+    ack: Annotated[
+        Optional[OutputKafkaAcknowledgments], PlainValidator(validate_open_enum(True))
+    ] = OutputKafkaAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""

     format_: Annotated[
-        Optional[OutputKafkaRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputKafkaRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputKafkaRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""

-    compression: Optional[OutputKafkaCompression] = OutputKafkaCompression.GZIP
+    compression: Annotated[
+        Optional[OutputKafkaCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputKafkaCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""

     max_record_size_kb: Annotated[
@@ -512,7 +529,10 @@ class OutputKafka(BaseModel):
     tls: Optional[OutputKafkaTLSSettingsClientSide] = None

     on_backpressure: Annotated[
-        Optional[OutputKafkaBackpressureBehavior],
+        Annotated[
+            Optional[OutputKafkaBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKafkaBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -538,18 +558,27 @@ class OutputKafka(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputKafkaPqCompressCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputKafkaPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputKafkaPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputKafkaQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
+        Annotated[
+            Optional[OutputKafkaQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputKafkaQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[Optional[OutputKafkaMode], pydantic.Field(alias="pqMode")] = (
-        OutputKafkaMode.ERROR
-    )
+    pq_mode: Annotated[
+        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputKafkaMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
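Unlike the purely additive open-enum changes, the OutputKafka diff also deletes OutputKafkaSchemaType and the schema_type field from the schema-registry models, which is a breaking change for callers that set it. A hedged before/after follows; whether a leftover schemaType key is silently dropped or rejected depends on the generated model's extra-fields configuration, which this diff does not show.

# Assumes the models are importable from cribl_control_plane.models, as the
# file list above suggests; values are illustrative.
from cribl_control_plane.models import OutputKafkaKafkaSchemaRegistryAuthentication

# 0.0.49 (before): schemaType was an optional field defaulting to AVRO.
# registry = OutputKafkaKafkaSchemaRegistryAuthentication.model_validate(
#     {"schemaRegistryUrl": "http://localhost:8081", "schemaType": "avro"}
# )

# 0.1.0b1 (after): the field and its enum are gone, so drop the key when
# migrating; imports of OutputKafkaSchemaType no longer resolve.
registry = OutputKafkaKafkaSchemaRegistryAuthentication.model_validate(
    {"schemaRegistryUrl": "http://localhost:8081"}
)
print(registry.schema_registry_url)  # -> "http://localhost:8081"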