cribl-control-plane 0.0.50rc2__py3-none-any.whl → 0.0.51__py3-none-any.whl


Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (176)
  1. cribl_control_plane/_version.py +5 -3
  2. cribl_control_plane/errors/healthstatus_error.py +2 -8
  3. cribl_control_plane/groups_sdk.py +4 -4
  4. cribl_control_plane/health.py +2 -6
  5. cribl_control_plane/models/__init__.py +31 -56
  6. cribl_control_plane/models/appmode.py +13 -0
  7. cribl_control_plane/models/cacheconnection.py +2 -10
  8. cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
  9. cribl_control_plane/models/cloudprovider.py +1 -2
  10. cribl_control_plane/models/configgroup.py +4 -24
  11. cribl_control_plane/models/configgroupcloud.py +2 -6
  12. cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
  13. cribl_control_plane/models/createinputhectokenbyidop.py +5 -6
  14. cribl_control_plane/models/createversionpushop.py +5 -5
  15. cribl_control_plane/models/cribllakedataset.py +2 -8
  16. cribl_control_plane/models/datasetmetadata.py +2 -8
  17. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
  18. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
  19. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
  20. cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
  21. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
  22. cribl_control_plane/models/getsummaryop.py +2 -7
  23. cribl_control_plane/models/getversionshowop.py +5 -6
  24. cribl_control_plane/models/gitinfo.py +3 -14
  25. cribl_control_plane/models/hbcriblinfo.py +3 -24
  26. cribl_control_plane/models/healthstatus.py +4 -7
  27. cribl_control_plane/models/heartbeatmetadata.py +0 -3
  28. cribl_control_plane/models/input.py +63 -65
  29. cribl_control_plane/models/inputappscope.py +14 -34
  30. cribl_control_plane/models/inputazureblob.py +6 -17
  31. cribl_control_plane/models/inputcollection.py +4 -11
  32. cribl_control_plane/models/inputconfluentcloud.py +32 -41
  33. cribl_control_plane/models/inputcribl.py +4 -11
  34. cribl_control_plane/models/inputcriblhttp.py +8 -23
  35. cribl_control_plane/models/inputcribllakehttp.py +10 -22
  36. cribl_control_plane/models/inputcriblmetrics.py +4 -12
  37. cribl_control_plane/models/inputcribltcp.py +8 -23
  38. cribl_control_plane/models/inputcrowdstrike.py +10 -26
  39. cribl_control_plane/models/inputdatadogagent.py +8 -24
  40. cribl_control_plane/models/inputdatagen.py +4 -11
  41. cribl_control_plane/models/inputedgeprometheus.py +24 -58
  42. cribl_control_plane/models/inputelastic.py +14 -40
  43. cribl_control_plane/models/inputeventhub.py +6 -15
  44. cribl_control_plane/models/inputexec.py +6 -14
  45. cribl_control_plane/models/inputfile.py +6 -15
  46. cribl_control_plane/models/inputfirehose.py +8 -23
  47. cribl_control_plane/models/inputgooglepubsub.py +6 -19
  48. cribl_control_plane/models/inputgrafana.py +24 -67
  49. cribl_control_plane/models/inputhttp.py +8 -23
  50. cribl_control_plane/models/inputhttpraw.py +8 -23
  51. cribl_control_plane/models/inputjournalfiles.py +4 -12
  52. cribl_control_plane/models/inputkafka.py +28 -41
  53. cribl_control_plane/models/inputkinesis.py +14 -38
  54. cribl_control_plane/models/inputkubeevents.py +4 -11
  55. cribl_control_plane/models/inputkubelogs.py +8 -16
  56. cribl_control_plane/models/inputkubemetrics.py +8 -16
  57. cribl_control_plane/models/inputloki.py +10 -29
  58. cribl_control_plane/models/inputmetrics.py +8 -23
  59. cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
  60. cribl_control_plane/models/inputmsk.py +30 -48
  61. cribl_control_plane/models/inputnetflow.py +4 -11
  62. cribl_control_plane/models/inputoffice365mgmt.py +14 -33
  63. cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
  64. cribl_control_plane/models/inputoffice365service.py +16 -35
  65. cribl_control_plane/models/inputopentelemetry.py +16 -38
  66. cribl_control_plane/models/inputprometheus.py +18 -50
  67. cribl_control_plane/models/inputprometheusrw.py +10 -30
  68. cribl_control_plane/models/inputrawudp.py +4 -11
  69. cribl_control_plane/models/inputs3.py +8 -21
  70. cribl_control_plane/models/inputs3inventory.py +10 -26
  71. cribl_control_plane/models/inputsecuritylake.py +10 -27
  72. cribl_control_plane/models/inputsnmp.py +6 -16
  73. cribl_control_plane/models/inputsplunk.py +12 -33
  74. cribl_control_plane/models/inputsplunkhec.py +10 -29
  75. cribl_control_plane/models/inputsplunksearch.py +14 -33
  76. cribl_control_plane/models/inputsqs.py +10 -27
  77. cribl_control_plane/models/inputsyslog.py +16 -43
  78. cribl_control_plane/models/inputsystemmetrics.py +24 -48
  79. cribl_control_plane/models/inputsystemstate.py +8 -16
  80. cribl_control_plane/models/inputtcp.py +10 -29
  81. cribl_control_plane/models/inputtcpjson.py +10 -29
  82. cribl_control_plane/models/inputwef.py +14 -37
  83. cribl_control_plane/models/inputwindowsmetrics.py +24 -44
  84. cribl_control_plane/models/inputwineventlogs.py +10 -20
  85. cribl_control_plane/models/inputwiz.py +8 -21
  86. cribl_control_plane/models/inputwizwebhook.py +8 -23
  87. cribl_control_plane/models/inputzscalerhec.py +10 -29
  88. cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
  89. cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
  90. cribl_control_plane/models/masterworkerentry.py +2 -7
  91. cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
  92. cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
  93. cribl_control_plane/models/nodeprovidedinfo.py +0 -3
  94. cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
  95. cribl_control_plane/models/nodeupgradestate.py +1 -2
  96. cribl_control_plane/models/nodeupgradestatus.py +5 -13
  97. cribl_control_plane/models/output.py +79 -84
  98. cribl_control_plane/models/outputazureblob.py +18 -48
  99. cribl_control_plane/models/outputazuredataexplorer.py +28 -73
  100. cribl_control_plane/models/outputazureeventhub.py +18 -40
  101. cribl_control_plane/models/outputazurelogs.py +12 -35
  102. cribl_control_plane/models/outputclickhouse.py +20 -55
  103. cribl_control_plane/models/outputcloudwatch.py +10 -29
  104. cribl_control_plane/models/outputconfluentcloud.py +44 -71
  105. cribl_control_plane/models/outputcriblhttp.py +16 -44
  106. cribl_control_plane/models/outputcribllake.py +16 -46
  107. cribl_control_plane/models/outputcribltcp.py +18 -45
  108. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
  109. cribl_control_plane/models/outputdatadog.py +20 -48
  110. cribl_control_plane/models/outputdataset.py +18 -46
  111. cribl_control_plane/models/outputdiskspool.py +2 -7
  112. cribl_control_plane/models/outputdls3.py +24 -68
  113. cribl_control_plane/models/outputdynatracehttp.py +20 -53
  114. cribl_control_plane/models/outputdynatraceotlp.py +22 -55
  115. cribl_control_plane/models/outputelastic.py +18 -43
  116. cribl_control_plane/models/outputelasticcloud.py +12 -36
  117. cribl_control_plane/models/outputexabeam.py +10 -29
  118. cribl_control_plane/models/outputfilesystem.py +14 -39
  119. cribl_control_plane/models/outputgooglechronicle.py +16 -50
  120. cribl_control_plane/models/outputgooglecloudlogging.py +18 -50
  121. cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
  122. cribl_control_plane/models/outputgooglepubsub.py +10 -31
  123. cribl_control_plane/models/outputgrafanacloud.py +32 -97
  124. cribl_control_plane/models/outputgraphite.py +14 -31
  125. cribl_control_plane/models/outputhoneycomb.py +12 -35
  126. cribl_control_plane/models/outputhumiohec.py +16 -43
  127. cribl_control_plane/models/outputinfluxdb.py +16 -42
  128. cribl_control_plane/models/outputkafka.py +40 -69
  129. cribl_control_plane/models/outputkinesis.py +16 -40
  130. cribl_control_plane/models/outputloki.py +16 -41
  131. cribl_control_plane/models/outputminio.py +24 -65
  132. cribl_control_plane/models/outputmsk.py +42 -77
  133. cribl_control_plane/models/outputnewrelic.py +18 -43
  134. cribl_control_plane/models/outputnewrelicevents.py +14 -41
  135. cribl_control_plane/models/outputopentelemetry.py +26 -67
  136. cribl_control_plane/models/outputprometheus.py +12 -35
  137. cribl_control_plane/models/outputring.py +8 -19
  138. cribl_control_plane/models/outputs3.py +26 -68
  139. cribl_control_plane/models/outputsecuritylake.py +18 -52
  140. cribl_control_plane/models/outputsentinel.py +18 -45
  141. cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
  142. cribl_control_plane/models/outputservicenow.py +24 -60
  143. cribl_control_plane/models/outputsignalfx.py +14 -37
  144. cribl_control_plane/models/outputsns.py +14 -36
  145. cribl_control_plane/models/outputsplunk.py +24 -60
  146. cribl_control_plane/models/outputsplunkhec.py +12 -35
  147. cribl_control_plane/models/outputsplunklb.py +30 -77
  148. cribl_control_plane/models/outputsqs.py +16 -41
  149. cribl_control_plane/models/outputstatsd.py +14 -30
  150. cribl_control_plane/models/outputstatsdext.py +12 -29
  151. cribl_control_plane/models/outputsumologic.py +12 -35
  152. cribl_control_plane/models/outputsyslog.py +24 -58
  153. cribl_control_plane/models/outputtcpjson.py +20 -52
  154. cribl_control_plane/models/outputwavefront.py +12 -35
  155. cribl_control_plane/models/outputwebhook.py +22 -58
  156. cribl_control_plane/models/outputxsiam.py +14 -35
  157. cribl_control_plane/models/productscore.py +1 -2
  158. cribl_control_plane/models/rbacresource.py +1 -2
  159. cribl_control_plane/models/resourcepolicy.py +2 -4
  160. cribl_control_plane/models/routecloneconf.py +13 -0
  161. cribl_control_plane/models/routeconf.py +4 -3
  162. cribl_control_plane/models/runnablejobcollection.py +13 -30
  163. cribl_control_plane/models/runnablejobexecutor.py +4 -13
  164. cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
  165. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
  166. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
  167. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +5 -6
  168. cribl_control_plane/models/workertypes.py +1 -2
  169. {cribl_control_plane-0.0.50rc2.dist-info → cribl_control_plane-0.0.51.dist-info}/METADATA +14 -5
  170. cribl_control_plane-0.0.51.dist-info/RECORD +325 -0
  171. cribl_control_plane/models/error.py +0 -16
  172. cribl_control_plane/models/gethealthinfoop.py +0 -17
  173. cribl_control_plane/models/gitshowresult.py +0 -19
  174. cribl_control_plane/models/outputdatabricks.py +0 -282
  175. cribl_control_plane-0.0.50rc2.dist-info/RECORD +0 -327
  176. {cribl_control_plane-0.0.50rc2.dist-info → cribl_control_plane-0.0.51.dist-info}/WHEEL +0 -0
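The diffs reproduced below cover three of the 176 changed files (outputhumiohec.py, outputinfluxdb.py, and outputkafka.py). All three show the release's recurring change: enum classes drop the metaclass=utils.OpenEnumMeta declaration, and the model fields drop their PlainValidator(validate_open_enum(...)) wrappers, so the SDK's open enums become ordinary closed Enum classes. A minimal sketch of what that means for callers under pydantic v2 follows; Compression and Queue are illustrative stand-ins, not classes from this package.

# Hedged sketch (not SDK code): the effect of replacing open enums with plain
# closed Enum classes under pydantic v2. "Compression" and "Queue" stand in
# for generated models such as OutputHumioHecCompression.
from enum import Enum
from typing import Optional

import pydantic


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


class Queue(pydantic.BaseModel):
    pq_compress: Optional[Compression] = Compression.NONE


print(Queue(pq_compress="gzip").pq_compress)  # declared member: accepted
try:
    Queue(pq_compress="zstd")  # not a member: a closed enum rejects it
except pydantic.ValidationError as exc:
    print(exc.error_count(), "validation error")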
--- cribl_control_plane/models/outputhumiohec.py (0.0.50rc2)
+++ cribl_control_plane/models/outputhumiohec.py (0.0.51)
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -26,7 +23,7 @@ class OutputHumioHecExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputHumioHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -34,14 +31,14 @@ class OutputHumioHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnum
     NONE = "none"
 
 
-class OutputHumioHecRequestFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecRequestFormat(str, Enum):
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""
 
     JSON = "JSON"
     RAW = "raw"
 
 
-class OutputHumioHecAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
     MANUAL = "manual"
@@ -102,7 +99,7 @@ class OutputHumioHecTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputHumioHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -110,21 +107,21 @@ class OutputHumioHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta
     QUEUE = "queue"
 
 
-class OutputHumioHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputHumioHecQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputHumioHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -279,10 +276,7 @@ class OutputHumioHec(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputHumioHecFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputHumioHecFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputHumioHecFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -293,20 +287,12 @@ class OutputHumioHec(BaseModel):
     r"""List of headers that are safe to log in plain text"""
 
     format_: Annotated[
-        Annotated[
-            Optional[OutputHumioHecRequestFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputHumioHecRequestFormat], pydantic.Field(alias="format")
     ] = OutputHumioHecRequestFormat.JSON
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""
 
     auth_type: Annotated[
-        Annotated[
-            Optional[OutputHumioHecAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputHumioHecAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputHumioHecAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
@@ -327,10 +313,7 @@ class OutputHumioHec(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputHumioHecBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputHumioHecBackpressureBehavior],
        pydantic.Field(alias="onBackpressure"),
     ] = OutputHumioHecBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -357,29 +340,19 @@ class OutputHumioHec(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputHumioHecCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputHumioHecCompression], pydantic.Field(alias="pqCompress")
     ] = OutputHumioHecCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputHumioHecQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputHumioHecQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputHumioHecQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputHumioHecMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputHumioHecMode.ERROR
+    pq_mode: Annotated[Optional[OutputHumioHecMode], pydantic.Field(alias="pqMode")] = (
+        OutputHumioHecMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
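The Annotated[..., pydantic.Field(alias=...)] pattern that survives the cleanup keeps the wire format camelCase while the Python attributes stay snake_case. A minimal sketch of that round-trip, assuming a stand-in PqSettings model rather than the real OutputHumioHec (the BACKPRESSURE member is inferred from the pq_mode docstring above):

# Hedged sketch: alias behavior of the surviving Annotated/Field pattern.
# "Mode" and "PqSettings" are stand-ins, not classes from this package.
from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class Mode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"  # assumed member, mirroring the docstring


class PqSettings(pydantic.BaseModel):
    pq_mode: Annotated[Optional[Mode], pydantic.Field(alias="pqMode")] = Mode.ERROR


# Wire payloads use the camelCase alias; attribute access stays snake_case.
s = PqSettings.model_validate({"pqMode": "backpressure"})
assert s.pq_mode is Mode.BACKPRESSURE
assert s.model_dump(by_alias=True)["pqMode"] == "backpressure"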
--- cribl_control_plane/models/outputinfluxdb.py (0.0.50rc2)
+++ cribl_control_plane/models/outputinfluxdb.py (0.0.51)
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,7 +12,7 @@ class OutputInfluxdbType(str, Enum):
     INFLUXDB = "influxdb"
 
 
-class TimestampPrecision(str, Enum, metaclass=utils.OpenEnumMeta):
+class TimestampPrecision(str, Enum):
     r"""Sets the precision for the supplied Unix time values. Defaults to milliseconds."""
 
     NS = "ns"
@@ -37,7 +34,7 @@ class OutputInfluxdbExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputInfluxdbFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -99,7 +96,7 @@ class OutputInfluxdbTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputInfluxdbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -107,7 +104,7 @@ class OutputInfluxdbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta
     QUEUE = "queue"
 
 
-class OutputInfluxdbAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbAuthenticationType(str, Enum):
     r"""InfluxDB authentication type"""
 
     NONE = "none"
@@ -118,21 +115,21 @@ class OutputInfluxdbAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     OAUTH = "oauth"
 
 
-class OutputInfluxdbCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputInfluxdbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputInfluxdbMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -309,10 +306,7 @@ class OutputInfluxdb(BaseModel):
     r"""The v2 API can be enabled with InfluxDB versions 1.8 and later."""
 
     timestamp_precision: Annotated[
-        Annotated[
-            Optional[TimestampPrecision], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="timestampPrecision"),
+        Optional[TimestampPrecision], pydantic.Field(alias="timestampPrecision")
     ] = TimestampPrecision.MS
     r"""Sets the precision for the supplied Unix time values. Defaults to milliseconds."""
 
@@ -370,10 +364,7 @@ class OutputInfluxdb(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputInfluxdbFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputInfluxdbFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -400,20 +391,13 @@ class OutputInfluxdb(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputInfluxdbBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputInfluxdbBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbAuthenticationType],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputInfluxdbAuthenticationType], pydantic.Field(alias="authType")
     ] = OutputInfluxdbAuthenticationType.NONE
     r"""InfluxDB authentication type"""
 
@@ -442,29 +426,19 @@ class OutputInfluxdb(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputInfluxdbCompression], pydantic.Field(alias="pqCompress")
     ] = OutputInfluxdbCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputInfluxdbQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputInfluxdbQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputInfluxdbMode.ERROR
+    pq_mode: Annotated[Optional[OutputInfluxdbMode], pydantic.Field(alias="pqMode")] = (
+        OutputInfluxdbMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
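A migration note: the OpenEnumMeta machinery removed here appears to be what let the generated models pass through enum values the SDK did not know about, so configs carrying such values should now fail validation instead. One possible guard is to coerce raw strings before building the model; coerce_precision below is a hypothetical helper, and only the TimestampPrecision members visible in this diff (NS, plus the MS default) are listed:

# Hedged sketch: guarding existing configs now that TimestampPrecision is a
# closed enum. The real enum may define more members than shown here.
from enum import Enum


class TimestampPrecision(str, Enum):
    NS = "ns"
    MS = "ms"  # documented default


def coerce_precision(raw: str) -> TimestampPrecision:
    """Map a raw config value to a member, falling back to the MS default."""
    try:
        return TimestampPrecision(raw)
    except ValueError:
        return TimestampPrecision.MS


assert coerce_precision("ns") is TimestampPrecision.NS
assert coerce_precision("hours") is TimestampPrecision.MS  # unknown -> default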
--- cribl_control_plane/models/outputkafka.py (0.0.50rc2)
+++ cribl_control_plane/models/outputkafka.py (0.0.51)
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,7 +12,7 @@ class OutputKafkaType(str, Enum):
     KAFKA = "kafka"
 
 
-class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaAcknowledgments(int, Enum):
     r"""Control the number of required acknowledgments."""
 
     ONE = 1
@@ -23,7 +20,7 @@ class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     MINUS_1 = -1
 
 
-class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaRecordDataFormat(str, Enum):
     r"""Format to use to serialize events before writing to Kafka."""
 
     JSON = "json"
@@ -31,7 +28,7 @@ class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PROTOBUF = "protobuf"
 
 
-class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaCompression(str, Enum):
     r"""Codec to use to compress the data before sending to Kafka"""
 
     NONE = "none"
@@ -40,6 +37,13 @@ class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     LZ4 = "lz4"
 
 
+class OutputKafkaSchemaType(str, Enum):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class OutputKafkaAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -59,18 +63,14 @@ class OutputKafkaAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""
 
 
-class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -130,18 +130,12 @@ class OutputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
@@ -150,6 +144,8 @@ class OutputKafkaKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[OutputKafkaSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -173,6 +169,11 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
+    schema_type: Annotated[
+        Optional[OutputKafkaSchemaType], pydantic.Field(alias="schemaType")
+    ] = OutputKafkaSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
@@ -202,7 +203,7 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
 
 
-class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaSASLMechanism(str, Enum):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -223,9 +224,7 @@ class OutputKafkaAuthentication(BaseModel):
 
     disabled: Optional[bool] = True
 
-    mechanism: Annotated[
-        Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
-    ] = OutputKafkaSASLMechanism.PLAIN
+    mechanism: Optional[OutputKafkaSASLMechanism] = OutputKafkaSASLMechanism.PLAIN
 
     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -233,14 +232,14 @@ class OutputKafkaAuthentication(BaseModel):
     r"""Enable OAuth authentication"""
 
 
-class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputKafkaMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -300,23 +299,15 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputKafkaMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputKafkaMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None
 
 
-class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -324,21 +315,21 @@ class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -450,23 +441,15 @@ class OutputKafka(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    ack: Annotated[
-        Optional[OutputKafkaAcknowledgments], PlainValidator(validate_open_enum(True))
-    ] = OutputKafkaAcknowledgments.ONE
+    ack: Optional[OutputKafkaAcknowledgments] = OutputKafkaAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""
 
     format_: Annotated[
-        Annotated[
-            Optional[OutputKafkaRecordDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputKafkaRecordDataFormat], pydantic.Field(alias="format")
     ] = OutputKafkaRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""
 
-    compression: Annotated[
-        Optional[OutputKafkaCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputKafkaCompression.GZIP
+    compression: Optional[OutputKafkaCompression] = OutputKafkaCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""
 
     max_record_size_kb: Annotated[
@@ -529,10 +512,7 @@ class OutputKafka(BaseModel):
     tls: Optional[OutputKafkaTLSSettingsClientSide] = None
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputKafkaBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKafkaBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKafkaBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -558,27 +538,18 @@ class OutputKafka(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputKafkaPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputKafkaPqCompressCompression], pydantic.Field(alias="pqCompress")
     ] = OutputKafkaPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputKafkaQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqOnBackpressure"),
+        Optional[OutputKafkaQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
     ] = OutputKafkaQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKafkaMode.ERROR
+    pq_mode: Annotated[Optional[OutputKafkaMode], pydantic.Field(alias="pqMode")] = (
+        OutputKafkaMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
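Beyond the enum tightening, outputkafka.py also gains a new OutputKafkaSchemaType enum and a schema_type field (alias schemaType, defaulting to Avro) on the schema registry authentication model. A trimmed sketch of how that field behaves; SchemaRegistrySettings is a stand-in for the much larger OutputKafkaKafkaSchemaRegistryAuthentication model:

# Hedged sketch: the new schemaType knob on the Kafka schema registry settings.
# OutputKafkaSchemaType mirrors the diff; the host model here is a trimmed stand-in.
from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class OutputKafkaSchemaType(str, Enum):
    r"""The schema format used to encode and decode event data"""

    AVRO = "avro"
    JSON = "json"


class SchemaRegistrySettings(pydantic.BaseModel):
    disabled: Optional[bool] = True
    schema_type: Annotated[
        Optional[OutputKafkaSchemaType], pydantic.Field(alias="schemaType")
    ] = OutputKafkaSchemaType.AVRO  # Avro default, as in the diff


# Omitting schemaType keeps the Avro default; "json" opts into JSON encoding.
assert SchemaRegistrySettings().schema_type is OutputKafkaSchemaType.AVRO
cfg = SchemaRegistrySettings.model_validate({"schemaType": "json"})
assert cfg.schema_type is OutputKafkaSchemaType.JSON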