cribl-control-plane 0.0.48a1__py3-none-any.whl → 0.0.50__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (165)
  1. cribl_control_plane/_version.py +6 -4
  2. cribl_control_plane/errors/healthstatus_error.py +2 -8
  3. cribl_control_plane/httpclient.py +0 -1
  4. cribl_control_plane/models/__init__.py +12 -12
  5. cribl_control_plane/models/appmode.py +13 -0
  6. cribl_control_plane/models/cacheconnection.py +2 -10
  7. cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
  8. cribl_control_plane/models/cloudprovider.py +1 -2
  9. cribl_control_plane/models/configgroup.py +2 -7
  10. cribl_control_plane/models/configgroupcloud.py +2 -6
  11. cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
  12. cribl_control_plane/models/cribllakedataset.py +2 -8
  13. cribl_control_plane/models/datasetmetadata.py +2 -8
  14. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
  15. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
  16. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
  17. cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
  18. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
  19. cribl_control_plane/models/getsummaryop.py +2 -7
  20. cribl_control_plane/models/hbcriblinfo.py +3 -19
  21. cribl_control_plane/models/healthstatus.py +4 -7
  22. cribl_control_plane/models/heartbeatmetadata.py +0 -3
  23. cribl_control_plane/models/inputappscope.py +14 -34
  24. cribl_control_plane/models/inputazureblob.py +6 -17
  25. cribl_control_plane/models/inputcollection.py +4 -11
  26. cribl_control_plane/models/inputconfluentcloud.py +20 -47
  27. cribl_control_plane/models/inputcribl.py +4 -11
  28. cribl_control_plane/models/inputcriblhttp.py +8 -23
  29. cribl_control_plane/models/inputcribllakehttp.py +10 -22
  30. cribl_control_plane/models/inputcriblmetrics.py +4 -12
  31. cribl_control_plane/models/inputcribltcp.py +8 -23
  32. cribl_control_plane/models/inputcrowdstrike.py +10 -26
  33. cribl_control_plane/models/inputdatadogagent.py +8 -24
  34. cribl_control_plane/models/inputdatagen.py +4 -11
  35. cribl_control_plane/models/inputedgeprometheus.py +24 -58
  36. cribl_control_plane/models/inputelastic.py +14 -40
  37. cribl_control_plane/models/inputeventhub.py +6 -15
  38. cribl_control_plane/models/inputexec.py +6 -14
  39. cribl_control_plane/models/inputfile.py +6 -15
  40. cribl_control_plane/models/inputfirehose.py +8 -23
  41. cribl_control_plane/models/inputgooglepubsub.py +6 -19
  42. cribl_control_plane/models/inputgrafana.py +24 -67
  43. cribl_control_plane/models/inputhttp.py +8 -23
  44. cribl_control_plane/models/inputhttpraw.py +8 -23
  45. cribl_control_plane/models/inputjournalfiles.py +4 -12
  46. cribl_control_plane/models/inputkafka.py +16 -46
  47. cribl_control_plane/models/inputkinesis.py +14 -38
  48. cribl_control_plane/models/inputkubeevents.py +4 -11
  49. cribl_control_plane/models/inputkubelogs.py +8 -16
  50. cribl_control_plane/models/inputkubemetrics.py +8 -16
  51. cribl_control_plane/models/inputloki.py +10 -29
  52. cribl_control_plane/models/inputmetrics.py +8 -23
  53. cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
  54. cribl_control_plane/models/inputmsk.py +18 -53
  55. cribl_control_plane/models/inputnetflow.py +4 -11
  56. cribl_control_plane/models/inputoffice365mgmt.py +14 -33
  57. cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
  58. cribl_control_plane/models/inputoffice365service.py +16 -35
  59. cribl_control_plane/models/inputopentelemetry.py +16 -38
  60. cribl_control_plane/models/inputprometheus.py +18 -50
  61. cribl_control_plane/models/inputprometheusrw.py +10 -30
  62. cribl_control_plane/models/inputrawudp.py +4 -11
  63. cribl_control_plane/models/inputs3.py +8 -21
  64. cribl_control_plane/models/inputs3inventory.py +10 -26
  65. cribl_control_plane/models/inputsecuritylake.py +10 -27
  66. cribl_control_plane/models/inputsnmp.py +6 -16
  67. cribl_control_plane/models/inputsplunk.py +12 -33
  68. cribl_control_plane/models/inputsplunkhec.py +10 -29
  69. cribl_control_plane/models/inputsplunksearch.py +14 -33
  70. cribl_control_plane/models/inputsqs.py +10 -27
  71. cribl_control_plane/models/inputsyslog.py +16 -43
  72. cribl_control_plane/models/inputsystemmetrics.py +24 -48
  73. cribl_control_plane/models/inputsystemstate.py +8 -16
  74. cribl_control_plane/models/inputtcp.py +10 -29
  75. cribl_control_plane/models/inputtcpjson.py +10 -29
  76. cribl_control_plane/models/inputwef.py +14 -37
  77. cribl_control_plane/models/inputwindowsmetrics.py +24 -44
  78. cribl_control_plane/models/inputwineventlogs.py +10 -20
  79. cribl_control_plane/models/inputwiz.py +8 -21
  80. cribl_control_plane/models/inputwizwebhook.py +8 -23
  81. cribl_control_plane/models/inputzscalerhec.py +10 -29
  82. cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
  83. cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
  84. cribl_control_plane/models/masterworkerentry.py +2 -7
  85. cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
  86. cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
  87. cribl_control_plane/models/nodeprovidedinfo.py +0 -3
  88. cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
  89. cribl_control_plane/models/nodeupgradestate.py +1 -2
  90. cribl_control_plane/models/nodeupgradestatus.py +5 -13
  91. cribl_control_plane/models/outputazureblob.py +18 -48
  92. cribl_control_plane/models/outputazuredataexplorer.py +28 -73
  93. cribl_control_plane/models/outputazureeventhub.py +18 -40
  94. cribl_control_plane/models/outputazurelogs.py +12 -35
  95. cribl_control_plane/models/outputclickhouse.py +20 -55
  96. cribl_control_plane/models/outputcloudwatch.py +10 -29
  97. cribl_control_plane/models/outputconfluentcloud.py +32 -77
  98. cribl_control_plane/models/outputcriblhttp.py +16 -44
  99. cribl_control_plane/models/outputcribllake.py +16 -46
  100. cribl_control_plane/models/outputcribltcp.py +18 -45
  101. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
  102. cribl_control_plane/models/outputdatadog.py +20 -48
  103. cribl_control_plane/models/outputdataset.py +18 -46
  104. cribl_control_plane/models/outputdiskspool.py +2 -7
  105. cribl_control_plane/models/outputdls3.py +24 -68
  106. cribl_control_plane/models/outputdynatracehttp.py +20 -53
  107. cribl_control_plane/models/outputdynatraceotlp.py +22 -55
  108. cribl_control_plane/models/outputelastic.py +18 -43
  109. cribl_control_plane/models/outputelasticcloud.py +12 -36
  110. cribl_control_plane/models/outputexabeam.py +10 -29
  111. cribl_control_plane/models/outputfilesystem.py +14 -39
  112. cribl_control_plane/models/outputgooglechronicle.py +16 -50
  113. cribl_control_plane/models/outputgooglecloudlogging.py +14 -41
  114. cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
  115. cribl_control_plane/models/outputgooglepubsub.py +10 -31
  116. cribl_control_plane/models/outputgrafanacloud.py +32 -97
  117. cribl_control_plane/models/outputgraphite.py +14 -31
  118. cribl_control_plane/models/outputhoneycomb.py +12 -35
  119. cribl_control_plane/models/outputhumiohec.py +16 -43
  120. cribl_control_plane/models/outputinfluxdb.py +16 -42
  121. cribl_control_plane/models/outputkafka.py +28 -74
  122. cribl_control_plane/models/outputkinesis.py +16 -40
  123. cribl_control_plane/models/outputloki.py +16 -41
  124. cribl_control_plane/models/outputminio.py +24 -65
  125. cribl_control_plane/models/outputmsk.py +30 -82
  126. cribl_control_plane/models/outputnewrelic.py +18 -43
  127. cribl_control_plane/models/outputnewrelicevents.py +14 -41
  128. cribl_control_plane/models/outputopentelemetry.py +26 -67
  129. cribl_control_plane/models/outputprometheus.py +12 -35
  130. cribl_control_plane/models/outputring.py +8 -19
  131. cribl_control_plane/models/outputs3.py +26 -68
  132. cribl_control_plane/models/outputsecuritylake.py +18 -52
  133. cribl_control_plane/models/outputsentinel.py +18 -45
  134. cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
  135. cribl_control_plane/models/outputservicenow.py +24 -60
  136. cribl_control_plane/models/outputsignalfx.py +14 -37
  137. cribl_control_plane/models/outputsns.py +14 -36
  138. cribl_control_plane/models/outputsplunk.py +24 -60
  139. cribl_control_plane/models/outputsplunkhec.py +12 -35
  140. cribl_control_plane/models/outputsplunklb.py +30 -77
  141. cribl_control_plane/models/outputsqs.py +16 -41
  142. cribl_control_plane/models/outputstatsd.py +14 -30
  143. cribl_control_plane/models/outputstatsdext.py +12 -29
  144. cribl_control_plane/models/outputsumologic.py +12 -35
  145. cribl_control_plane/models/outputsyslog.py +24 -58
  146. cribl_control_plane/models/outputtcpjson.py +20 -52
  147. cribl_control_plane/models/outputwavefront.py +12 -35
  148. cribl_control_plane/models/outputwebhook.py +22 -58
  149. cribl_control_plane/models/outputxsiam.py +14 -35
  150. cribl_control_plane/models/productscore.py +1 -2
  151. cribl_control_plane/models/rbacresource.py +1 -2
  152. cribl_control_plane/models/resourcepolicy.py +2 -4
  153. cribl_control_plane/models/routecloneconf.py +13 -0
  154. cribl_control_plane/models/routeconf.py +4 -3
  155. cribl_control_plane/models/runnablejobcollection.py +13 -30
  156. cribl_control_plane/models/runnablejobexecutor.py +4 -13
  157. cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
  158. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
  159. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
  160. cribl_control_plane/models/workertypes.py +1 -2
  161. cribl_control_plane/sdk.py +2 -2
  162. cribl_control_plane/utils/annotations.py +32 -8
  163. {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.50.dist-info}/METADATA +2 -1
  164. {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.50.dist-info}/RECORD +165 -163
  165. {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.50.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputhumiohec.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -26,7 +23,7 @@ class OutputHumioHecExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputHumioHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -34,14 +31,14 @@ class OutputHumioHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnum
     NONE = "none"


-class OutputHumioHecRequestFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecRequestFormat(str, Enum):
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""

     JSON = "JSON"
     RAW = "raw"


-class OutputHumioHecAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

     MANUAL = "manual"
@@ -102,7 +99,7 @@ class OutputHumioHecTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputHumioHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -110,21 +107,21 @@ class OutputHumioHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta
     QUEUE = "queue"


-class OutputHumioHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputHumioHecQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputHumioHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputHumioHecMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -279,10 +276,7 @@ class OutputHumioHec(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputHumioHecFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputHumioHecFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputHumioHecFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -293,20 +287,12 @@ class OutputHumioHec(BaseModel):
     r"""List of headers that are safe to log in plain text"""

     format_: Annotated[
-        Annotated[
-            Optional[OutputHumioHecRequestFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputHumioHecRequestFormat], pydantic.Field(alias="format")
     ] = OutputHumioHecRequestFormat.JSON
     r"""When set to JSON, the event is automatically formatted with required fields before sending. When set to Raw, only the event's `_raw` value is sent."""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputHumioHecAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputHumioHecAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputHumioHecAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -327,10 +313,7 @@ class OutputHumioHec(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputHumioHecBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputHumioHecBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputHumioHecBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -357,29 +340,19 @@ class OutputHumioHec(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputHumioHecCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputHumioHecCompression], pydantic.Field(alias="pqCompress")
     ] = OutputHumioHecCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputHumioHecQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputHumioHecQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputHumioHecQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputHumioHecMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputHumioHecMode.ERROR
+    pq_mode: Annotated[Optional[OutputHumioHecMode], pydantic.Field(alias="pqMode")] = (
+        OutputHumioHecMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
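
The pattern above repeats across the regenerated models in this release: enum classes drop the utils.OpenEnumMeta metaclass, and enum-typed fields lose their PlainValidator(validate_open_enum(...)) wrapper. The sketch below is a minimal reproduction of the practical effect for callers, not the SDK's actual code; the lenient() function is a hypothetical stand-in for the removed validate_open_enum helper. The old open-enum shape let unrecognized server values pass through, while the new closed enums make pydantic reject them.

from enum import Enum
from typing import Optional

from pydantic import BaseModel, ValidationError
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


def lenient(value: object) -> object:
    # Hypothetical stand-in for the removed validate_open_enum helper:
    # coerce to a known member when possible, otherwise pass the raw value through.
    try:
        return Compression(value)
    except ValueError:
        return value


class OldStyle(BaseModel):
    # 0.0.48a1-era shape: PlainValidator replaces pydantic's enum check entirely.
    pq_compress: Annotated[Optional[Compression], PlainValidator(lenient)] = (
        Compression.NONE
    )


class NewStyle(BaseModel):
    # 0.0.50 shape: a plain closed enum, validated strictly by pydantic.
    pq_compress: Optional[Compression] = Compression.NONE


print(OldStyle(pq_compress="zstd").pq_compress)  # 'zstd' is kept as-is
try:
    NewStyle(pq_compress="zstd")
except ValidationError as err:
    print("rejected:", err.errors()[0]["msg"])  # unknown values now fail validation

In short, 0.0.50 trades forward compatibility with unrecognized server values for stricter, earlier validation errors.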
cribl_control_plane/models/outputinfluxdb.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputInfluxdbType(str, Enum):
     INFLUXDB = "influxdb"


-class TimestampPrecision(str, Enum, metaclass=utils.OpenEnumMeta):
+class TimestampPrecision(str, Enum):
     r"""Sets the precision for the supplied Unix time values. Defaults to milliseconds."""

     NS = "ns"
@@ -37,7 +34,7 @@ class OutputInfluxdbExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputInfluxdbFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -99,7 +96,7 @@ class OutputInfluxdbTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputInfluxdbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -107,7 +104,7 @@ class OutputInfluxdbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta
     QUEUE = "queue"


-class OutputInfluxdbAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbAuthenticationType(str, Enum):
     r"""InfluxDB authentication type"""

     NONE = "none"
@@ -118,21 +115,21 @@ class OutputInfluxdbAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     OAUTH = "oauth"


-class OutputInfluxdbCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputInfluxdbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputInfluxdbMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputInfluxdbMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -309,10 +306,7 @@ class OutputInfluxdb(BaseModel):
     r"""The v2 API can be enabled with InfluxDB versions 1.8 and later."""

     timestamp_precision: Annotated[
-        Annotated[
-            Optional[TimestampPrecision], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="timestampPrecision"),
+        Optional[TimestampPrecision], pydantic.Field(alias="timestampPrecision")
     ] = TimestampPrecision.MS
     r"""Sets the precision for the supplied Unix time values. Defaults to milliseconds."""

@@ -370,10 +364,7 @@ class OutputInfluxdb(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputInfluxdbFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputInfluxdbFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -400,20 +391,13 @@ class OutputInfluxdb(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputInfluxdbBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputInfluxdbBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbAuthenticationType],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputInfluxdbAuthenticationType], pydantic.Field(alias="authType")
     ] = OutputInfluxdbAuthenticationType.NONE
     r"""InfluxDB authentication type"""

@@ -442,29 +426,19 @@ class OutputInfluxdb(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputInfluxdbCompression], pydantic.Field(alias="pqCompress")
     ] = OutputInfluxdbCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputInfluxdbQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputInfluxdbQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputInfluxdbMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputInfluxdbMode.ERROR
+    pq_mode: Annotated[Optional[OutputInfluxdbMode], pydantic.Field(alias="pqMode")] = (
+        OutputInfluxdbMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
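
Note that while the inner Annotated wrapper disappears, every field keeps its pydantic.Field(alias=...), so the snake_case Python attribute still maps to the camelCase wire name. A self-contained sketch of that mechanism (PqSettings and Mode are illustrative names, not the SDK's; populate_by_name is set explicitly here, independent of whatever the SDK's own base model configures):

from enum import Enum
from typing import Optional

import pydantic
from pydantic import BaseModel, ConfigDict
from typing_extensions import Annotated


class Mode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class PqSettings(BaseModel):
    # populate_by_name lets callers use either the Python name or the wire alias.
    model_config = ConfigDict(populate_by_name=True)

    pq_mode: Annotated[Optional[Mode], pydantic.Field(alias="pqMode")] = Mode.ERROR


incoming = PqSettings.model_validate({"pqMode": "always"})  # camelCase from the API
print(incoming.pq_mode)                    # read back via the snake_case attribute
print(incoming.model_dump(by_alias=True))  # serializes as {'pqMode': ...} again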
cribl_control_plane/models/outputkafka.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputKafkaType(str, Enum):
     KAFKA = "kafka"


-class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaAcknowledgments(int, Enum):
     r"""Control the number of required acknowledgments."""

     ONE = 1
@@ -23,7 +20,7 @@ class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     MINUS_1 = -1


-class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaRecordDataFormat(str, Enum):
     r"""Format to use to serialize events before writing to Kafka."""

     JSON = "json"
@@ -31,7 +28,7 @@ class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PROTOBUF = "protobuf"


-class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaCompression(str, Enum):
     r"""Codec to use to compress the data before sending to Kafka"""

     NONE = "none"
@@ -40,7 +37,7 @@ class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     LZ4 = "lz4"


-class OutputKafkaSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaSchemaType(str, Enum):
     r"""The schema format used to encode and decode event data"""

     AVRO = "avro"
@@ -66,18 +63,14 @@ class OutputKafkaAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""


-class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputKafkaKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputKafkaKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -137,18 +130,12 @@ class OutputKafkaKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKafkaKafkaSchemaRegistryMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKafkaKafkaSchemaRegistryMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None

@@ -183,10 +170,7 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""

     schema_type: Annotated[
-        Annotated[
-            Optional[OutputKafkaSchemaType], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="schemaType"),
+        Optional[OutputKafkaSchemaType], pydantic.Field(alias="schemaType")
     ] = OutputKafkaSchemaType.AVRO
     r"""The schema format used to encode and decode event data"""

@@ -219,7 +203,7 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


-class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaSASLMechanism(str, Enum):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -240,9 +224,7 @@ class OutputKafkaAuthentication(BaseModel):

     disabled: Optional[bool] = True

-    mechanism: Annotated[
-        Optional[OutputKafkaSASLMechanism], PlainValidator(validate_open_enum(False))
-    ] = OutputKafkaSASLMechanism.PLAIN
+    mechanism: Optional[OutputKafkaSASLMechanism] = OutputKafkaSASLMechanism.PLAIN

     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -250,14 +232,14 @@ class OutputKafkaAuthentication(BaseModel):
     r"""Enable OAuth authentication"""


-class OutputKafkaMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputKafkaMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -317,23 +299,15 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputKafkaMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputKafkaMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputKafkaMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


-class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -341,21 +315,21 @@ class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputKafkaMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -467,23 +441,15 @@ class OutputKafka(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    ack: Annotated[
-        Optional[OutputKafkaAcknowledgments], PlainValidator(validate_open_enum(True))
-    ] = OutputKafkaAcknowledgments.ONE
+    ack: Optional[OutputKafkaAcknowledgments] = OutputKafkaAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""

     format_: Annotated[
-        Annotated[
-            Optional[OutputKafkaRecordDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputKafkaRecordDataFormat], pydantic.Field(alias="format")
     ] = OutputKafkaRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""

-    compression: Annotated[
-        Optional[OutputKafkaCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputKafkaCompression.GZIP
+    compression: Optional[OutputKafkaCompression] = OutputKafkaCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""

     max_record_size_kb: Annotated[
@@ -546,10 +512,7 @@ class OutputKafka(BaseModel):
     tls: Optional[OutputKafkaTLSSettingsClientSide] = None

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputKafkaBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputKafkaBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputKafkaBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -575,27 +538,18 @@ class OutputKafka(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputKafkaPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputKafkaPqCompressCompression], pydantic.Field(alias="pqCompress")
     ] = OutputKafkaPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputKafkaQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqOnBackpressure"),
+        Optional[OutputKafkaQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
     ] = OutputKafkaQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[Optional[OutputKafkaMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputKafkaMode.ERROR
+    pq_mode: Annotated[Optional[OutputKafkaMode], pydantic.Field(alias="pqMode")] = (
+        OutputKafkaMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
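
The Kafka model's ack field is the one place in these hunks where the removed wrapper was called as validate_open_enum(True); the flag's meaning is internal to the SDK, presumably selecting the integer variant of the helper. After the change, ack is a plain int-based Enum, so pydantic still coerces the declared integers to members but rejects anything else. A sketch with illustrative names (the NONE_ member is hypothetical; the hunks shown here only reveal ONE and MINUS_1):

from enum import Enum
from typing import Optional

from pydantic import BaseModel, ValidationError


class Acknowledgments(int, Enum):
    ONE = 1
    NONE_ = 0      # hypothetical member name; the SDK's actual name is generated
    MINUS_1 = -1


class KafkaOut(BaseModel):
    ack: Optional[Acknowledgments] = Acknowledgments.ONE


print(KafkaOut(ack=-1).ack)  # Acknowledgments.MINUS_1: declared ints still coerce
print(KafkaOut().ack)        # Acknowledgments.ONE: the default is unchanged
try:
    KafkaOut(ack=2)          # not a declared member
except ValidationError:
    print("ack=2 is rejected under the closed enum")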