cribl-control-plane 0.0.50-py3-none-any.whl → 0.0.50rc2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane has been flagged as potentially problematic.

Files changed (176)
  1. cribl_control_plane/_version.py +3 -5
  2. cribl_control_plane/errors/healthstatus_error.py +8 -2
  3. cribl_control_plane/groups_sdk.py +4 -4
  4. cribl_control_plane/health.py +6 -2
  5. cribl_control_plane/models/__init__.py +56 -31
  6. cribl_control_plane/models/cacheconnection.py +10 -2
  7. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  8. cribl_control_plane/models/cloudprovider.py +2 -1
  9. cribl_control_plane/models/configgroup.py +24 -4
  10. cribl_control_plane/models/configgroupcloud.py +6 -2
  11. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  12. cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
  13. cribl_control_plane/models/createversionpushop.py +5 -5
  14. cribl_control_plane/models/cribllakedataset.py +8 -2
  15. cribl_control_plane/models/datasetmetadata.py +8 -2
  16. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  17. cribl_control_plane/models/error.py +16 -0
  18. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  19. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  20. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  21. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  22. cribl_control_plane/models/gethealthinfoop.py +17 -0
  23. cribl_control_plane/models/getsummaryop.py +7 -2
  24. cribl_control_plane/models/getversionshowop.py +6 -5
  25. cribl_control_plane/models/gitinfo.py +14 -3
  26. cribl_control_plane/models/gitshowresult.py +19 -0
  27. cribl_control_plane/models/hbcriblinfo.py +24 -3
  28. cribl_control_plane/models/healthstatus.py +7 -4
  29. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  30. cribl_control_plane/models/input.py +65 -63
  31. cribl_control_plane/models/inputappscope.py +34 -14
  32. cribl_control_plane/models/inputazureblob.py +17 -6
  33. cribl_control_plane/models/inputcollection.py +11 -4
  34. cribl_control_plane/models/inputconfluentcloud.py +41 -32
  35. cribl_control_plane/models/inputcribl.py +11 -4
  36. cribl_control_plane/models/inputcriblhttp.py +23 -8
  37. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  38. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  39. cribl_control_plane/models/inputcribltcp.py +23 -8
  40. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  41. cribl_control_plane/models/inputdatadogagent.py +24 -8
  42. cribl_control_plane/models/inputdatagen.py +11 -4
  43. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  44. cribl_control_plane/models/inputelastic.py +40 -14
  45. cribl_control_plane/models/inputeventhub.py +15 -6
  46. cribl_control_plane/models/inputexec.py +14 -6
  47. cribl_control_plane/models/inputfile.py +15 -6
  48. cribl_control_plane/models/inputfirehose.py +23 -8
  49. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  50. cribl_control_plane/models/inputgrafana.py +67 -24
  51. cribl_control_plane/models/inputhttp.py +23 -8
  52. cribl_control_plane/models/inputhttpraw.py +23 -8
  53. cribl_control_plane/models/inputjournalfiles.py +12 -4
  54. cribl_control_plane/models/inputkafka.py +41 -28
  55. cribl_control_plane/models/inputkinesis.py +38 -14
  56. cribl_control_plane/models/inputkubeevents.py +11 -4
  57. cribl_control_plane/models/inputkubelogs.py +16 -8
  58. cribl_control_plane/models/inputkubemetrics.py +16 -8
  59. cribl_control_plane/models/inputloki.py +29 -10
  60. cribl_control_plane/models/inputmetrics.py +23 -8
  61. cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
  62. cribl_control_plane/models/inputmsk.py +48 -30
  63. cribl_control_plane/models/inputnetflow.py +11 -4
  64. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  65. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  66. cribl_control_plane/models/inputoffice365service.py +35 -16
  67. cribl_control_plane/models/inputopentelemetry.py +38 -16
  68. cribl_control_plane/models/inputprometheus.py +50 -18
  69. cribl_control_plane/models/inputprometheusrw.py +30 -10
  70. cribl_control_plane/models/inputrawudp.py +11 -4
  71. cribl_control_plane/models/inputs3.py +21 -8
  72. cribl_control_plane/models/inputs3inventory.py +26 -10
  73. cribl_control_plane/models/inputsecuritylake.py +27 -10
  74. cribl_control_plane/models/inputsnmp.py +16 -6
  75. cribl_control_plane/models/inputsplunk.py +33 -12
  76. cribl_control_plane/models/inputsplunkhec.py +29 -10
  77. cribl_control_plane/models/inputsplunksearch.py +33 -14
  78. cribl_control_plane/models/inputsqs.py +27 -10
  79. cribl_control_plane/models/inputsyslog.py +43 -16
  80. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  81. cribl_control_plane/models/inputsystemstate.py +16 -8
  82. cribl_control_plane/models/inputtcp.py +29 -10
  83. cribl_control_plane/models/inputtcpjson.py +29 -10
  84. cribl_control_plane/models/inputwef.py +37 -14
  85. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  86. cribl_control_plane/models/inputwineventlogs.py +20 -10
  87. cribl_control_plane/models/inputwiz.py +21 -8
  88. cribl_control_plane/models/inputwizwebhook.py +23 -8
  89. cribl_control_plane/models/inputzscalerhec.py +29 -10
  90. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  91. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  92. cribl_control_plane/models/masterworkerentry.py +7 -2
  93. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  94. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  95. cribl_control_plane/models/nodeprovidedinfo.py +3 -0
  96. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  97. cribl_control_plane/models/nodeupgradestate.py +2 -1
  98. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  99. cribl_control_plane/models/output.py +84 -79
  100. cribl_control_plane/models/outputazureblob.py +48 -18
  101. cribl_control_plane/models/outputazuredataexplorer.py +73 -28
  102. cribl_control_plane/models/outputazureeventhub.py +40 -18
  103. cribl_control_plane/models/outputazurelogs.py +35 -12
  104. cribl_control_plane/models/outputclickhouse.py +55 -20
  105. cribl_control_plane/models/outputcloudwatch.py +29 -10
  106. cribl_control_plane/models/outputconfluentcloud.py +71 -44
  107. cribl_control_plane/models/outputcriblhttp.py +44 -16
  108. cribl_control_plane/models/outputcribllake.py +46 -16
  109. cribl_control_plane/models/outputcribltcp.py +45 -18
  110. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
  111. cribl_control_plane/models/outputdatabricks.py +282 -0
  112. cribl_control_plane/models/outputdatadog.py +48 -20
  113. cribl_control_plane/models/outputdataset.py +46 -18
  114. cribl_control_plane/models/outputdiskspool.py +7 -2
  115. cribl_control_plane/models/outputdls3.py +68 -24
  116. cribl_control_plane/models/outputdynatracehttp.py +53 -20
  117. cribl_control_plane/models/outputdynatraceotlp.py +55 -22
  118. cribl_control_plane/models/outputelastic.py +43 -18
  119. cribl_control_plane/models/outputelasticcloud.py +36 -12
  120. cribl_control_plane/models/outputexabeam.py +29 -10
  121. cribl_control_plane/models/outputfilesystem.py +39 -14
  122. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  123. cribl_control_plane/models/outputgooglecloudlogging.py +50 -18
  124. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  125. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  126. cribl_control_plane/models/outputgrafanacloud.py +97 -32
  127. cribl_control_plane/models/outputgraphite.py +31 -14
  128. cribl_control_plane/models/outputhoneycomb.py +35 -12
  129. cribl_control_plane/models/outputhumiohec.py +43 -16
  130. cribl_control_plane/models/outputinfluxdb.py +42 -16
  131. cribl_control_plane/models/outputkafka.py +69 -40
  132. cribl_control_plane/models/outputkinesis.py +40 -16
  133. cribl_control_plane/models/outputloki.py +41 -16
  134. cribl_control_plane/models/outputminio.py +65 -24
  135. cribl_control_plane/models/outputmsk.py +77 -42
  136. cribl_control_plane/models/outputnewrelic.py +43 -18
  137. cribl_control_plane/models/outputnewrelicevents.py +41 -14
  138. cribl_control_plane/models/outputopentelemetry.py +67 -26
  139. cribl_control_plane/models/outputprometheus.py +35 -12
  140. cribl_control_plane/models/outputring.py +19 -8
  141. cribl_control_plane/models/outputs3.py +68 -26
  142. cribl_control_plane/models/outputsecuritylake.py +52 -18
  143. cribl_control_plane/models/outputsentinel.py +45 -18
  144. cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
  145. cribl_control_plane/models/outputservicenow.py +60 -24
  146. cribl_control_plane/models/outputsignalfx.py +37 -14
  147. cribl_control_plane/models/outputsns.py +36 -14
  148. cribl_control_plane/models/outputsplunk.py +60 -24
  149. cribl_control_plane/models/outputsplunkhec.py +35 -12
  150. cribl_control_plane/models/outputsplunklb.py +77 -30
  151. cribl_control_plane/models/outputsqs.py +41 -16
  152. cribl_control_plane/models/outputstatsd.py +30 -14
  153. cribl_control_plane/models/outputstatsdext.py +29 -12
  154. cribl_control_plane/models/outputsumologic.py +35 -12
  155. cribl_control_plane/models/outputsyslog.py +58 -24
  156. cribl_control_plane/models/outputtcpjson.py +52 -20
  157. cribl_control_plane/models/outputwavefront.py +35 -12
  158. cribl_control_plane/models/outputwebhook.py +58 -22
  159. cribl_control_plane/models/outputxsiam.py +35 -14
  160. cribl_control_plane/models/productscore.py +2 -1
  161. cribl_control_plane/models/rbacresource.py +2 -1
  162. cribl_control_plane/models/resourcepolicy.py +4 -2
  163. cribl_control_plane/models/routeconf.py +3 -4
  164. cribl_control_plane/models/runnablejobcollection.py +30 -13
  165. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  166. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  167. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  168. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  169. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
  170. cribl_control_plane/models/workertypes.py +2 -1
  171. {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/METADATA +1 -1
  172. cribl_control_plane-0.0.50rc2.dist-info/RECORD +327 -0
  173. cribl_control_plane/models/appmode.py +0 -13
  174. cribl_control_plane/models/routecloneconf.py +0 -13
  175. cribl_control_plane-0.0.50.dist-info/RECORD +0 -325
  176. {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/WHEEL +0 -0
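Note on the recurring change: apart from the new outputdatabricks.py model and the two deleted modules (appmode.py, routecloneconf.py), nearly every hunk in this release candidate applies the same "open enum" conversion, shown below for four representative files (outputprometheus.py, outputring.py, outputs3.py, outputsecuritylake.py). Each generated Enum gains metaclass=utils.OpenEnumMeta, and each enum-typed model field wraps its type in an inner Annotated[..., PlainValidator(validate_open_enum(False))]. The SDK's utils implementation is not part of this diff, so the following is only a minimal, hypothetical sketch of the semantics such helpers typically provide -- known values still resolve to members, while unknown values pass through instead of raising. The Codec enum and Demo model are made-up names for illustration; only the nested-Annotated field shape is copied from the hunks below.

# Hypothetical sketch -- not the actual cribl_control_plane.utils source.
from enum import Enum, EnumMeta
from typing import Any, Callable, Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class OpenEnumMeta(EnumMeta):
    """Member lookup that falls back to the raw value instead of raising."""

    def __call__(cls, value: Any = None, *args: Any, **kwargs: Any) -> Any:
        try:
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            return value  # unknown value: pass it through untouched


def validate_open_enum(is_int: bool) -> Callable[[Any], Any]:
    """Build the callable handed to PlainValidator: it replaces pydantic's
    strict enum check with a lenient pass-through."""

    def validate(value: Any) -> Any:
        if value is None or isinstance(value, Enum):
            return value
        return int(value) if is_int else value

    return validate


class Codec(str, Enum, metaclass=OpenEnumMeta):
    NONE = "none"
    GZIP = "gzip"


class Demo(pydantic.BaseModel):
    # Same nested-Annotated shape as the rewritten fields below: the
    # validator travels with the type, the alias stays field metadata.
    pq_compress: Annotated[
        Annotated[Optional[Codec], PlainValidator(validate_open_enum(False))],
        pydantic.Field(alias="pqCompress"),
    ] = Codec.NONE


print(Codec("gzip"))  # Codec.GZIP -- known member still resolves
print(Codec("zstd"))  # 'zstd' -- unknown value no longer raises ValueError
print(Demo.model_validate({"pqCompress": "zstd"}).pq_compress)  # 'zstd'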
cribl_control_plane/models/outputprometheus.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -23,7 +26,7 @@ class OutputPrometheusExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputPrometheusFailedRequestLoggingMode(str, Enum):
+class OutputPrometheusFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -85,7 +88,7 @@ class OutputPrometheusTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputPrometheusBackpressureBehavior(str, Enum):
+class OutputPrometheusBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -93,7 +96,7 @@ class OutputPrometheusBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputPrometheusAuthenticationType(str, Enum):
+class OutputPrometheusAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Remote Write authentication type"""
 
     NONE = "none"
@@ -104,21 +107,21 @@ class OutputPrometheusAuthenticationType(str, Enum):
     OAUTH = "oauth"
 
 
-class OutputPrometheusCompression(str, Enum):
+class OutputPrometheusCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputPrometheusQueueFullBehavior(str, Enum):
+class OutputPrometheusQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputPrometheusMode(str, Enum):
+class OutputPrometheusMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -332,7 +335,10 @@ class OutputPrometheus(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputPrometheusFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputPrometheusFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputPrometheusFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -359,13 +365,20 @@
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputPrometheusBackpressureBehavior],
+        Annotated[
+            Optional[OutputPrometheusBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
        pydantic.Field(alias="onBackpressure"),
     ] = OutputPrometheusBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputPrometheusAuthenticationType], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputPrometheusAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputPrometheusAuthenticationType.NONE
     r"""Remote Write authentication type"""
 
@@ -390,18 +403,28 @@
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputPrometheusCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputPrometheusCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputPrometheusCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputPrometheusQueueFullBehavior],
+        Annotated[
+            Optional[OutputPrometheusQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputPrometheusQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputPrometheusMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputPrometheusMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputPrometheusMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
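The rewrite is mechanical and repeats in the remaining three files: a single-line annotation such as Optional[OutputPrometheusMode], pydantic.Field(alias="pqMode") becomes a nested form in which PlainValidator(validate_open_enum(False)) is attached to an inner Annotated around the optional enum type, while pydantic.Field(...) moves to its own line in the outer Annotated. Defaults and field docstrings are untouched, so behavior for defined members should be unchanged; presumably only values outside the enum, which previously failed validation, are now accepted (see the sketch above).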
cribl_control_plane/models/outputring.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,19 +15,19 @@ class OutputRingType(str, Enum):
     RING = "ring"
 
 
-class OutputRingDataFormat(str, Enum):
+class OutputRingDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data."""
 
     JSON = "json"
     RAW = "raw"
 
 
-class OutputRingDataCompressionFormat(str, Enum):
+class OutputRingDataCompressionFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputRingBackpressureBehavior(str, Enum):
+class OutputRingBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -80,7 +83,10 @@ class OutputRing(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""
 
     format_: Annotated[
-        Optional[OutputRingDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputRingDataFormat], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputRingDataFormat.JSON
     r"""Format of the output data."""
 
@@ -95,15 +101,20 @@
     max_data_time: Annotated[Optional[str], pydantic.Field(alias="maxDataTime")] = "24h"
     r"""Maximum amount of time to retain data (examples: 2h, 4d). When limit is reached, older data will be deleted."""
 
-    compress: Optional[OutputRingDataCompressionFormat] = (
-        OutputRingDataCompressionFormat.GZIP
-    )
+    compress: Annotated[
+        Optional[OutputRingDataCompressionFormat],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputRingDataCompressionFormat.GZIP
 
     dest_path: Annotated[Optional[str], pydantic.Field(alias="destPath")] = None
     r"""Path to use to write metrics. Defaults to $CRIBL_HOME/state/<id>"""
 
     on_backpressure: Annotated[
-        Optional[OutputRingBackpressureBehavior], pydantic.Field(alias="onBackpressure")
+        Annotated[
+            Optional[OutputRingBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
     ] = OutputRingBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
cribl_control_plane/models/outputs3.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputS3Type(str, Enum):
     S3 = "s3"
 
 
-class OutputS3AuthenticationMethod(str, Enum):
+class OutputS3AuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -20,14 +23,14 @@ class OutputS3AuthenticationMethod(str, Enum):
     SECRET = "secret"
 
 
-class OutputS3SignatureVersion(str, Enum):
+class OutputS3SignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing S3 requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputS3ObjectACL(str, Enum):
+class OutputS3ObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""
 
     PRIVATE = "private"
@@ -39,7 +42,7 @@ class OutputS3ObjectACL(str, Enum):
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
 
 
-class OutputS3StorageClass(str, Enum):
+class OutputS3StorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""
 
     STANDARD = "STANDARD"
@@ -52,12 +55,14 @@ class OutputS3StorageClass(str, Enum):
     DEEP_ARCHIVE = "DEEP_ARCHIVE"
 
 
-class OutputS3ServerSideEncryptionForUploadedObjects(str, Enum):
+class OutputS3ServerSideEncryptionForUploadedObjects(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     AES256 = "AES256"
     AWS_KMS = "aws:kms"
 
 
-class OutputS3DataFormat(str, Enum):
+class OutputS3DataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""
 
     JSON = "json"
@@ -65,28 +70,28 @@ class OutputS3DataFormat(str, Enum):
     PARQUET = "parquet"
 
 
-class OutputS3BackpressureBehavior(str, Enum):
+class OutputS3BackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputS3DiskSpaceProtection(str, Enum):
+class OutputS3DiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputS3Compression(str, Enum):
+class OutputS3Compression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputS3CompressionLevel(str, Enum):
+class OutputS3CompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""
 
     BEST_SPEED = "best_speed"
@@ -94,7 +99,7 @@ class OutputS3CompressionLevel(str, Enum):
     BEST_COMPRESSION = "best_compression"
 
 
-class OutputS3ParquetVersion(str, Enum):
+class OutputS3ParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""
 
     PARQUET_1_0 = "PARQUET_1_0"
@@ -102,7 +107,7 @@ class OutputS3ParquetVersion(str, Enum):
     PARQUET_2_6 = "PARQUET_2_6"
 
 
-class OutputS3DataPageVersion(str, Enum):
+class OutputS3DataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
 
     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -272,7 +277,10 @@ class OutputS3(BaseModel):
     r"""Secret key. This value can be a constant or a JavaScript expression. Example: `${C.env.SOME_SECRET}`)"""
 
     aws_authentication_method: Annotated[
-        Optional[OutputS3AuthenticationMethod],
+        Annotated[
+            Optional[OutputS3AuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputS3AuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -281,7 +289,11 @@ class OutputS3(BaseModel):
     r"""S3 service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to S3-compatible endpoint."""
 
     signature_version: Annotated[
-        Optional[OutputS3SignatureVersion], pydantic.Field(alias="signatureVersion")
+        Annotated[
+            Optional[OutputS3SignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="signatureVersion"),
     ] = OutputS3SignatureVersion.V4
     r"""Signature version to use for signing S3 requests"""
 
@@ -329,17 +341,26 @@
     r"""Prefix to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myKeyPrefix-${C.vars.myVar}`"""
 
     object_acl: Annotated[
-        Optional[OutputS3ObjectACL], pydantic.Field(alias="objectACL")
+        Annotated[
+            Optional[OutputS3ObjectACL], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="objectACL"),
     ] = OutputS3ObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""
 
     storage_class: Annotated[
-        Optional[OutputS3StorageClass], pydantic.Field(alias="storageClass")
+        Annotated[
+            Optional[OutputS3StorageClass], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="storageClass"),
     ] = None
     r"""Storage class to select for uploaded objects"""
 
     server_side_encryption: Annotated[
-        Optional[OutputS3ServerSideEncryptionForUploadedObjects],
+        Annotated[
+            Optional[OutputS3ServerSideEncryptionForUploadedObjects],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="serverSideEncryption"),
     ] = None
 
@@ -356,9 +377,12 @@
     )
     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
 
-    format_: Annotated[Optional[OutputS3DataFormat], pydantic.Field(alias="format")] = (
-        OutputS3DataFormat.JSON
-    )
+    format_: Annotated[
+        Annotated[
+            Optional[OutputS3DataFormat], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="format"),
+    ] = OutputS3DataFormat.JSON
     r"""Format of the output data"""
 
     base_file_name: Annotated[Optional[str], pydantic.Field(alias="baseFileName")] = (
@@ -390,7 +414,11 @@
     r"""Buffer size used to write to a file"""
 
     on_backpressure: Annotated[
-        Optional[OutputS3BackpressureBehavior], pydantic.Field(alias="onBackpressure")
+        Annotated[
+            Optional[OutputS3BackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
     ] = OutputS3BackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
@@ -400,7 +428,10 @@
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
 
     on_disk_full_backpressure: Annotated[
-        Optional[OutputS3DiskSpaceProtection],
+        Annotated[
+            Optional[OutputS3DiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputS3DiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -438,11 +469,17 @@
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""
 
-    compress: Optional[OutputS3Compression] = OutputS3Compression.GZIP
+    compress: Annotated[
+        Optional[OutputS3Compression], PlainValidator(validate_open_enum(False))
+    ] = OutputS3Compression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     compression_level: Annotated[
-        Optional[OutputS3CompressionLevel], pydantic.Field(alias="compressionLevel")
+        Annotated[
+            Optional[OutputS3CompressionLevel],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="compressionLevel"),
     ] = OutputS3CompressionLevel.BEST_SPEED
     r"""Compression level to apply before moving files to final destination"""
 
@@ -452,12 +489,17 @@
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
 
     parquet_version: Annotated[
-        Optional[OutputS3ParquetVersion], pydantic.Field(alias="parquetVersion")
+        Annotated[
+            Optional[OutputS3ParquetVersion], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="parquetVersion"),
    ] = OutputS3ParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""
 
     parquet_data_page_version: Annotated[
-        Optional[OutputS3DataPageVersion],
+        Annotated[
+            Optional[OutputS3DataPageVersion], PlainValidator(validate_open_enum(False))
+        ],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputS3DataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
cribl_control_plane/models/outputsecuritylake.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputSecurityLakeType(str, Enum):
     SECURITY_LAKE = "security_lake"
 
 
-class OutputSecurityLakeAuthenticationMethod(str, Enum):
+class OutputSecurityLakeAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -20,14 +23,14 @@ class OutputSecurityLakeAuthenticationMethod(str, Enum):
     SECRET = "secret"
 
 
-class OutputSecurityLakeSignatureVersion(str, Enum):
+class OutputSecurityLakeSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing Amazon Security Lake requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputSecurityLakeObjectACL(str, Enum):
+class OutputSecurityLakeObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""
 
     PRIVATE = "private"
@@ -39,7 +42,7 @@ class OutputSecurityLakeObjectACL(str, Enum):
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
 
 
-class OutputSecurityLakeStorageClass(str, Enum):
+class OutputSecurityLakeStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""
 
     STANDARD = "STANDARD"
@@ -52,26 +55,28 @@ class OutputSecurityLakeStorageClass(str, Enum):
     DEEP_ARCHIVE = "DEEP_ARCHIVE"
 
 
-class OutputSecurityLakeServerSideEncryptionForUploadedObjects(str, Enum):
+class OutputSecurityLakeServerSideEncryptionForUploadedObjects(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     AES256 = "AES256"
     AWS_KMS = "aws:kms"
 
 
-class OutputSecurityLakeBackpressureBehavior(str, Enum):
+class OutputSecurityLakeBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputSecurityLakeDiskSpaceProtection(str, Enum):
+class OutputSecurityLakeDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputSecurityLakeParquetVersion(str, Enum):
+class OutputSecurityLakeParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""
 
     PARQUET_1_0 = "PARQUET_1_0"
@@ -79,7 +84,7 @@ class OutputSecurityLakeParquetVersion(str, Enum):
     PARQUET_2_6 = "PARQUET_2_6"
 
 
-class OutputSecurityLakeDataPageVersion(str, Enum):
+class OutputSecurityLakeDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
 
     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -252,7 +257,10 @@
     )
 
     aws_authentication_method: Annotated[
-        Optional[OutputSecurityLakeAuthenticationMethod],
+        Annotated[
+            Optional[OutputSecurityLakeAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputSecurityLakeAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -261,7 +269,10 @@
     r"""Amazon Security Lake service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to Amazon Security Lake-compatible endpoint."""
 
     signature_version: Annotated[
-        Optional[OutputSecurityLakeSignatureVersion],
+        Annotated[
+            Optional[OutputSecurityLakeSignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="signatureVersion"),
     ] = OutputSecurityLakeSignatureVersion.V4
     r"""Signature version to use for signing Amazon Security Lake requests"""
@@ -302,17 +313,28 @@
     r"""Add the Output ID value to staging location"""
 
     object_acl: Annotated[
-        Optional[OutputSecurityLakeObjectACL], pydantic.Field(alias="objectACL")
+        Annotated[
+            Optional[OutputSecurityLakeObjectACL],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="objectACL"),
     ] = OutputSecurityLakeObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""
 
     storage_class: Annotated[
-        Optional[OutputSecurityLakeStorageClass], pydantic.Field(alias="storageClass")
+        Annotated[
+            Optional[OutputSecurityLakeStorageClass],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="storageClass"),
     ] = None
     r"""Storage class to select for uploaded objects"""
 
     server_side_encryption: Annotated[
-        Optional[OutputSecurityLakeServerSideEncryptionForUploadedObjects],
+        Annotated[
+            Optional[OutputSecurityLakeServerSideEncryptionForUploadedObjects],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="serverSideEncryption"),
     ] = None
 
@@ -348,7 +370,10 @@
     r"""Buffer size used to write to a file"""
 
     on_backpressure: Annotated[
-        Optional[OutputSecurityLakeBackpressureBehavior],
+        Annotated[
+            Optional[OutputSecurityLakeBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSecurityLakeBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -359,7 +384,10 @@
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
 
     on_disk_full_backpressure: Annotated[
-        Optional[OutputSecurityLakeDiskSpaceProtection],
+        Annotated[
+            Optional[OutputSecurityLakeDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputSecurityLakeDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -395,13 +423,19 @@
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
 
     parquet_version: Annotated[
-        Optional[OutputSecurityLakeParquetVersion],
+        Annotated[
+            Optional[OutputSecurityLakeParquetVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="parquetVersion"),
     ] = OutputSecurityLakeParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""
 
     parquet_data_page_version: Annotated[
-        Optional[OutputSecurityLakeDataPageVersion],
+        Annotated[
+            Optional[OutputSecurityLakeDataPageVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputSecurityLakeDataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
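Net effect across the model files touched in this release candidate: the enums that back configurable settings become open enums, while fixed type discriminators such as OutputRingType and OutputS3Type stay closed. For SDK consumers this reads as a forward-compatibility change -- a configuration returned by a newer control-plane server that carries an enum value unknown to this SDK build should now deserialize, with the unknown value retained as a plain string (assuming the pass-through semantics sketched above), rather than raising a pydantic ValidationError. Code that branches on these fields should be prepared to encounter non-member values.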