cribl-control-plane 0.0.49__py3-none-any.whl → 0.1.0b1__py3-none-any.whl

This diff compares the contents of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their respective public registries.

Potentially problematic release. This version of cribl-control-plane might be problematic.

Files changed (173)
  1. cribl_control_plane/_version.py +4 -6
  2. cribl_control_plane/errors/healthstatus_error.py +8 -2
  3. cribl_control_plane/health.py +6 -2
  4. cribl_control_plane/models/__init__.py +68 -30
  5. cribl_control_plane/models/cacheconnection.py +10 -2
  6. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  7. cribl_control_plane/models/cloudprovider.py +2 -1
  8. cribl_control_plane/models/configgroup.py +7 -2
  9. cribl_control_plane/models/configgroupcloud.py +6 -2
  10. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  11. cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
  12. cribl_control_plane/models/createversionpushop.py +5 -5
  13. cribl_control_plane/models/cribllakedataset.py +8 -2
  14. cribl_control_plane/models/datasetmetadata.py +8 -2
  15. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  16. cribl_control_plane/models/error.py +16 -0
  17. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  18. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  19. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  20. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  21. cribl_control_plane/models/gethealthinfoop.py +17 -0
  22. cribl_control_plane/models/getsummaryop.py +7 -2
  23. cribl_control_plane/models/getversionshowop.py +6 -5
  24. cribl_control_plane/models/gitshowresult.py +19 -0
  25. cribl_control_plane/models/hbcriblinfo.py +24 -3
  26. cribl_control_plane/models/healthstatus.py +7 -4
  27. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  28. cribl_control_plane/models/inputappscope.py +34 -14
  29. cribl_control_plane/models/inputazureblob.py +17 -6
  30. cribl_control_plane/models/inputcollection.py +11 -4
  31. cribl_control_plane/models/inputconfluentcloud.py +41 -32
  32. cribl_control_plane/models/inputcribl.py +11 -4
  33. cribl_control_plane/models/inputcriblhttp.py +23 -8
  34. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  35. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  36. cribl_control_plane/models/inputcribltcp.py +23 -8
  37. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  38. cribl_control_plane/models/inputdatadogagent.py +24 -8
  39. cribl_control_plane/models/inputdatagen.py +11 -4
  40. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  41. cribl_control_plane/models/inputelastic.py +40 -14
  42. cribl_control_plane/models/inputeventhub.py +15 -6
  43. cribl_control_plane/models/inputexec.py +14 -6
  44. cribl_control_plane/models/inputfile.py +15 -6
  45. cribl_control_plane/models/inputfirehose.py +23 -8
  46. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  47. cribl_control_plane/models/inputgrafana.py +67 -24
  48. cribl_control_plane/models/inputhttp.py +23 -8
  49. cribl_control_plane/models/inputhttpraw.py +23 -8
  50. cribl_control_plane/models/inputjournalfiles.py +12 -4
  51. cribl_control_plane/models/inputkafka.py +41 -28
  52. cribl_control_plane/models/inputkinesis.py +38 -14
  53. cribl_control_plane/models/inputkubeevents.py +11 -4
  54. cribl_control_plane/models/inputkubelogs.py +16 -8
  55. cribl_control_plane/models/inputkubemetrics.py +16 -8
  56. cribl_control_plane/models/inputloki.py +29 -10
  57. cribl_control_plane/models/inputmetrics.py +23 -8
  58. cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
  59. cribl_control_plane/models/inputmsk.py +48 -30
  60. cribl_control_plane/models/inputnetflow.py +11 -4
  61. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  62. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  63. cribl_control_plane/models/inputoffice365service.py +35 -16
  64. cribl_control_plane/models/inputopentelemetry.py +38 -16
  65. cribl_control_plane/models/inputprometheus.py +50 -18
  66. cribl_control_plane/models/inputprometheusrw.py +30 -10
  67. cribl_control_plane/models/inputrawudp.py +11 -4
  68. cribl_control_plane/models/inputs3.py +21 -8
  69. cribl_control_plane/models/inputs3inventory.py +26 -10
  70. cribl_control_plane/models/inputsecuritylake.py +27 -10
  71. cribl_control_plane/models/inputsnmp.py +16 -6
  72. cribl_control_plane/models/inputsplunk.py +33 -12
  73. cribl_control_plane/models/inputsplunkhec.py +29 -10
  74. cribl_control_plane/models/inputsplunksearch.py +33 -14
  75. cribl_control_plane/models/inputsqs.py +27 -10
  76. cribl_control_plane/models/inputsyslog.py +43 -16
  77. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  78. cribl_control_plane/models/inputsystemstate.py +16 -8
  79. cribl_control_plane/models/inputtcp.py +29 -10
  80. cribl_control_plane/models/inputtcpjson.py +29 -10
  81. cribl_control_plane/models/inputwef.py +37 -14
  82. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  83. cribl_control_plane/models/inputwineventlogs.py +20 -10
  84. cribl_control_plane/models/inputwiz.py +21 -8
  85. cribl_control_plane/models/inputwizwebhook.py +23 -8
  86. cribl_control_plane/models/inputzscalerhec.py +29 -10
  87. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  88. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  89. cribl_control_plane/models/masterworkerentry.py +7 -2
  90. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  91. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  92. cribl_control_plane/models/nodeprovidedinfo.py +3 -0
  93. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  94. cribl_control_plane/models/nodeupgradestate.py +2 -1
  95. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  96. cribl_control_plane/models/output.py +3 -0
  97. cribl_control_plane/models/outputazureblob.py +48 -18
  98. cribl_control_plane/models/outputazuredataexplorer.py +73 -28
  99. cribl_control_plane/models/outputazureeventhub.py +40 -18
  100. cribl_control_plane/models/outputazurelogs.py +35 -12
  101. cribl_control_plane/models/outputclickhouse.py +55 -20
  102. cribl_control_plane/models/outputcloudwatch.py +29 -10
  103. cribl_control_plane/models/outputconfluentcloud.py +71 -44
  104. cribl_control_plane/models/outputcriblhttp.py +44 -16
  105. cribl_control_plane/models/outputcribllake.py +46 -16
  106. cribl_control_plane/models/outputcribltcp.py +45 -18
  107. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
  108. cribl_control_plane/models/outputdatabricks.py +439 -0
  109. cribl_control_plane/models/outputdatadog.py +48 -20
  110. cribl_control_plane/models/outputdataset.py +46 -18
  111. cribl_control_plane/models/outputdiskspool.py +7 -2
  112. cribl_control_plane/models/outputdls3.py +68 -24
  113. cribl_control_plane/models/outputdynatracehttp.py +53 -20
  114. cribl_control_plane/models/outputdynatraceotlp.py +55 -22
  115. cribl_control_plane/models/outputelastic.py +43 -18
  116. cribl_control_plane/models/outputelasticcloud.py +36 -12
  117. cribl_control_plane/models/outputexabeam.py +29 -10
  118. cribl_control_plane/models/outputfilesystem.py +39 -14
  119. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  120. cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
  121. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  122. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  123. cribl_control_plane/models/outputgrafanacloud.py +97 -32
  124. cribl_control_plane/models/outputgraphite.py +31 -14
  125. cribl_control_plane/models/outputhoneycomb.py +35 -12
  126. cribl_control_plane/models/outputhumiohec.py +43 -16
  127. cribl_control_plane/models/outputinfluxdb.py +42 -16
  128. cribl_control_plane/models/outputkafka.py +69 -40
  129. cribl_control_plane/models/outputkinesis.py +40 -16
  130. cribl_control_plane/models/outputloki.py +41 -16
  131. cribl_control_plane/models/outputminio.py +65 -24
  132. cribl_control_plane/models/outputmsk.py +77 -42
  133. cribl_control_plane/models/outputnewrelic.py +43 -18
  134. cribl_control_plane/models/outputnewrelicevents.py +41 -14
  135. cribl_control_plane/models/outputopentelemetry.py +67 -26
  136. cribl_control_plane/models/outputprometheus.py +35 -12
  137. cribl_control_plane/models/outputring.py +19 -8
  138. cribl_control_plane/models/outputs3.py +68 -26
  139. cribl_control_plane/models/outputsecuritylake.py +52 -18
  140. cribl_control_plane/models/outputsentinel.py +45 -18
  141. cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
  142. cribl_control_plane/models/outputservicenow.py +60 -24
  143. cribl_control_plane/models/outputsignalfx.py +37 -14
  144. cribl_control_plane/models/outputsns.py +36 -14
  145. cribl_control_plane/models/outputsplunk.py +60 -24
  146. cribl_control_plane/models/outputsplunkhec.py +35 -12
  147. cribl_control_plane/models/outputsplunklb.py +77 -30
  148. cribl_control_plane/models/outputsqs.py +41 -16
  149. cribl_control_plane/models/outputstatsd.py +30 -14
  150. cribl_control_plane/models/outputstatsdext.py +29 -12
  151. cribl_control_plane/models/outputsumologic.py +35 -12
  152. cribl_control_plane/models/outputsyslog.py +58 -24
  153. cribl_control_plane/models/outputtcpjson.py +52 -20
  154. cribl_control_plane/models/outputwavefront.py +35 -12
  155. cribl_control_plane/models/outputwebhook.py +58 -22
  156. cribl_control_plane/models/outputxsiam.py +35 -14
  157. cribl_control_plane/models/productscore.py +2 -1
  158. cribl_control_plane/models/rbacresource.py +2 -1
  159. cribl_control_plane/models/resourcepolicy.py +4 -2
  160. cribl_control_plane/models/routeconf.py +3 -4
  161. cribl_control_plane/models/runnablejobcollection.py +30 -13
  162. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  163. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  164. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  165. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  166. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
  167. cribl_control_plane/models/workertypes.py +2 -1
  168. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0b1.dist-info}/METADATA +1 -1
  169. cribl_control_plane-0.1.0b1.dist-info/RECORD +327 -0
  170. cribl_control_plane/models/appmode.py +0 -13
  171. cribl_control_plane/models/routecloneconf.py +0 -13
  172. cribl_control_plane-0.0.49.dist-info/RECORD +0 -325
  173. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0b1.dist-info}/WHEEL +0 -0
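
The bulk of this release is one mechanical change applied across the input/output model files above: each closed `str, Enum` (plus one `int, Enum`) becomes an open enum via `utils.OpenEnumMeta`, and every field typed with one is wrapped in `PlainValidator(validate_open_enum(...))`, so values outside the published member list no longer fail validation. The sketch below is a minimal, self-contained approximation of that pattern; the `validate_open_enum` shown is an assumed stand-in for the real helper in the generated `cribl_control_plane.utils` module, which may differ in detail.

# Minimal sketch of the open-enum pattern, NOT the real implementation:
# cribl_control_plane.utils provides OpenEnumMeta and validate_open_enum,
# which may differ in detail from this stand-in.
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


def validate_open_enum(is_int: bool):
    """Pass any value through (coerced to int or str) so unknown members survive."""

    def validate(value):
        if value is None:
            return None
        return int(value) if is_int else str(value)

    return validate


class Compression(str, Enum):  # stand-in for e.g. OutputClickHouseCompression
    NONE = "none"
    GZIP = "gzip"


class Settings(pydantic.BaseModel):
    pq_compress: Annotated[
        Annotated[Optional[Compression], PlainValidator(validate_open_enum(False))],
        pydantic.Field(alias="pqCompress"),
    ] = Compression.NONE


# A closed enum would raise a ValidationError on "zstd"; the open enum keeps it.
print(Settings.model_validate({"pqCompress": "zstd"}).pq_compress)  # zstd
print(Settings.model_validate({"pqCompress": "gzip"}).pq_compress)  # gzip

The practical effect, under these assumptions, is that a config value introduced by a newer Cribl release deserializes cleanly in this SDK instead of raising a validation error.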
cribl_control_plane/models/outputclickhouse.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputClickHouseType(str, Enum):
     CLICK_HOUSE = "click_house"


-class OutputClickHouseAuthenticationType(str, Enum):
+class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"
     BASIC = "basic"
     CREDENTIALS_SECRET = "credentialsSecret"
@@ -22,28 +25,28 @@ class OutputClickHouseAuthenticationType(str, Enum):
     OAUTH = "oauth"


-class OutputClickHouseFormat(str, Enum):
+class OutputClickHouseFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

     JSON_COMPACT_EACH_ROW_WITH_NAMES = "json-compact-each-row-with-names"
     JSON_EACH_ROW = "json-each-row"


-class MappingType(str, Enum):
+class MappingType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How event fields are mapped to ClickHouse columns."""

     AUTOMATIC = "automatic"
     CUSTOM = "custom"


-class OutputClickHouseMinimumTLSVersion(str, Enum):
+class OutputClickHouseMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputClickHouseMaximumTLSVersion(str, Enum):
+class OutputClickHouseMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -92,11 +95,19 @@ class OutputClickHouseTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Optional[OutputClickHouseMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[OutputClickHouseMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Optional[OutputClickHouseMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[OutputClickHouseMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None


@@ -111,7 +122,7 @@ class OutputClickHouseExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputClickHouseFailedRequestLoggingMode(str, Enum):
+class OutputClickHouseFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -173,7 +184,7 @@ class OutputClickHouseTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputClickHouseBackpressureBehavior(str, Enum):
+class OutputClickHouseBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -233,21 +244,21 @@ class ColumnMapping(BaseModel):
     r"""Type of the column in the ClickHouse database"""


-class OutputClickHouseCompression(str, Enum):
+class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputClickHouseQueueFullBehavior(str, Enum):
+class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputClickHouseMode(str, Enum):
+class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -402,16 +413,24 @@ class OutputClickHouse(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     auth_type: Annotated[
-        Optional[OutputClickHouseAuthenticationType], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputClickHouseAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputClickHouseAuthenticationType.NONE

     format_: Annotated[
-        Optional[OutputClickHouseFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputClickHouseFormat], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputClickHouseFormat.JSON_COMPACT_EACH_ROW_WITH_NAMES
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

     mapping_type: Annotated[
-        Optional[MappingType], pydantic.Field(alias="mappingType")
+        Annotated[Optional[MappingType], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="mappingType"),
     ] = MappingType.AUTOMATIC
     r"""How event fields are mapped to ClickHouse columns."""

@@ -466,7 +485,10 @@ class OutputClickHouse(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Optional[OutputClickHouseFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputClickHouseFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputClickHouseFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -498,7 +520,10 @@ class OutputClickHouse(BaseModel):
     r"""Log the most recent event that fails to match the table schema"""

     on_backpressure: Annotated[
-        Optional[OutputClickHouseBackpressureBehavior],
+        Annotated[
+            Optional[OutputClickHouseBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputClickHouseBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -593,18 +618,28 @@ class OutputClickHouse(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputClickHouseCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputClickHouseCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputClickHouseCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputClickHouseQueueFullBehavior],
+        Annotated[
+            Optional[OutputClickHouseQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputClickHouseQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Optional[OutputClickHouseMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputClickHouseMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
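
A detail worth noting in the hunks above is the two-layer `Annotated`: the inner layer binds the open-enum validator to the type, while the outer layer carries the `pydantic.Field(alias=...)` metadata, leaving serialization by alias untouched. A hypothetical mini-model showing that composition (class and field names are illustrative, not from the SDK):

# Inner Annotated = type-level metadata (the validator); outer Annotated =
# field-level metadata (the wire alias). Pydantic v2 semantics.
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Mode(str, Enum):  # illustrative; not a class from the SDK
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


# Reusable "open" type: default enum validation is replaced by a pass-through.
LenientMode = Annotated[Optional[Mode], PlainValidator(lambda v: v)]


class Queue(pydantic.BaseModel):
    pq_mode: Annotated[LenientMode, pydantic.Field(alias="pqMode")] = Mode.ERROR


q = Queue.model_validate({"pqMode": "sometimes"})
print(q.pq_mode)                          # sometimes -- kept, not rejected
print(Queue().model_dump(by_alias=True))  # {'pqMode': <Mode.ERROR: 'error'>}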
cribl_control_plane/models/outputcloudwatch.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputCloudwatchType(str, Enum):
     CLOUDWATCH = "cloudwatch"


-class OutputCloudwatchAuthenticationMethod(str, Enum):
+class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -20,7 +23,7 @@ class OutputCloudwatchAuthenticationMethod(str, Enum):
     SECRET = "secret"


-class OutputCloudwatchBackpressureBehavior(str, Enum):
+class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -28,21 +31,21 @@ class OutputCloudwatchBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputCloudwatchCompression(str, Enum):
+class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputCloudwatchQueueFullBehavior(str, Enum):
+class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputCloudwatchMode(str, Enum):
+class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -150,7 +153,10 @@ class OutputCloudwatch(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     aws_authentication_method: Annotated[
-        Optional[OutputCloudwatchAuthenticationMethod],
+        Annotated[
+            Optional[OutputCloudwatchAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputCloudwatchAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -206,7 +212,10 @@ class OutputCloudwatch(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""

     on_backpressure: Annotated[
-        Optional[OutputCloudwatchBackpressureBehavior],
+        Annotated[
+            Optional[OutputCloudwatchBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCloudwatchBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -232,18 +241,28 @@ class OutputCloudwatch(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputCloudwatchCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputCloudwatchCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputCloudwatchCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputCloudwatchQueueFullBehavior],
+        Annotated[
+            Optional[OutputCloudwatchQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputCloudwatchQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Optional[OutputCloudwatchMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputCloudwatchMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
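
Since only validation is loosened and every field keeps its camelCase alias, wire-format round trips behave as before. A hedged sketch using a hypothetical stand-in for one of the OutputCloudwatch fields:

# Validation is permissive, but the camelCase alias still governs the wire
# format, so JSON round trips are unchanged. Stand-in names, not the SDK's.
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class QueueFullBehavior(str, Enum):  # illustrative stand-in
    BLOCK = "block"
    DROP = "drop"


class MiniOutput(pydantic.BaseModel):
    pq_on_backpressure: Annotated[
        Annotated[Optional[QueueFullBehavior], PlainValidator(lambda v: v)],
        pydantic.Field(alias="pqOnBackpressure"),
    ] = QueueFullBehavior.BLOCK


m = MiniOutput.model_validate_json('{"pqOnBackpressure": "drop"}')
print(m.pq_on_backpressure)              # drop
print(m.model_dump_json(by_alias=True))  # {"pqOnBackpressure":"drop"}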
cribl_control_plane/models/outputconfluentcloud.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,14 +15,14 @@ class OutputConfluentCloudType(str, Enum):
     CONFLUENT_CLOUD = "confluent_cloud"


-class OutputConfluentCloudMinimumTLSVersion(str, Enum):
+class OutputConfluentCloudMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputConfluentCloudMaximumTLSVersion(str, Enum):
+class OutputConfluentCloudMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -79,17 +82,23 @@ class OutputConfluentCloudTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Optional[OutputConfluentCloudMinimumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Optional[OutputConfluentCloudMaximumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None


-class OutputConfluentCloudAcknowledgments(int, Enum):
+class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
    r"""Control the number of required acknowledgments."""

     ONE = 1
@@ -97,7 +106,7 @@ class OutputConfluentCloudAcknowledgments(int, Enum):
     MINUS_1 = -1


-class OutputConfluentCloudRecordDataFormat(str, Enum):
+class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""

     JSON = "json"
@@ -105,7 +114,7 @@ class OutputConfluentCloudRecordDataFormat(str, Enum):
     PROTOBUF = "protobuf"


-class OutputConfluentCloudCompression(str, Enum):
+class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""

     NONE = "none"
@@ -114,13 +123,6 @@ class OutputConfluentCloudCompression(str, Enum):
     LZ4 = "lz4"


-class OutputConfluentCloudSchemaType(str, Enum):
-    r"""The schema format used to encode and decode event data"""
-
-    AVRO = "avro"
-    JSON = "json"
-
-
 class OutputConfluentCloudAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""

@@ -140,14 +142,18 @@ class OutputConfluentCloudAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""


-class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
+class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
+class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -207,12 +213,18 @@ class OutputConfluentCloudKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None

@@ -221,8 +233,6 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
-    schema_type: NotRequired[OutputConfluentCloudSchemaType]
-    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -248,11 +258,6 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""

-    schema_type: Annotated[
-        Optional[OutputConfluentCloudSchemaType], pydantic.Field(alias="schemaType")
-    ] = OutputConfluentCloudSchemaType.AVRO
-    r"""The schema format used to encode and decode event data"""
-
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
@@ -282,7 +287,7 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


-class OutputConfluentCloudSASLMechanism(str, Enum):
+class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -303,9 +308,10 @@ class OutputConfluentCloudAuthentication(BaseModel):

     disabled: Optional[bool] = True

-    mechanism: Optional[OutputConfluentCloudSASLMechanism] = (
-        OutputConfluentCloudSASLMechanism.PLAIN
-    )
+    mechanism: Annotated[
+        Optional[OutputConfluentCloudSASLMechanism],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputConfluentCloudSASLMechanism.PLAIN

     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -313,7 +319,7 @@ class OutputConfluentCloudAuthentication(BaseModel):
     r"""Enable OAuth authentication"""


-class OutputConfluentCloudBackpressureBehavior(str, Enum):
+class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -321,21 +327,23 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputConfluentCloudPqCompressCompression(str, Enum):
+class OutputConfluentCloudPqCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputConfluentCloudQueueFullBehavior(str, Enum):
+class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputConfluentCloudMode(str, Enum):
+class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -449,19 +457,25 @@ class OutputConfluentCloud(BaseModel):

     tls: Optional[OutputConfluentCloudTLSSettingsClientSide] = None

-    ack: Optional[OutputConfluentCloudAcknowledgments] = (
-        OutputConfluentCloudAcknowledgments.ONE
-    )
+    ack: Annotated[
+        Optional[OutputConfluentCloudAcknowledgments],
+        PlainValidator(validate_open_enum(True)),
+    ] = OutputConfluentCloudAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""

     format_: Annotated[
-        Optional[OutputConfluentCloudRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputConfluentCloudRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputConfluentCloudRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""

-    compression: Optional[OutputConfluentCloudCompression] = (
-        OutputConfluentCloudCompression.GZIP
-    )
+    compression: Annotated[
+        Optional[OutputConfluentCloudCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputConfluentCloudCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""

     max_record_size_kb: Annotated[
@@ -522,7 +536,10 @@ class OutputConfluentCloud(BaseModel):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

     on_backpressure: Annotated[
-        Optional[OutputConfluentCloudBackpressureBehavior],
+        Annotated[
+            Optional[OutputConfluentCloudBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputConfluentCloudBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -548,19 +565,29 @@ class OutputConfluentCloud(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputConfluentCloudPqCompressCompression],
+        Annotated[
+            Optional[OutputConfluentCloudPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqCompress"),
     ] = OutputConfluentCloudPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputConfluentCloudQueueFullBehavior],
+        Annotated[
+            Optional[OutputConfluentCloudQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputConfluentCloudQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Optional[OutputConfluentCloudMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputConfluentCloudMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputConfluentCloudMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
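
The ack field above is the one place where validate_open_enum is called with True rather than False, matching OutputConfluentCloudAcknowledgments being an int-backed enum; the flag presumably selects int versus str coercion for unrecognized values. A sketch under that assumption (the real helper lives in cribl_control_plane.utils and may differ):

# Sketch of the is_int flag, assuming it selects int vs. str coercion for
# unknown values; stand-in for cribl_control_plane.utils.validate_open_enum.
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


def validate_open_enum(is_int: bool):
    def validate(value):
        if value is None:
            return None
        return int(value) if is_int else str(value)

    return validate


class Acknowledgments(int, Enum):  # mirrors the int-backed ack enum
    ONE = 1
    MINUS_1 = -1


class Producer(pydantic.BaseModel):
    ack: Annotated[
        Optional[Acknowledgments], PlainValidator(validate_open_enum(True))
    ] = Acknowledgments.ONE


print(Producer.model_validate({"ack": 0}).ack)  # 0 -- unknown, kept as an int
print(repr(Producer().ack))                     # <Acknowledgments.ONE: 1> (default)

Separately, the diff removes OutputConfluentCloudSchemaType and the schema_type field outright, with no replacement visible in these hunks; callers that set schema_type (alias schemaType) will need to drop it.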