cribl-control-plane 0.0.44__py3-none-any.whl → 0.0.44a2__py3-none-any.whl

This diff compares the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of cribl-control-plane might be problematic.

Files changed (158)
  1. cribl_control_plane/_version.py +3 -3
  2. cribl_control_plane/errors/healthstatus_error.py +8 -2
  3. cribl_control_plane/models/__init__.py +3 -3
  4. cribl_control_plane/models/appmode.py +2 -1
  5. cribl_control_plane/models/cacheconnection.py +10 -2
  6. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  7. cribl_control_plane/models/cloudprovider.py +2 -1
  8. cribl_control_plane/models/configgroup.py +7 -2
  9. cribl_control_plane/models/configgroupcloud.py +6 -2
  10. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  11. cribl_control_plane/models/cribllakedataset.py +8 -2
  12. cribl_control_plane/models/datasetmetadata.py +8 -2
  13. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  14. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  15. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  16. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  17. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  18. cribl_control_plane/models/getsummaryop.py +7 -2
  19. cribl_control_plane/models/hbcriblinfo.py +6 -1
  20. cribl_control_plane/models/healthstatus.py +7 -4
  21. cribl_control_plane/models/inputappscope.py +34 -14
  22. cribl_control_plane/models/inputazureblob.py +17 -6
  23. cribl_control_plane/models/inputcollection.py +11 -4
  24. cribl_control_plane/models/inputconfluentcloud.py +47 -20
  25. cribl_control_plane/models/inputcribl.py +11 -4
  26. cribl_control_plane/models/inputcriblhttp.py +23 -8
  27. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  28. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  29. cribl_control_plane/models/inputcribltcp.py +23 -8
  30. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  31. cribl_control_plane/models/inputdatadogagent.py +24 -8
  32. cribl_control_plane/models/inputdatagen.py +11 -4
  33. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  34. cribl_control_plane/models/inputelastic.py +40 -14
  35. cribl_control_plane/models/inputeventhub.py +15 -6
  36. cribl_control_plane/models/inputexec.py +14 -6
  37. cribl_control_plane/models/inputfile.py +15 -6
  38. cribl_control_plane/models/inputfirehose.py +23 -8
  39. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  40. cribl_control_plane/models/inputgrafana.py +67 -24
  41. cribl_control_plane/models/inputhttp.py +23 -8
  42. cribl_control_plane/models/inputhttpraw.py +23 -8
  43. cribl_control_plane/models/inputjournalfiles.py +12 -4
  44. cribl_control_plane/models/inputkafka.py +46 -16
  45. cribl_control_plane/models/inputkinesis.py +38 -14
  46. cribl_control_plane/models/inputkubeevents.py +11 -4
  47. cribl_control_plane/models/inputkubelogs.py +16 -8
  48. cribl_control_plane/models/inputkubemetrics.py +16 -8
  49. cribl_control_plane/models/inputloki.py +29 -10
  50. cribl_control_plane/models/inputmetrics.py +23 -8
  51. cribl_control_plane/models/inputmodeldriventelemetry.py +27 -10
  52. cribl_control_plane/models/inputmsk.py +53 -18
  53. cribl_control_plane/models/inputnetflow.py +11 -4
  54. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  55. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  56. cribl_control_plane/models/inputoffice365service.py +35 -16
  57. cribl_control_plane/models/inputopentelemetry.py +38 -16
  58. cribl_control_plane/models/inputprometheus.py +50 -18
  59. cribl_control_plane/models/inputprometheusrw.py +30 -10
  60. cribl_control_plane/models/inputrawudp.py +11 -4
  61. cribl_control_plane/models/inputs3.py +21 -8
  62. cribl_control_plane/models/inputs3inventory.py +26 -10
  63. cribl_control_plane/models/inputsecuritylake.py +27 -10
  64. cribl_control_plane/models/inputsnmp.py +16 -6
  65. cribl_control_plane/models/inputsplunk.py +33 -12
  66. cribl_control_plane/models/inputsplunkhec.py +29 -10
  67. cribl_control_plane/models/inputsplunksearch.py +33 -14
  68. cribl_control_plane/models/inputsqs.py +27 -10
  69. cribl_control_plane/models/inputsyslog.py +43 -16
  70. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  71. cribl_control_plane/models/inputsystemstate.py +16 -8
  72. cribl_control_plane/models/inputtcp.py +29 -10
  73. cribl_control_plane/models/inputtcpjson.py +29 -10
  74. cribl_control_plane/models/inputwef.py +37 -14
  75. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  76. cribl_control_plane/models/inputwineventlogs.py +20 -10
  77. cribl_control_plane/models/inputwiz.py +21 -8
  78. cribl_control_plane/models/inputwizwebhook.py +23 -8
  79. cribl_control_plane/models/inputzscalerhec.py +29 -10
  80. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  81. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  82. cribl_control_plane/models/masterworkerentry.py +7 -2
  83. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  84. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  85. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  86. cribl_control_plane/models/nodeupgradestate.py +2 -1
  87. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  88. cribl_control_plane/models/outputazureblob.py +48 -18
  89. cribl_control_plane/models/outputazuredataexplorer.py +74 -29
  90. cribl_control_plane/models/outputazureeventhub.py +40 -18
  91. cribl_control_plane/models/outputazurelogs.py +36 -13
  92. cribl_control_plane/models/outputclickhouse.py +56 -21
  93. cribl_control_plane/models/outputcloudwatch.py +29 -10
  94. cribl_control_plane/models/outputconfluentcloud.py +77 -32
  95. cribl_control_plane/models/outputcriblhttp.py +46 -18
  96. cribl_control_plane/models/outputcribllake.py +46 -16
  97. cribl_control_plane/models/outputcribltcp.py +45 -18
  98. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +50 -15
  99. cribl_control_plane/models/outputdatadog.py +48 -20
  100. cribl_control_plane/models/outputdataset.py +46 -18
  101. cribl_control_plane/models/outputdiskspool.py +7 -2
  102. cribl_control_plane/models/outputdls3.py +68 -24
  103. cribl_control_plane/models/outputdynatracehttp.py +54 -21
  104. cribl_control_plane/models/outputdynatraceotlp.py +56 -23
  105. cribl_control_plane/models/outputelastic.py +44 -19
  106. cribl_control_plane/models/outputelasticcloud.py +37 -13
  107. cribl_control_plane/models/outputexabeam.py +29 -10
  108. cribl_control_plane/models/outputfilesystem.py +39 -14
  109. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  110. cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
  111. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  112. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  113. cribl_control_plane/models/outputgrafanacloud.py +99 -34
  114. cribl_control_plane/models/outputgraphite.py +31 -14
  115. cribl_control_plane/models/outputhoneycomb.py +36 -13
  116. cribl_control_plane/models/outputhumiohec.py +44 -17
  117. cribl_control_plane/models/outputinfluxdb.py +43 -17
  118. cribl_control_plane/models/outputkafka.py +74 -28
  119. cribl_control_plane/models/outputkinesis.py +40 -16
  120. cribl_control_plane/models/outputloki.py +41 -16
  121. cribl_control_plane/models/outputminio.py +65 -24
  122. cribl_control_plane/models/outputmsk.py +82 -30
  123. cribl_control_plane/models/outputnewrelic.py +43 -18
  124. cribl_control_plane/models/outputnewrelicevents.py +42 -15
  125. cribl_control_plane/models/outputopentelemetry.py +68 -27
  126. cribl_control_plane/models/outputprometheus.py +36 -13
  127. cribl_control_plane/models/outputring.py +19 -8
  128. cribl_control_plane/models/outputs3.py +68 -26
  129. cribl_control_plane/models/outputsecuritylake.py +52 -18
  130. cribl_control_plane/models/outputsentinel.py +45 -18
  131. cribl_control_plane/models/outputsentineloneaisiem.py +51 -19
  132. cribl_control_plane/models/outputservicenow.py +61 -25
  133. cribl_control_plane/models/outputsignalfx.py +38 -15
  134. cribl_control_plane/models/outputsns.py +36 -14
  135. cribl_control_plane/models/outputsplunk.py +60 -24
  136. cribl_control_plane/models/outputsplunkhec.py +36 -13
  137. cribl_control_plane/models/outputsplunklb.py +77 -30
  138. cribl_control_plane/models/outputsqs.py +41 -16
  139. cribl_control_plane/models/outputstatsd.py +30 -14
  140. cribl_control_plane/models/outputstatsdext.py +29 -12
  141. cribl_control_plane/models/outputsumologic.py +35 -12
  142. cribl_control_plane/models/outputsyslog.py +58 -24
  143. cribl_control_plane/models/outputtcpjson.py +52 -20
  144. cribl_control_plane/models/outputwavefront.py +36 -13
  145. cribl_control_plane/models/outputwebhook.py +58 -22
  146. cribl_control_plane/models/outputxsiam.py +36 -15
  147. cribl_control_plane/models/productscore.py +2 -1
  148. cribl_control_plane/models/rbacresource.py +2 -1
  149. cribl_control_plane/models/resourcepolicy.py +4 -2
  150. cribl_control_plane/models/runnablejobcollection.py +30 -13
  151. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  155. cribl_control_plane/models/workertypes.py +2 -1
  156. {cribl_control_plane-0.0.44.dist-info → cribl_control_plane-0.0.44a2.dist-info}/METADATA +1 -1
  157. {cribl_control_plane-0.0.44.dist-info → cribl_control_plane-0.0.44a2.dist-info}/RECORD +158 -158
  158. {cribl_control_plane-0.0.44.dist-info → cribl_control_plane-0.0.44a2.dist-info}/WHEEL +0 -0
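
The model diffs below (outputclickhouse.py, outputcloudwatch.py, and outputconfluentcloud.py; the other models change along the same lines) all apply one recurring pattern: the regenerated SDK turns its closed enums into open enums. Each Enum class gains metaclass=utils.OpenEnumMeta, and each enum-typed field wraps its type in an inner Annotated[...] carrying PlainValidator(validate_open_enum(...)), so enum values introduced on the server side pass through instead of failing client-side validation. A minimal, self-contained sketch of the idea follows; OpenEnumMeta and validate_open_enum here are stand-ins for the generated cribl_control_plane.utils helpers, whose exact implementation may differ:

    from enum import Enum, EnumMeta
    from typing import Any, Optional

    from pydantic import BaseModel
    from pydantic.functional_validators import PlainValidator
    from typing_extensions import Annotated


    class OpenEnumMeta(EnumMeta):
        # Assumed behavior: constructing the enum with an unknown value
        # returns the raw value instead of raising ValueError.
        def __call__(cls, value, *args, **kwargs):
            try:
                return super().__call__(value, *args, **kwargs)
            except ValueError:
                return value


    class Compression(str, Enum, metaclass=OpenEnumMeta):
        NONE = "none"
        GZIP = "gzip"


    def validate_open_enum(is_int: bool):
        # Assumed behavior; the generated helper is enum-agnostic, while
        # this stub hard-codes Compression to keep the sketch short.
        def validate(value: Any) -> Any:
            if value is None:
                return None
            return Compression(int(value) if is_int else str(value))

        return validate


    class Output(BaseModel):
        pq_compress: Annotated[
            Optional[Compression], PlainValidator(validate_open_enum(False))
        ] = Compression.NONE


    print(Output(pq_compress="gzip").pq_compress)  # Compression.GZIP
    print(Output(pq_compress="zstd").pq_compress)  # "zstd", accepted rather than rejected

The PlainValidator is what makes this work under pydantic v2: it routes validation through Python-level enum construction (where the metaclass fallback applies) instead of pydantic's strict enum schema, which would reject unknown members.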
cribl_control_plane/models/outputclickhouse.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputClickHouseType(str, Enum):
     CLICK_HOUSE = "click_house"
 
 
-class OutputClickHouseAuthenticationType(str, Enum):
+class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"
     BASIC = "basic"
     CREDENTIALS_SECRET = "credentialsSecret"
@@ -22,28 +25,28 @@ class OutputClickHouseAuthenticationType(str, Enum):
     OAUTH = "oauth"
 
 
-class OutputClickHouseFormat(str, Enum):
+class OutputClickHouseFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""
 
     JSON_COMPACT_EACH_ROW_WITH_NAMES = "json-compact-each-row-with-names"
     JSON_EACH_ROW = "json-each-row"
 
 
-class MappingType(str, Enum):
+class MappingType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How event fields are mapped to ClickHouse columns."""
 
     AUTOMATIC = "automatic"
     CUSTOM = "custom"
 
 
-class OutputClickHouseMinimumTLSVersion(str, Enum):
+class OutputClickHouseMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputClickHouseMaximumTLSVersion(str, Enum):
+class OutputClickHouseMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -92,11 +95,19 @@ class OutputClickHouseTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputClickHouseMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[OutputClickHouseMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputClickHouseMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[OutputClickHouseMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None
 
 
@@ -111,7 +122,7 @@ class OutputClickHouseExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputClickHouseFailedRequestLoggingMode(str, Enum):
+class OutputClickHouseFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -173,7 +184,7 @@ class OutputClickHouseTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputClickHouseBackpressureBehavior(str, Enum):
+class OutputClickHouseBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -233,21 +244,21 @@ class ColumnMapping(BaseModel):
     r"""Type of the column in the ClickHouse database"""
 
 
-class OutputClickHouseCompression(str, Enum):
+class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputClickHouseQueueFullBehavior(str, Enum):
+class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputClickHouseMode(str, Enum):
+class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -402,16 +413,24 @@ class OutputClickHouse(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""
 
     auth_type: Annotated[
-        Optional[OutputClickHouseAuthenticationType], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputClickHouseAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputClickHouseAuthenticationType.NONE
 
     format_: Annotated[
-        Optional[OutputClickHouseFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputClickHouseFormat], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputClickHouseFormat.JSON_COMPACT_EACH_ROW_WITH_NAMES
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""
 
     mapping_type: Annotated[
-        Optional[MappingType], pydantic.Field(alias="mappingType")
+        Annotated[Optional[MappingType], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="mappingType"),
     ] = MappingType.AUTOMATIC
     r"""How event fields are mapped to ClickHouse columns."""
 
@@ -466,7 +485,10 @@ class OutputClickHouse(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputClickHouseFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputClickHouseFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputClickHouseFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -489,7 +511,7 @@ class OutputClickHouse(BaseModel):
 
     response_honor_retry_after_header: Annotated[
         Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
-    ] = False
+    ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     dump_format_errors_to_disk: Annotated[
@@ -498,7 +520,10 @@ class OutputClickHouse(BaseModel):
     r"""Log the most recent event that fails to match the table schema"""
 
     on_backpressure: Annotated[
-        Optional[OutputClickHouseBackpressureBehavior],
+        Annotated[
+            Optional[OutputClickHouseBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputClickHouseBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -593,18 +618,28 @@ class OutputClickHouse(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputClickHouseCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputClickHouseCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputClickHouseCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputClickHouseQueueFullBehavior],
+        Annotated[
+            Optional[OutputClickHouseQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputClickHouseQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputClickHouseMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputClickHouseMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
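One change in outputclickhouse.py is behavioral rather than type-level: the default for responseHonorRetryAfterHeader flips from False to True, so the 0.0.44a2 build honors Retry-After headers unless the option is explicitly disabled. A reduced sketch of the observable difference (toy models, not the full generated class):

    from typing import Optional

    import pydantic
    from pydantic import BaseModel
    from typing_extensions import Annotated


    class Old(BaseModel):  # 0.0.44 default
        response_honor_retry_after_header: Annotated[
            Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
        ] = False


    class New(BaseModel):  # 0.0.44a2 default
        response_honor_retry_after_header: Annotated[
            Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
        ] = True


    assert Old().response_honor_retry_after_header is False
    assert New().response_honor_retry_after_header is True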
cribl_control_plane/models/outputcloudwatch.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputCloudwatchType(str, Enum):
     CLOUDWATCH = "cloudwatch"
 
 
-class OutputCloudwatchAuthenticationMethod(str, Enum):
+class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -20,7 +23,7 @@ class OutputCloudwatchAuthenticationMethod(str, Enum):
     SECRET = "secret"
 
 
-class OutputCloudwatchBackpressureBehavior(str, Enum):
+class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -28,21 +31,21 @@ class OutputCloudwatchBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputCloudwatchCompression(str, Enum):
+class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputCloudwatchQueueFullBehavior(str, Enum):
+class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputCloudwatchMode(str, Enum):
+class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -150,7 +153,10 @@ class OutputCloudwatch(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""
 
     aws_authentication_method: Annotated[
-        Optional[OutputCloudwatchAuthenticationMethod],
+        Annotated[
+            Optional[OutputCloudwatchAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputCloudwatchAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -206,7 +212,10 @@ class OutputCloudwatch(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""
 
     on_backpressure: Annotated[
-        Optional[OutputCloudwatchBackpressureBehavior],
+        Annotated[
+            Optional[OutputCloudwatchBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCloudwatchBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -232,18 +241,28 @@ class OutputCloudwatch(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputCloudwatchCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputCloudwatchCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
    ] = OutputCloudwatchCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputCloudwatchQueueFullBehavior],
+        Annotated[
+            Optional[OutputCloudwatchQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputCloudwatchQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputCloudwatchMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputCloudwatchMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
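The nested Annotated shape that recurs in these hunks keeps two concerns separate: the inner Annotated binds the open-enum validator to the type, and the outer one carries the pydantic field alias. A toy model showing the same shape (the passthrough validator is illustrative, not the SDK helper):

    from typing import Any, Optional

    import pydantic
    from pydantic import BaseModel
    from pydantic.functional_validators import PlainValidator
    from typing_extensions import Annotated


    def passthrough(value: Any) -> Any:
        # Illustrative stand-in for validate_open_enum(False).
        return value


    class Example(BaseModel):
        on_backpressure: Annotated[
            Annotated[Optional[str], PlainValidator(passthrough)],  # validator on the type
            pydantic.Field(alias="onBackpressure"),  # alias on the field
        ] = "block"


    print(Example(onBackpressure="queue").on_backpressure)  # queue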
cribl_control_plane/models/outputconfluentcloud.py

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,14 +15,14 @@ class OutputConfluentCloudType(str, Enum):
     CONFLUENT_CLOUD = "confluent_cloud"
 
 
-class OutputConfluentCloudMinimumTLSVersion(str, Enum):
+class OutputConfluentCloudMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputConfluentCloudMaximumTLSVersion(str, Enum):
+class OutputConfluentCloudMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -79,17 +82,23 @@ class OutputConfluentCloudTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputConfluentCloudMinimumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputConfluentCloudMaximumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
 
-class OutputConfluentCloudAcknowledgments(int, Enum):
+class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments."""
 
     ONE = 1
@@ -97,7 +106,7 @@ class OutputConfluentCloudAcknowledgments(int, Enum):
     MINUS_1 = -1
 
 
-class OutputConfluentCloudRecordDataFormat(str, Enum):
+class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""
 
     JSON = "json"
@@ -105,7 +114,7 @@ class OutputConfluentCloudRecordDataFormat(str, Enum):
     PROTOBUF = "protobuf"
 
 
-class OutputConfluentCloudCompression(str, Enum):
+class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""
 
     NONE = "none"
@@ -114,7 +123,7 @@ class OutputConfluentCloudCompression(str, Enum):
     LZ4 = "lz4"
 
 
-class OutputConfluentCloudSchemaType(str, Enum):
+class OutputConfluentCloudSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The schema format used to encode and decode event data"""
 
     AVRO = "avro"
@@ -140,14 +149,18 @@ class OutputConfluentCloudAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""
 
 
-class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
+class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
+class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -207,12 +220,18 @@ class OutputConfluentCloudKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
+        Annotated[
+            Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
@@ -249,7 +268,11 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
     schema_type: Annotated[
-        Optional[OutputConfluentCloudSchemaType], pydantic.Field(alias="schemaType")
+        Annotated[
+            Optional[OutputConfluentCloudSchemaType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="schemaType"),
     ] = OutputConfluentCloudSchemaType.AVRO
     r"""The schema format used to encode and decode event data"""
 
@@ -282,7 +305,7 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
 
 
-class OutputConfluentCloudSASLMechanism(str, Enum):
+class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -303,9 +326,10 @@ class OutputConfluentCloudAuthentication(BaseModel):
 
     disabled: Optional[bool] = True
 
-    mechanism: Optional[OutputConfluentCloudSASLMechanism] = (
-        OutputConfluentCloudSASLMechanism.PLAIN
-    )
+    mechanism: Annotated[
+        Optional[OutputConfluentCloudSASLMechanism],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputConfluentCloudSASLMechanism.PLAIN
 
     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -313,7 +337,7 @@ class OutputConfluentCloudAuthentication(BaseModel):
     r"""Enable OAuth authentication"""
 
 
-class OutputConfluentCloudBackpressureBehavior(str, Enum):
+class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -321,21 +345,23 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputConfluentCloudPqCompressCompression(str, Enum):
+class OutputConfluentCloudPqCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputConfluentCloudQueueFullBehavior(str, Enum):
+class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputConfluentCloudMode(str, Enum):
+class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -449,19 +475,25 @@ class OutputConfluentCloud(BaseModel):
 
     tls: Optional[OutputConfluentCloudTLSSettingsClientSide] = None
 
-    ack: Optional[OutputConfluentCloudAcknowledgments] = (
-        OutputConfluentCloudAcknowledgments.ONE
-    )
+    ack: Annotated[
+        Optional[OutputConfluentCloudAcknowledgments],
+        PlainValidator(validate_open_enum(True)),
+    ] = OutputConfluentCloudAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""
 
     format_: Annotated[
-        Optional[OutputConfluentCloudRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputConfluentCloudRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputConfluentCloudRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""
 
-    compression: Optional[OutputConfluentCloudCompression] = (
-        OutputConfluentCloudCompression.GZIP
-    )
+    compression: Annotated[
+        Optional[OutputConfluentCloudCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputConfluentCloudCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""
 
     max_record_size_kb: Annotated[
@@ -522,7 +554,10 @@ class OutputConfluentCloud(BaseModel):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
 
     on_backpressure: Annotated[
-        Optional[OutputConfluentCloudBackpressureBehavior],
+        Annotated[
+            Optional[OutputConfluentCloudBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputConfluentCloudBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -548,19 +583,29 @@ class OutputConfluentCloud(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputConfluentCloudPqCompressCompression],
+        Annotated[
+            Optional[OutputConfluentCloudPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqCompress"),
     ] = OutputConfluentCloudPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputConfluentCloudQueueFullBehavior],
+        Annotated[
+            Optional[OutputConfluentCloudQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputConfluentCloudQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputConfluentCloudMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputConfluentCloudMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputConfluentCloudMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
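
outputconfluentcloud.py also contains the one int-valued open enum in this set: ack uses PlainValidator(validate_open_enum(True)), where the True flag tells the helper to normalize the wire value as an int before the member lookup. A sketch under the same assumptions as the one after the file list (names are illustrative, not the generated classes):

    from enum import Enum, EnumMeta
    from typing import Any, Optional

    from pydantic import BaseModel
    from pydantic.functional_validators import PlainValidator
    from typing_extensions import Annotated


    class OpenEnumMeta(EnumMeta):
        # Assumed fallback behavior, as in the earlier sketch.
        def __call__(cls, value, *args, **kwargs):
            try:
                return super().__call__(value, *args, **kwargs)
            except ValueError:
                return value


    class Acks(int, Enum, metaclass=OpenEnumMeta):
        # Mirrors OutputConfluentCloudAcknowledgments in shape only.
        ONE = 1
        MINUS_1 = -1


    def validate_open_enum(is_int: bool):
        def validate(value: Any) -> Any:
            if value is None:
                return None
            return Acks(int(value) if is_int else str(value))

        return validate


    class Output(BaseModel):
        ack: Annotated[
            Optional[Acks], PlainValidator(validate_open_enum(True))
        ] = Acks.ONE


    print(Output(ack=-1).ack)  # Acks.MINUS_1
    print(Output(ack=0).ack)   # 0 passes through as an open value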