cribl-control-plane 0.0.48a1__py3-none-any.whl → 0.0.50__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of cribl-control-plane might be problematic.

Files changed (165)
  1. cribl_control_plane/_version.py +6 -4
  2. cribl_control_plane/errors/healthstatus_error.py +2 -8
  3. cribl_control_plane/httpclient.py +0 -1
  4. cribl_control_plane/models/__init__.py +12 -12
  5. cribl_control_plane/models/appmode.py +13 -0
  6. cribl_control_plane/models/cacheconnection.py +2 -10
  7. cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
  8. cribl_control_plane/models/cloudprovider.py +1 -2
  9. cribl_control_plane/models/configgroup.py +2 -7
  10. cribl_control_plane/models/configgroupcloud.py +2 -6
  11. cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
  12. cribl_control_plane/models/cribllakedataset.py +2 -8
  13. cribl_control_plane/models/datasetmetadata.py +2 -8
  14. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
  15. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
  16. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
  17. cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
  18. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
  19. cribl_control_plane/models/getsummaryop.py +2 -7
  20. cribl_control_plane/models/hbcriblinfo.py +3 -19
  21. cribl_control_plane/models/healthstatus.py +4 -7
  22. cribl_control_plane/models/heartbeatmetadata.py +0 -3
  23. cribl_control_plane/models/inputappscope.py +14 -34
  24. cribl_control_plane/models/inputazureblob.py +6 -17
  25. cribl_control_plane/models/inputcollection.py +4 -11
  26. cribl_control_plane/models/inputconfluentcloud.py +20 -47
  27. cribl_control_plane/models/inputcribl.py +4 -11
  28. cribl_control_plane/models/inputcriblhttp.py +8 -23
  29. cribl_control_plane/models/inputcribllakehttp.py +10 -22
  30. cribl_control_plane/models/inputcriblmetrics.py +4 -12
  31. cribl_control_plane/models/inputcribltcp.py +8 -23
  32. cribl_control_plane/models/inputcrowdstrike.py +10 -26
  33. cribl_control_plane/models/inputdatadogagent.py +8 -24
  34. cribl_control_plane/models/inputdatagen.py +4 -11
  35. cribl_control_plane/models/inputedgeprometheus.py +24 -58
  36. cribl_control_plane/models/inputelastic.py +14 -40
  37. cribl_control_plane/models/inputeventhub.py +6 -15
  38. cribl_control_plane/models/inputexec.py +6 -14
  39. cribl_control_plane/models/inputfile.py +6 -15
  40. cribl_control_plane/models/inputfirehose.py +8 -23
  41. cribl_control_plane/models/inputgooglepubsub.py +6 -19
  42. cribl_control_plane/models/inputgrafana.py +24 -67
  43. cribl_control_plane/models/inputhttp.py +8 -23
  44. cribl_control_plane/models/inputhttpraw.py +8 -23
  45. cribl_control_plane/models/inputjournalfiles.py +4 -12
  46. cribl_control_plane/models/inputkafka.py +16 -46
  47. cribl_control_plane/models/inputkinesis.py +14 -38
  48. cribl_control_plane/models/inputkubeevents.py +4 -11
  49. cribl_control_plane/models/inputkubelogs.py +8 -16
  50. cribl_control_plane/models/inputkubemetrics.py +8 -16
  51. cribl_control_plane/models/inputloki.py +10 -29
  52. cribl_control_plane/models/inputmetrics.py +8 -23
  53. cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
  54. cribl_control_plane/models/inputmsk.py +18 -53
  55. cribl_control_plane/models/inputnetflow.py +4 -11
  56. cribl_control_plane/models/inputoffice365mgmt.py +14 -33
  57. cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
  58. cribl_control_plane/models/inputoffice365service.py +16 -35
  59. cribl_control_plane/models/inputopentelemetry.py +16 -38
  60. cribl_control_plane/models/inputprometheus.py +18 -50
  61. cribl_control_plane/models/inputprometheusrw.py +10 -30
  62. cribl_control_plane/models/inputrawudp.py +4 -11
  63. cribl_control_plane/models/inputs3.py +8 -21
  64. cribl_control_plane/models/inputs3inventory.py +10 -26
  65. cribl_control_plane/models/inputsecuritylake.py +10 -27
  66. cribl_control_plane/models/inputsnmp.py +6 -16
  67. cribl_control_plane/models/inputsplunk.py +12 -33
  68. cribl_control_plane/models/inputsplunkhec.py +10 -29
  69. cribl_control_plane/models/inputsplunksearch.py +14 -33
  70. cribl_control_plane/models/inputsqs.py +10 -27
  71. cribl_control_plane/models/inputsyslog.py +16 -43
  72. cribl_control_plane/models/inputsystemmetrics.py +24 -48
  73. cribl_control_plane/models/inputsystemstate.py +8 -16
  74. cribl_control_plane/models/inputtcp.py +10 -29
  75. cribl_control_plane/models/inputtcpjson.py +10 -29
  76. cribl_control_plane/models/inputwef.py +14 -37
  77. cribl_control_plane/models/inputwindowsmetrics.py +24 -44
  78. cribl_control_plane/models/inputwineventlogs.py +10 -20
  79. cribl_control_plane/models/inputwiz.py +8 -21
  80. cribl_control_plane/models/inputwizwebhook.py +8 -23
  81. cribl_control_plane/models/inputzscalerhec.py +10 -29
  82. cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
  83. cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
  84. cribl_control_plane/models/masterworkerentry.py +2 -7
  85. cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
  86. cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
  87. cribl_control_plane/models/nodeprovidedinfo.py +0 -3
  88. cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
  89. cribl_control_plane/models/nodeupgradestate.py +1 -2
  90. cribl_control_plane/models/nodeupgradestatus.py +5 -13
  91. cribl_control_plane/models/outputazureblob.py +18 -48
  92. cribl_control_plane/models/outputazuredataexplorer.py +28 -73
  93. cribl_control_plane/models/outputazureeventhub.py +18 -40
  94. cribl_control_plane/models/outputazurelogs.py +12 -35
  95. cribl_control_plane/models/outputclickhouse.py +20 -55
  96. cribl_control_plane/models/outputcloudwatch.py +10 -29
  97. cribl_control_plane/models/outputconfluentcloud.py +32 -77
  98. cribl_control_plane/models/outputcriblhttp.py +16 -44
  99. cribl_control_plane/models/outputcribllake.py +16 -46
  100. cribl_control_plane/models/outputcribltcp.py +18 -45
  101. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
  102. cribl_control_plane/models/outputdatadog.py +20 -48
  103. cribl_control_plane/models/outputdataset.py +18 -46
  104. cribl_control_plane/models/outputdiskspool.py +2 -7
  105. cribl_control_plane/models/outputdls3.py +24 -68
  106. cribl_control_plane/models/outputdynatracehttp.py +20 -53
  107. cribl_control_plane/models/outputdynatraceotlp.py +22 -55
  108. cribl_control_plane/models/outputelastic.py +18 -43
  109. cribl_control_plane/models/outputelasticcloud.py +12 -36
  110. cribl_control_plane/models/outputexabeam.py +10 -29
  111. cribl_control_plane/models/outputfilesystem.py +14 -39
  112. cribl_control_plane/models/outputgooglechronicle.py +16 -50
  113. cribl_control_plane/models/outputgooglecloudlogging.py +14 -41
  114. cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
  115. cribl_control_plane/models/outputgooglepubsub.py +10 -31
  116. cribl_control_plane/models/outputgrafanacloud.py +32 -97
  117. cribl_control_plane/models/outputgraphite.py +14 -31
  118. cribl_control_plane/models/outputhoneycomb.py +12 -35
  119. cribl_control_plane/models/outputhumiohec.py +16 -43
  120. cribl_control_plane/models/outputinfluxdb.py +16 -42
  121. cribl_control_plane/models/outputkafka.py +28 -74
  122. cribl_control_plane/models/outputkinesis.py +16 -40
  123. cribl_control_plane/models/outputloki.py +16 -41
  124. cribl_control_plane/models/outputminio.py +24 -65
  125. cribl_control_plane/models/outputmsk.py +30 -82
  126. cribl_control_plane/models/outputnewrelic.py +18 -43
  127. cribl_control_plane/models/outputnewrelicevents.py +14 -41
  128. cribl_control_plane/models/outputopentelemetry.py +26 -67
  129. cribl_control_plane/models/outputprometheus.py +12 -35
  130. cribl_control_plane/models/outputring.py +8 -19
  131. cribl_control_plane/models/outputs3.py +26 -68
  132. cribl_control_plane/models/outputsecuritylake.py +18 -52
  133. cribl_control_plane/models/outputsentinel.py +18 -45
  134. cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
  135. cribl_control_plane/models/outputservicenow.py +24 -60
  136. cribl_control_plane/models/outputsignalfx.py +14 -37
  137. cribl_control_plane/models/outputsns.py +14 -36
  138. cribl_control_plane/models/outputsplunk.py +24 -60
  139. cribl_control_plane/models/outputsplunkhec.py +12 -35
  140. cribl_control_plane/models/outputsplunklb.py +30 -77
  141. cribl_control_plane/models/outputsqs.py +16 -41
  142. cribl_control_plane/models/outputstatsd.py +14 -30
  143. cribl_control_plane/models/outputstatsdext.py +12 -29
  144. cribl_control_plane/models/outputsumologic.py +12 -35
  145. cribl_control_plane/models/outputsyslog.py +24 -58
  146. cribl_control_plane/models/outputtcpjson.py +20 -52
  147. cribl_control_plane/models/outputwavefront.py +12 -35
  148. cribl_control_plane/models/outputwebhook.py +22 -58
  149. cribl_control_plane/models/outputxsiam.py +14 -35
  150. cribl_control_plane/models/productscore.py +1 -2
  151. cribl_control_plane/models/rbacresource.py +1 -2
  152. cribl_control_plane/models/resourcepolicy.py +2 -4
  153. cribl_control_plane/models/routecloneconf.py +13 -0
  154. cribl_control_plane/models/routeconf.py +4 -3
  155. cribl_control_plane/models/runnablejobcollection.py +13 -30
  156. cribl_control_plane/models/runnablejobexecutor.py +4 -13
  157. cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
  158. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
  159. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
  160. cribl_control_plane/models/workertypes.py +1 -2
  161. cribl_control_plane/sdk.py +2 -2
  162. cribl_control_plane/utils/annotations.py +32 -8
  163. {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.50.dist-info}/METADATA +2 -1
  164. {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.50.dist-info}/RECORD +165 -163
  165. {cribl_control_plane-0.0.48a1.dist-info → cribl_control_plane-0.0.50.dist-info}/WHEEL +0 -0
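
The recurring change across the model files diffed below is the removal of Speakeasy's open-enum machinery: enum classes drop the metaclass=utils.OpenEnumMeta argument, and model fields drop the PlainValidator(validate_open_enum(...)) wrapper, leaving plain closed enum.Enum types that pydantic validates directly. A minimal sketch of the new (0.0.50) field shape, using a hypothetical OutputExampleCompression enum rather than a real class from this package, assuming pydantic v2:

from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class OutputExampleCompression(str, Enum):
    # Hypothetical enum for illustration. In 0.0.48a1 the generated class
    # additionally passed metaclass=utils.OpenEnumMeta to stay "open".
    NONE = "none"
    GZIP = "gzip"


class OutputExample(pydantic.BaseModel):
    # 0.0.50 shape: pydantic validates the enum directly, so only declared
    # members pass. The 0.0.48a1 shape wrapped the type in another
    # Annotated[..., PlainValidator(validate_open_enum(False))] layer, which
    # tolerated values outside the declared members.
    pq_compress: Annotated[
        Optional[OutputExampleCompression], pydantic.Field(alias="pqCompress")
    ] = OutputExampleCompression.NONE


print(OutputExample.model_validate({"pqCompress": "gzip"}).pq_compress)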
cribl_control_plane/models/outputclickhouse.py

@@ -1,12 +1,9 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
- from cribl_control_plane import utils
  from cribl_control_plane.types import BaseModel
- from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
- from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputClickHouseType(str, Enum):
      CLICK_HOUSE = "click_house"


- class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputClickHouseAuthenticationType(str, Enum):
      NONE = "none"
      BASIC = "basic"
      CREDENTIALS_SECRET = "credentialsSecret"
@@ -25,28 +22,28 @@ class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta
      OAUTH = "oauth"


- class OutputClickHouseFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputClickHouseFormat(str, Enum):
      r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

      JSON_COMPACT_EACH_ROW_WITH_NAMES = "json-compact-each-row-with-names"
      JSON_EACH_ROW = "json-each-row"


- class MappingType(str, Enum, metaclass=utils.OpenEnumMeta):
+ class MappingType(str, Enum):
      r"""How event fields are mapped to ClickHouse columns."""

      AUTOMATIC = "automatic"
      CUSTOM = "custom"


- class OutputClickHouseMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputClickHouseMinimumTLSVersion(str, Enum):
      TL_SV1 = "TLSv1"
      TL_SV1_1 = "TLSv1.1"
      TL_SV1_2 = "TLSv1.2"
      TL_SV1_3 = "TLSv1.3"


- class OutputClickHouseMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputClickHouseMaximumTLSVersion(str, Enum):
      TL_SV1 = "TLSv1"
      TL_SV1_1 = "TLSv1.1"
      TL_SV1_2 = "TLSv1.2"
@@ -95,19 +92,11 @@ class OutputClickHouseTLSSettingsClientSide(BaseModel):
      r"""Passphrase to use to decrypt private key"""

      min_version: Annotated[
-         Annotated[
-             Optional[OutputClickHouseMinimumTLSVersion],
-             PlainValidator(validate_open_enum(False)),
-         ],
-         pydantic.Field(alias="minVersion"),
+         Optional[OutputClickHouseMinimumTLSVersion], pydantic.Field(alias="minVersion")
      ] = None

      max_version: Annotated[
-         Annotated[
-             Optional[OutputClickHouseMaximumTLSVersion],
-             PlainValidator(validate_open_enum(False)),
-         ],
-         pydantic.Field(alias="maxVersion"),
+         Optional[OutputClickHouseMaximumTLSVersion], pydantic.Field(alias="maxVersion")
      ] = None


@@ -122,7 +111,7 @@ class OutputClickHouseExtraHTTPHeader(BaseModel):
      name: Optional[str] = None


- class OutputClickHouseFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputClickHouseFailedRequestLoggingMode(str, Enum):
      r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

      PAYLOAD = "payload"
@@ -184,7 +173,7 @@ class OutputClickHouseTimeoutRetrySettings(BaseModel):
      r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


- class OutputClickHouseBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputClickHouseBackpressureBehavior(str, Enum):
      r"""How to handle events when all receivers are exerting backpressure"""

      BLOCK = "block"
@@ -244,21 +233,21 @@ class ColumnMapping(BaseModel):
      r"""Type of the column in the ClickHouse database"""


- class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputClickHouseCompression(str, Enum):
      r"""Codec to use to compress the persisted data"""

      NONE = "none"
      GZIP = "gzip"


- class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputClickHouseQueueFullBehavior(str, Enum):
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

      BLOCK = "block"
      DROP = "drop"


- class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputClickHouseMode(str, Enum):
      r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

      ERROR = "error"
@@ -413,24 +402,16 @@ class OutputClickHouse(BaseModel):
      r"""Tags for filtering and grouping in @{product}"""

      auth_type: Annotated[
-         Annotated[
-             Optional[OutputClickHouseAuthenticationType],
-             PlainValidator(validate_open_enum(False)),
-         ],
-         pydantic.Field(alias="authType"),
+         Optional[OutputClickHouseAuthenticationType], pydantic.Field(alias="authType")
      ] = OutputClickHouseAuthenticationType.NONE

      format_: Annotated[
-         Annotated[
-             Optional[OutputClickHouseFormat], PlainValidator(validate_open_enum(False))
-         ],
-         pydantic.Field(alias="format"),
+         Optional[OutputClickHouseFormat], pydantic.Field(alias="format")
      ] = OutputClickHouseFormat.JSON_COMPACT_EACH_ROW_WITH_NAMES
      r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

      mapping_type: Annotated[
-         Annotated[Optional[MappingType], PlainValidator(validate_open_enum(False))],
-         pydantic.Field(alias="mappingType"),
+         Optional[MappingType], pydantic.Field(alias="mappingType")
      ] = MappingType.AUTOMATIC
      r"""How event fields are mapped to ClickHouse columns."""

@@ -485,10 +466,7 @@ class OutputClickHouse(BaseModel):
      r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

      failed_request_logging_mode: Annotated[
-         Annotated[
-             Optional[OutputClickHouseFailedRequestLoggingMode],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputClickHouseFailedRequestLoggingMode],
          pydantic.Field(alias="failedRequestLoggingMode"),
      ] = OutputClickHouseFailedRequestLoggingMode.NONE
      r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -520,10 +498,7 @@ class OutputClickHouse(BaseModel):
      r"""Log the most recent event that fails to match the table schema"""

      on_backpressure: Annotated[
-         Annotated[
-             Optional[OutputClickHouseBackpressureBehavior],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputClickHouseBackpressureBehavior],
          pydantic.Field(alias="onBackpressure"),
      ] = OutputClickHouseBackpressureBehavior.BLOCK
      r"""How to handle events when all receivers are exerting backpressure"""
@@ -618,28 +593,18 @@ class OutputClickHouse(BaseModel):
      r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

      pq_compress: Annotated[
-         Annotated[
-             Optional[OutputClickHouseCompression],
-             PlainValidator(validate_open_enum(False)),
-         ],
-         pydantic.Field(alias="pqCompress"),
+         Optional[OutputClickHouseCompression], pydantic.Field(alias="pqCompress")
      ] = OutputClickHouseCompression.NONE
      r"""Codec to use to compress the persisted data"""

      pq_on_backpressure: Annotated[
-         Annotated[
-             Optional[OutputClickHouseQueueFullBehavior],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputClickHouseQueueFullBehavior],
          pydantic.Field(alias="pqOnBackpressure"),
      ] = OutputClickHouseQueueFullBehavior.BLOCK
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

      pq_mode: Annotated[
-         Annotated[
-             Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
-         ],
-         pydantic.Field(alias="pqMode"),
+         Optional[OutputClickHouseMode], pydantic.Field(alias="pqMode")
      ] = OutputClickHouseMode.ERROR
      r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

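The practical effect of the outputclickhouse.py hunks above, assuming standard Python Enum semantics: values outside the declared members now fail fast instead of passing through the open-enum validator. A sketch against the 0.0.50 package, using the OutputClickHouseCompression members shown in the diff:

from cribl_control_plane.models.outputclickhouse import OutputClickHouseCompression

# Declared members behave as before.
print(OutputClickHouseCompression("gzip"))  # OutputClickHouseCompression.GZIP

# An undeclared codec raises immediately under the closed enum; the removed
# open-enum pattern was designed to tolerate such values during validation.
try:
    OutputClickHouseCompression("zstd")  # "zstd" chosen only as a non-member example
except ValueError as err:
    print(err)  # 'zstd' is not a valid OutputClickHouseCompression
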
cribl_control_plane/models/outputcloudwatch.py

@@ -1,12 +1,9 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
- from cribl_control_plane import utils
  from cribl_control_plane.types import BaseModel
- from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
- from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputCloudwatchType(str, Enum):
      CLOUDWATCH = "cloudwatch"


- class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputCloudwatchAuthenticationMethod(str, Enum):
      r"""AWS authentication method. Choose Auto to use IAM roles."""

      AUTO = "auto"
@@ -23,7 +20,7 @@ class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMe
      SECRET = "secret"


- class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputCloudwatchBackpressureBehavior(str, Enum):
      r"""How to handle events when all receivers are exerting backpressure"""

      BLOCK = "block"
@@ -31,21 +28,21 @@ class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMe
      QUEUE = "queue"


- class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputCloudwatchCompression(str, Enum):
      r"""Codec to use to compress the persisted data"""

      NONE = "none"
      GZIP = "gzip"


- class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputCloudwatchQueueFullBehavior(str, Enum):
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

      BLOCK = "block"
      DROP = "drop"


- class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputCloudwatchMode(str, Enum):
      r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

      ERROR = "error"
@@ -153,10 +150,7 @@ class OutputCloudwatch(BaseModel):
      r"""Tags for filtering and grouping in @{product}"""

      aws_authentication_method: Annotated[
-         Annotated[
-             Optional[OutputCloudwatchAuthenticationMethod],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputCloudwatchAuthenticationMethod],
          pydantic.Field(alias="awsAuthenticationMethod"),
      ] = OutputCloudwatchAuthenticationMethod.AUTO
      r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -212,10 +206,7 @@ class OutputCloudwatch(BaseModel):
      r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""

      on_backpressure: Annotated[
-         Annotated[
-             Optional[OutputCloudwatchBackpressureBehavior],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputCloudwatchBackpressureBehavior],
          pydantic.Field(alias="onBackpressure"),
      ] = OutputCloudwatchBackpressureBehavior.BLOCK
      r"""How to handle events when all receivers are exerting backpressure"""
@@ -241,28 +232,18 @@ class OutputCloudwatch(BaseModel):
      r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

      pq_compress: Annotated[
-         Annotated[
-             Optional[OutputCloudwatchCompression],
-             PlainValidator(validate_open_enum(False)),
-         ],
-         pydantic.Field(alias="pqCompress"),
+         Optional[OutputCloudwatchCompression], pydantic.Field(alias="pqCompress")
      ] = OutputCloudwatchCompression.NONE
      r"""Codec to use to compress the persisted data"""

      pq_on_backpressure: Annotated[
-         Annotated[
-             Optional[OutputCloudwatchQueueFullBehavior],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputCloudwatchQueueFullBehavior],
          pydantic.Field(alias="pqOnBackpressure"),
      ] = OutputCloudwatchQueueFullBehavior.BLOCK
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

      pq_mode: Annotated[
-         Annotated[
-             Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
-         ],
-         pydantic.Field(alias="pqMode"),
+         Optional[OutputCloudwatchMode], pydantic.Field(alias="pqMode")
      ] = OutputCloudwatchMode.ERROR
      r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

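The outputconfluentcloud.py diff below also covers the one integer-valued enum in this set: OutputConfluentCloudAcknowledgments, whose ack field previously used validate_open_enum(True), the int variant of the validator. A brief sketch of the closed int-enum behavior (the full member list is partially elided in the hunks, so the non-member value here is an assumption):

from cribl_control_plane.models.outputconfluentcloud import (
    OutputConfluentCloudAcknowledgments,
)

# Declared members such as -1 (MINUS_1) still resolve.
print(OutputConfluentCloudAcknowledgments(-1))

# A value with no declared member now raises instead of round-tripping;
# 42 is assumed not to be among the elided members.
try:
    OutputConfluentCloudAcknowledgments(42)
except ValueError as err:
    print(err)
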
cribl_control_plane/models/outputconfluentcloud.py

@@ -1,12 +1,9 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
- from cribl_control_plane import utils
  from cribl_control_plane.types import BaseModel
- from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
- from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,14 +12,14 @@ class OutputConfluentCloudType(str, Enum):
      CONFLUENT_CLOUD = "confluent_cloud"


- class OutputConfluentCloudMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputConfluentCloudMinimumTLSVersion(str, Enum):
      TL_SV1 = "TLSv1"
      TL_SV1_1 = "TLSv1.1"
      TL_SV1_2 = "TLSv1.2"
      TL_SV1_3 = "TLSv1.3"


- class OutputConfluentCloudMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputConfluentCloudMaximumTLSVersion(str, Enum):
      TL_SV1 = "TLSv1"
      TL_SV1_1 = "TLSv1.1"
      TL_SV1_2 = "TLSv1.2"
@@ -82,23 +79,17 @@ class OutputConfluentCloudTLSSettingsClientSide(BaseModel):
      r"""Passphrase to use to decrypt private key"""

      min_version: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudMinimumTLSVersion],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputConfluentCloudMinimumTLSVersion],
          pydantic.Field(alias="minVersion"),
      ] = None

      max_version: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudMaximumTLSVersion],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputConfluentCloudMaximumTLSVersion],
          pydantic.Field(alias="maxVersion"),
      ] = None


- class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputConfluentCloudAcknowledgments(int, Enum):
      r"""Control the number of required acknowledgments."""

      ONE = 1
@@ -106,7 +97,7 @@ class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMet
      MINUS_1 = -1


- class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputConfluentCloudRecordDataFormat(str, Enum):
      r"""Format to use to serialize events before writing to Kafka."""

      JSON = "json"
@@ -114,7 +105,7 @@ class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMe
      PROTOBUF = "protobuf"


- class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputConfluentCloudCompression(str, Enum):
      r"""Codec to use to compress the data before sending to Kafka"""

      NONE = "none"
@@ -123,7 +114,7 @@ class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
      LZ4 = "lz4"


- class OutputConfluentCloudSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputConfluentCloudSchemaType(str, Enum):
      r"""The schema format used to encode and decode event data"""

      AVRO = "avro"
@@ -149,18 +140,14 @@ class OutputConfluentCloudAuth(BaseModel):
      r"""Select or create a secret that references your credentials"""


- class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(
-     str, Enum, metaclass=utils.OpenEnumMeta
- ):
+ class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
      TL_SV1 = "TLSv1"
      TL_SV1_1 = "TLSv1.1"
      TL_SV1_2 = "TLSv1.2"
      TL_SV1_3 = "TLSv1.3"


- class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(
-     str, Enum, metaclass=utils.OpenEnumMeta
- ):
+ class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
      TL_SV1 = "TLSv1"
      TL_SV1_1 = "TLSv1.1"
      TL_SV1_2 = "TLSv1.2"
@@ -220,18 +207,12 @@ class OutputConfluentCloudKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
      r"""Passphrase to use to decrypt private key"""

      min_version: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
          pydantic.Field(alias="minVersion"),
      ] = None

      max_version: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
          pydantic.Field(alias="maxVersion"),
      ] = None

@@ -268,11 +249,7 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
      r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""

      schema_type: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudSchemaType],
-             PlainValidator(validate_open_enum(False)),
-         ],
-         pydantic.Field(alias="schemaType"),
+         Optional[OutputConfluentCloudSchemaType], pydantic.Field(alias="schemaType")
      ] = OutputConfluentCloudSchemaType.AVRO
      r"""The schema format used to encode and decode event data"""

@@ -305,7 +282,7 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
      r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


- class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputConfluentCloudSASLMechanism(str, Enum):
      PLAIN = "plain"
      SCRAM_SHA_256 = "scram-sha-256"
      SCRAM_SHA_512 = "scram-sha-512"
@@ -326,10 +303,9 @@ class OutputConfluentCloudAuthentication(BaseModel):

      disabled: Optional[bool] = True

-     mechanism: Annotated[
-         Optional[OutputConfluentCloudSASLMechanism],
-         PlainValidator(validate_open_enum(False)),
-     ] = OutputConfluentCloudSASLMechanism.PLAIN
+     mechanism: Optional[OutputConfluentCloudSASLMechanism] = (
+         OutputConfluentCloudSASLMechanism.PLAIN
+     )

      oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
          False
@@ -337,7 +313,7 @@ class OutputConfluentCloudAuthentication(BaseModel):
      r"""Enable OAuth authentication"""


- class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputConfluentCloudBackpressureBehavior(str, Enum):
      r"""How to handle events when all receivers are exerting backpressure"""

      BLOCK = "block"
@@ -345,23 +321,21 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEn
      QUEUE = "queue"


- class OutputConfluentCloudPqCompressCompression(
-     str, Enum, metaclass=utils.OpenEnumMeta
- ):
+ class OutputConfluentCloudPqCompressCompression(str, Enum):
      r"""Codec to use to compress the persisted data"""

      NONE = "none"
      GZIP = "gzip"


- class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputConfluentCloudQueueFullBehavior(str, Enum):
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

      BLOCK = "block"
      DROP = "drop"


- class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
+ class OutputConfluentCloudMode(str, Enum):
      r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

      ERROR = "error"
@@ -475,25 +449,19 @@ class OutputConfluentCloud(BaseModel):

      tls: Optional[OutputConfluentCloudTLSSettingsClientSide] = None

-     ack: Annotated[
-         Optional[OutputConfluentCloudAcknowledgments],
-         PlainValidator(validate_open_enum(True)),
-     ] = OutputConfluentCloudAcknowledgments.ONE
+     ack: Optional[OutputConfluentCloudAcknowledgments] = (
+         OutputConfluentCloudAcknowledgments.ONE
+     )
      r"""Control the number of required acknowledgments."""

      format_: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudRecordDataFormat],
-             PlainValidator(validate_open_enum(False)),
-         ],
-         pydantic.Field(alias="format"),
+         Optional[OutputConfluentCloudRecordDataFormat], pydantic.Field(alias="format")
      ] = OutputConfluentCloudRecordDataFormat.JSON
      r"""Format to use to serialize events before writing to Kafka."""

-     compression: Annotated[
-         Optional[OutputConfluentCloudCompression],
-         PlainValidator(validate_open_enum(False)),
-     ] = OutputConfluentCloudCompression.GZIP
+     compression: Optional[OutputConfluentCloudCompression] = (
+         OutputConfluentCloudCompression.GZIP
+     )
      r"""Codec to use to compress the data before sending to Kafka"""

      max_record_size_kb: Annotated[
@@ -554,10 +522,7 @@ class OutputConfluentCloud(BaseModel):
      r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

      on_backpressure: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudBackpressureBehavior],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputConfluentCloudBackpressureBehavior],
          pydantic.Field(alias="onBackpressure"),
      ] = OutputConfluentCloudBackpressureBehavior.BLOCK
      r"""How to handle events when all receivers are exerting backpressure"""
@@ -583,29 +548,19 @@ class OutputConfluentCloud(BaseModel):
      r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

      pq_compress: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudPqCompressCompression],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputConfluentCloudPqCompressCompression],
          pydantic.Field(alias="pqCompress"),
      ] = OutputConfluentCloudPqCompressCompression.NONE
      r"""Codec to use to compress the persisted data"""

      pq_on_backpressure: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudQueueFullBehavior],
-             PlainValidator(validate_open_enum(False)),
-         ],
+         Optional[OutputConfluentCloudQueueFullBehavior],
          pydantic.Field(alias="pqOnBackpressure"),
      ] = OutputConfluentCloudQueueFullBehavior.BLOCK
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

      pq_mode: Annotated[
-         Annotated[
-             Optional[OutputConfluentCloudMode],
-             PlainValidator(validate_open_enum(False)),
-         ],
-         pydantic.Field(alias="pqMode"),
+         Optional[OutputConfluentCloudMode], pydantic.Field(alias="pqMode")
      ] = OutputConfluentCloudMode.ERROR
      r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

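For SDK consumers, the net effect of this release is stricter deserialization: payloads or stored configurations that previously round-tripped unknown enum values through the open enums will now fail validation. One defensive pattern, sketched here rather than provided by the SDK, is to normalize raw strings before building models; OutputConfluentCloudSASLMechanism and its members are taken from the diff above, while coerce_mechanism and the PLAIN fallback are hypothetical choices:

from cribl_control_plane.models.outputconfluentcloud import (
    OutputConfluentCloudSASLMechanism,
)


def coerce_mechanism(raw: str) -> OutputConfluentCloudSASLMechanism:
    # Hypothetical helper: map a raw config value to a declared SASL
    # mechanism, falling back to PLAIN instead of letting ValueError escape.
    try:
        return OutputConfluentCloudSASLMechanism(raw)
    except ValueError:
        return OutputConfluentCloudSASLMechanism.PLAIN


print(coerce_mechanism("scram-sha-256"))  # declared member
print(coerce_mechanism("gssapi"))         # undeclared, falls back to PLAIN

Whether to fall back silently or surface the error is the caller's decision; the point is that, with closed enums, that decision now has to be made explicitly.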