cribl-control-plane 0.0.50rc2__py3-none-any.whl → 0.0.51__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (176)
  1. cribl_control_plane/_version.py +5 -3
  2. cribl_control_plane/errors/healthstatus_error.py +2 -8
  3. cribl_control_plane/groups_sdk.py +4 -4
  4. cribl_control_plane/health.py +2 -6
  5. cribl_control_plane/models/__init__.py +31 -56
  6. cribl_control_plane/models/appmode.py +13 -0
  7. cribl_control_plane/models/cacheconnection.py +2 -10
  8. cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
  9. cribl_control_plane/models/cloudprovider.py +1 -2
  10. cribl_control_plane/models/configgroup.py +4 -24
  11. cribl_control_plane/models/configgroupcloud.py +2 -6
  12. cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
  13. cribl_control_plane/models/createinputhectokenbyidop.py +5 -6
  14. cribl_control_plane/models/createversionpushop.py +5 -5
  15. cribl_control_plane/models/cribllakedataset.py +2 -8
  16. cribl_control_plane/models/datasetmetadata.py +2 -8
  17. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
  18. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
  19. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
  20. cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
  21. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
  22. cribl_control_plane/models/getsummaryop.py +2 -7
  23. cribl_control_plane/models/getversionshowop.py +5 -6
  24. cribl_control_plane/models/gitinfo.py +3 -14
  25. cribl_control_plane/models/hbcriblinfo.py +3 -24
  26. cribl_control_plane/models/healthstatus.py +4 -7
  27. cribl_control_plane/models/heartbeatmetadata.py +0 -3
  28. cribl_control_plane/models/input.py +63 -65
  29. cribl_control_plane/models/inputappscope.py +14 -34
  30. cribl_control_plane/models/inputazureblob.py +6 -17
  31. cribl_control_plane/models/inputcollection.py +4 -11
  32. cribl_control_plane/models/inputconfluentcloud.py +32 -41
  33. cribl_control_plane/models/inputcribl.py +4 -11
  34. cribl_control_plane/models/inputcriblhttp.py +8 -23
  35. cribl_control_plane/models/inputcribllakehttp.py +10 -22
  36. cribl_control_plane/models/inputcriblmetrics.py +4 -12
  37. cribl_control_plane/models/inputcribltcp.py +8 -23
  38. cribl_control_plane/models/inputcrowdstrike.py +10 -26
  39. cribl_control_plane/models/inputdatadogagent.py +8 -24
  40. cribl_control_plane/models/inputdatagen.py +4 -11
  41. cribl_control_plane/models/inputedgeprometheus.py +24 -58
  42. cribl_control_plane/models/inputelastic.py +14 -40
  43. cribl_control_plane/models/inputeventhub.py +6 -15
  44. cribl_control_plane/models/inputexec.py +6 -14
  45. cribl_control_plane/models/inputfile.py +6 -15
  46. cribl_control_plane/models/inputfirehose.py +8 -23
  47. cribl_control_plane/models/inputgooglepubsub.py +6 -19
  48. cribl_control_plane/models/inputgrafana.py +24 -67
  49. cribl_control_plane/models/inputhttp.py +8 -23
  50. cribl_control_plane/models/inputhttpraw.py +8 -23
  51. cribl_control_plane/models/inputjournalfiles.py +4 -12
  52. cribl_control_plane/models/inputkafka.py +28 -41
  53. cribl_control_plane/models/inputkinesis.py +14 -38
  54. cribl_control_plane/models/inputkubeevents.py +4 -11
  55. cribl_control_plane/models/inputkubelogs.py +8 -16
  56. cribl_control_plane/models/inputkubemetrics.py +8 -16
  57. cribl_control_plane/models/inputloki.py +10 -29
  58. cribl_control_plane/models/inputmetrics.py +8 -23
  59. cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
  60. cribl_control_plane/models/inputmsk.py +30 -48
  61. cribl_control_plane/models/inputnetflow.py +4 -11
  62. cribl_control_plane/models/inputoffice365mgmt.py +14 -33
  63. cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
  64. cribl_control_plane/models/inputoffice365service.py +16 -35
  65. cribl_control_plane/models/inputopentelemetry.py +16 -38
  66. cribl_control_plane/models/inputprometheus.py +18 -50
  67. cribl_control_plane/models/inputprometheusrw.py +10 -30
  68. cribl_control_plane/models/inputrawudp.py +4 -11
  69. cribl_control_plane/models/inputs3.py +8 -21
  70. cribl_control_plane/models/inputs3inventory.py +10 -26
  71. cribl_control_plane/models/inputsecuritylake.py +10 -27
  72. cribl_control_plane/models/inputsnmp.py +6 -16
  73. cribl_control_plane/models/inputsplunk.py +12 -33
  74. cribl_control_plane/models/inputsplunkhec.py +10 -29
  75. cribl_control_plane/models/inputsplunksearch.py +14 -33
  76. cribl_control_plane/models/inputsqs.py +10 -27
  77. cribl_control_plane/models/inputsyslog.py +16 -43
  78. cribl_control_plane/models/inputsystemmetrics.py +24 -48
  79. cribl_control_plane/models/inputsystemstate.py +8 -16
  80. cribl_control_plane/models/inputtcp.py +10 -29
  81. cribl_control_plane/models/inputtcpjson.py +10 -29
  82. cribl_control_plane/models/inputwef.py +14 -37
  83. cribl_control_plane/models/inputwindowsmetrics.py +24 -44
  84. cribl_control_plane/models/inputwineventlogs.py +10 -20
  85. cribl_control_plane/models/inputwiz.py +8 -21
  86. cribl_control_plane/models/inputwizwebhook.py +8 -23
  87. cribl_control_plane/models/inputzscalerhec.py +10 -29
  88. cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
  89. cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
  90. cribl_control_plane/models/masterworkerentry.py +2 -7
  91. cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
  92. cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
  93. cribl_control_plane/models/nodeprovidedinfo.py +0 -3
  94. cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
  95. cribl_control_plane/models/nodeupgradestate.py +1 -2
  96. cribl_control_plane/models/nodeupgradestatus.py +5 -13
  97. cribl_control_plane/models/output.py +79 -84
  98. cribl_control_plane/models/outputazureblob.py +18 -48
  99. cribl_control_plane/models/outputazuredataexplorer.py +28 -73
  100. cribl_control_plane/models/outputazureeventhub.py +18 -40
  101. cribl_control_plane/models/outputazurelogs.py +12 -35
  102. cribl_control_plane/models/outputclickhouse.py +20 -55
  103. cribl_control_plane/models/outputcloudwatch.py +10 -29
  104. cribl_control_plane/models/outputconfluentcloud.py +44 -71
  105. cribl_control_plane/models/outputcriblhttp.py +16 -44
  106. cribl_control_plane/models/outputcribllake.py +16 -46
  107. cribl_control_plane/models/outputcribltcp.py +18 -45
  108. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
  109. cribl_control_plane/models/outputdatadog.py +20 -48
  110. cribl_control_plane/models/outputdataset.py +18 -46
  111. cribl_control_plane/models/outputdiskspool.py +2 -7
  112. cribl_control_plane/models/outputdls3.py +24 -68
  113. cribl_control_plane/models/outputdynatracehttp.py +20 -53
  114. cribl_control_plane/models/outputdynatraceotlp.py +22 -55
  115. cribl_control_plane/models/outputelastic.py +18 -43
  116. cribl_control_plane/models/outputelasticcloud.py +12 -36
  117. cribl_control_plane/models/outputexabeam.py +10 -29
  118. cribl_control_plane/models/outputfilesystem.py +14 -39
  119. cribl_control_plane/models/outputgooglechronicle.py +16 -50
  120. cribl_control_plane/models/outputgooglecloudlogging.py +18 -50
  121. cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
  122. cribl_control_plane/models/outputgooglepubsub.py +10 -31
  123. cribl_control_plane/models/outputgrafanacloud.py +32 -97
  124. cribl_control_plane/models/outputgraphite.py +14 -31
  125. cribl_control_plane/models/outputhoneycomb.py +12 -35
  126. cribl_control_plane/models/outputhumiohec.py +16 -43
  127. cribl_control_plane/models/outputinfluxdb.py +16 -42
  128. cribl_control_plane/models/outputkafka.py +40 -69
  129. cribl_control_plane/models/outputkinesis.py +16 -40
  130. cribl_control_plane/models/outputloki.py +16 -41
  131. cribl_control_plane/models/outputminio.py +24 -65
  132. cribl_control_plane/models/outputmsk.py +42 -77
  133. cribl_control_plane/models/outputnewrelic.py +18 -43
  134. cribl_control_plane/models/outputnewrelicevents.py +14 -41
  135. cribl_control_plane/models/outputopentelemetry.py +26 -67
  136. cribl_control_plane/models/outputprometheus.py +12 -35
  137. cribl_control_plane/models/outputring.py +8 -19
  138. cribl_control_plane/models/outputs3.py +26 -68
  139. cribl_control_plane/models/outputsecuritylake.py +18 -52
  140. cribl_control_plane/models/outputsentinel.py +18 -45
  141. cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
  142. cribl_control_plane/models/outputservicenow.py +24 -60
  143. cribl_control_plane/models/outputsignalfx.py +14 -37
  144. cribl_control_plane/models/outputsns.py +14 -36
  145. cribl_control_plane/models/outputsplunk.py +24 -60
  146. cribl_control_plane/models/outputsplunkhec.py +12 -35
  147. cribl_control_plane/models/outputsplunklb.py +30 -77
  148. cribl_control_plane/models/outputsqs.py +16 -41
  149. cribl_control_plane/models/outputstatsd.py +14 -30
  150. cribl_control_plane/models/outputstatsdext.py +12 -29
  151. cribl_control_plane/models/outputsumologic.py +12 -35
  152. cribl_control_plane/models/outputsyslog.py +24 -58
  153. cribl_control_plane/models/outputtcpjson.py +20 -52
  154. cribl_control_plane/models/outputwavefront.py +12 -35
  155. cribl_control_plane/models/outputwebhook.py +22 -58
  156. cribl_control_plane/models/outputxsiam.py +14 -35
  157. cribl_control_plane/models/productscore.py +1 -2
  158. cribl_control_plane/models/rbacresource.py +1 -2
  159. cribl_control_plane/models/resourcepolicy.py +2 -4
  160. cribl_control_plane/models/routecloneconf.py +13 -0
  161. cribl_control_plane/models/routeconf.py +4 -3
  162. cribl_control_plane/models/runnablejobcollection.py +13 -30
  163. cribl_control_plane/models/runnablejobexecutor.py +4 -13
  164. cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
  165. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
  166. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
  167. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +5 -6
  168. cribl_control_plane/models/workertypes.py +1 -2
  169. {cribl_control_plane-0.0.50rc2.dist-info → cribl_control_plane-0.0.51.dist-info}/METADATA +14 -5
  170. cribl_control_plane-0.0.51.dist-info/RECORD +325 -0
  171. cribl_control_plane/models/error.py +0 -16
  172. cribl_control_plane/models/gethealthinfoop.py +0 -17
  173. cribl_control_plane/models/gitshowresult.py +0 -19
  174. cribl_control_plane/models/outputdatabricks.py +0 -282
  175. cribl_control_plane-0.0.50rc2.dist-info/RECORD +0 -327
  176. {cribl_control_plane-0.0.50rc2.dist-info → cribl_control_plane-0.0.51.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputclickhouse.py (+20 -55)

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputClickHouseType(str, Enum):
     CLICK_HOUSE = "click_house"


-class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseAuthenticationType(str, Enum):
     NONE = "none"
     BASIC = "basic"
     CREDENTIALS_SECRET = "credentialsSecret"
@@ -25,28 +22,28 @@ class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta
     OAUTH = "oauth"


-class OutputClickHouseFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseFormat(str, Enum):
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

     JSON_COMPACT_EACH_ROW_WITH_NAMES = "json-compact-each-row-with-names"
     JSON_EACH_ROW = "json-each-row"


-class MappingType(str, Enum, metaclass=utils.OpenEnumMeta):
+class MappingType(str, Enum):
     r"""How event fields are mapped to ClickHouse columns."""

     AUTOMATIC = "automatic"
     CUSTOM = "custom"


-class OutputClickHouseMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputClickHouseMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -95,19 +92,11 @@ class OutputClickHouseTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputClickHouseMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputClickHouseMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


@@ -122,7 +111,7 @@ class OutputClickHouseExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputClickHouseFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -184,7 +173,7 @@ class OutputClickHouseTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputClickHouseBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -244,21 +233,21 @@ class ColumnMapping(BaseModel):
     r"""Type of the column in the ClickHouse database"""


-class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -413,24 +402,16 @@ class OutputClickHouse(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputClickHouseAuthenticationType],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputClickHouseAuthenticationType], pydantic.Field(alias="authType")
     ] = OutputClickHouseAuthenticationType.NONE

     format_: Annotated[
-        Annotated[
-            Optional[OutputClickHouseFormat], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputClickHouseFormat], pydantic.Field(alias="format")
     ] = OutputClickHouseFormat.JSON_COMPACT_EACH_ROW_WITH_NAMES
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

     mapping_type: Annotated[
-        Annotated[Optional[MappingType], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="mappingType"),
+        Optional[MappingType], pydantic.Field(alias="mappingType")
     ] = MappingType.AUTOMATIC
     r"""How event fields are mapped to ClickHouse columns."""

@@ -485,10 +466,7 @@ class OutputClickHouse(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputClickHouseFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputClickHouseFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputClickHouseFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -520,10 +498,7 @@ class OutputClickHouse(BaseModel):
     r"""Log the most recent event that fails to match the table schema"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputClickHouseBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputClickHouseBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputClickHouseBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -618,28 +593,18 @@ class OutputClickHouse(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputClickHouseCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputClickHouseCompression], pydantic.Field(alias="pqCompress")
     ] = OutputClickHouseCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputClickHouseQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputClickHouseQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputClickHouseQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputClickHouseMode], pydantic.Field(alias="pqMode")
     ] = OutputClickHouseMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
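Note: The dominant change in this release, visible in the hunks above and repeated across most of the input*/output* model files listed earlier, is that enums generated with Speakeasy's open-enum machinery (metaclass=utils.OpenEnumMeta plus PlainValidator(validate_open_enum(...))) are now plain closed str/int Enums. The practical effect is that values outside the declared members, which the 0.0.50rc2 models were built to pass through, should now fail pydantic validation. A minimal standalone sketch of the new behavior (generic names, not SDK classes; assumes pydantic v2):

from enum import Enum
from typing import Optional

import pydantic


class Compression(str, Enum):
    # Closed enum, as in the 0.0.51 models: only these members validate.
    NONE = "none"
    GZIP = "gzip"


class QueueSettings(pydantic.BaseModel):
    pq_compress: Optional[Compression] = Compression.NONE


QueueSettings.model_validate({"pq_compress": "gzip"})  # OK: declared member

try:
    QueueSettings.model_validate({"pq_compress": "zstd"})  # not a member
except pydantic.ValidationError as err:
    # Raises with the closed enums in 0.0.51; the open enums in
    # 0.0.50rc2 were designed to accept unrecognized values like this.
    print(err)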
cribl_control_plane/models/outputcloudwatch.py (+10 -29)

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputCloudwatchType(str, Enum):
     CLOUDWATCH = "cloudwatch"


-class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -23,7 +20,7 @@ class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMe
     SECRET = "secret"


-class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -31,21 +28,21 @@ class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMe
     QUEUE = "queue"


-class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -153,10 +150,7 @@ class OutputCloudwatch(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCloudwatchAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputCloudwatchAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -212,10 +206,7 @@ class OutputCloudwatch(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCloudwatchBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCloudwatchBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -241,28 +232,18 @@ class OutputCloudwatch(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputCloudwatchCompression], pydantic.Field(alias="pqCompress")
     ] = OutputCloudwatchCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCloudwatchQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputCloudwatchQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputCloudwatchMode], pydantic.Field(alias="pqMode")
     ] = OutputCloudwatchMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
cribl_control_plane/models/outputconfluentcloud.py (+44 -71)

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,14 +12,14 @@ class OutputConfluentCloudType(str, Enum):
     CONFLUENT_CLOUD = "confluent_cloud"


-class OutputConfluentCloudMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputConfluentCloudMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -82,23 +79,17 @@ class OutputConfluentCloudTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None


-class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudAcknowledgments(int, Enum):
     r"""Control the number of required acknowledgments."""

     ONE = 1
@@ -106,7 +97,7 @@ class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMet
     MINUS_1 = -1


-class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudRecordDataFormat(str, Enum):
     r"""Format to use to serialize events before writing to Kafka."""

     JSON = "json"
@@ -114,7 +105,7 @@ class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMe
     PROTOBUF = "protobuf"


-class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudCompression(str, Enum):
     r"""Codec to use to compress the data before sending to Kafka"""

     NONE = "none"
@@ -123,6 +114,13 @@ class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     LZ4 = "lz4"


+class OutputConfluentCloudSchemaType(str, Enum):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class OutputConfluentCloudAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""

@@ -142,18 +140,14 @@ class OutputConfluentCloudAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""


-class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -213,18 +207,12 @@ class OutputConfluentCloudKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None

@@ -233,6 +221,8 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[OutputConfluentCloudSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -258,6 +248,11 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""

+    schema_type: Annotated[
+        Optional[OutputConfluentCloudSchemaType], pydantic.Field(alias="schemaType")
+    ] = OutputConfluentCloudSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
@@ -287,7 +282,7 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


-class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudSASLMechanism(str, Enum):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -308,10 +303,9 @@ class OutputConfluentCloudAuthentication(BaseModel):

     disabled: Optional[bool] = True

-    mechanism: Annotated[
-        Optional[OutputConfluentCloudSASLMechanism],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputConfluentCloudSASLMechanism.PLAIN
+    mechanism: Optional[OutputConfluentCloudSASLMechanism] = (
+        OutputConfluentCloudSASLMechanism.PLAIN
+    )

     oauth_enabled: Annotated[Optional[bool], pydantic.Field(alias="oauthEnabled")] = (
         False
@@ -319,7 +313,7 @@ class OutputConfluentCloudAuthentication(BaseModel):
     r"""Enable OAuth authentication"""


-class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -327,23 +321,21 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEn
     QUEUE = "queue"


-class OutputConfluentCloudPqCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputConfluentCloudPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -457,25 +449,19 @@ class OutputConfluentCloud(BaseModel):

     tls: Optional[OutputConfluentCloudTLSSettingsClientSide] = None

-    ack: Annotated[
-        Optional[OutputConfluentCloudAcknowledgments],
-        PlainValidator(validate_open_enum(True)),
-    ] = OutputConfluentCloudAcknowledgments.ONE
+    ack: Optional[OutputConfluentCloudAcknowledgments] = (
+        OutputConfluentCloudAcknowledgments.ONE
+    )
     r"""Control the number of required acknowledgments."""

     format_: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudRecordDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputConfluentCloudRecordDataFormat], pydantic.Field(alias="format")
     ] = OutputConfluentCloudRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""

-    compression: Annotated[
-        Optional[OutputConfluentCloudCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputConfluentCloudCompression.GZIP
+    compression: Optional[OutputConfluentCloudCompression] = (
+        OutputConfluentCloudCompression.GZIP
+    )
     r"""Codec to use to compress the data before sending to Kafka"""

     max_record_size_kb: Annotated[
@@ -536,10 +522,7 @@ class OutputConfluentCloud(BaseModel):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputConfluentCloudBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -565,29 +548,19 @@ class OutputConfluentCloud(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputConfluentCloudPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputConfluentCloudQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputConfluentCloudMode], pydantic.Field(alias="pqMode")
     ] = OutputConfluentCloudMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
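Note: Besides the open-enum removal, the only net-new surface in this file is the schema_type field (wire key schemaType) on the schema registry configuration, backed by the new OutputConfluentCloudSchemaType enum and defaulting to AVRO. A hedged sketch of selecting JSON instead, assuming these classes are re-exported from cribl_control_plane.models and accept field names as keyword arguments, as is usual for Speakeasy-generated SDKs (the registry URL is a placeholder):

from cribl_control_plane.models import (
    OutputConfluentCloudKafkaSchemaRegistryAuthentication,
    OutputConfluentCloudSchemaType,
)

registry = OutputConfluentCloudKafkaSchemaRegistryAuthentication(
    disabled=False,
    schema_registry_url="https://registry.example.com:8081",  # placeholder
    schema_type=OutputConfluentCloudSchemaType.JSON,  # new in 0.0.51; default is AVRO
)

# Pydantic v2 serialization by alias shows the camelCase wire keys,
# including "schemaType".
print(registry.model_dump(by_alias=True, exclude_none=True))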