cribl-control-plane 0.0.16__py3-none-any.whl → 0.0.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of cribl-control-plane might be problematic.

Files changed (133)
  1. cribl_control_plane/_version.py +3 -3
  2. cribl_control_plane/errors/healthstatus_error.py +2 -8
  3. cribl_control_plane/models/__init__.py +4124 -4124
  4. cribl_control_plane/models/createinputop.py +1734 -2771
  5. cribl_control_plane/models/createoutputop.py +2153 -4314
  6. cribl_control_plane/models/healthstatus.py +4 -7
  7. cribl_control_plane/models/inputappscope.py +16 -36
  8. cribl_control_plane/models/inputazureblob.py +8 -19
  9. cribl_control_plane/models/inputcollection.py +6 -15
  10. cribl_control_plane/models/inputconfluentcloud.py +20 -45
  11. cribl_control_plane/models/inputcribl.py +6 -13
  12. cribl_control_plane/models/inputcriblhttp.py +10 -27
  13. cribl_control_plane/models/inputcribllakehttp.py +12 -26
  14. cribl_control_plane/models/inputcriblmetrics.py +6 -14
  15. cribl_control_plane/models/inputcribltcp.py +10 -27
  16. cribl_control_plane/models/inputcrowdstrike.py +12 -28
  17. cribl_control_plane/models/inputdatadogagent.py +10 -28
  18. cribl_control_plane/models/inputdatagen.py +6 -13
  19. cribl_control_plane/models/inputedgeprometheus.py +31 -64
  20. cribl_control_plane/models/inputelastic.py +16 -44
  21. cribl_control_plane/models/inputeventhub.py +8 -19
  22. cribl_control_plane/models/inputexec.py +8 -16
  23. cribl_control_plane/models/inputfile.py +8 -17
  24. cribl_control_plane/models/inputfirehose.py +10 -27
  25. cribl_control_plane/models/inputgooglepubsub.py +8 -23
  26. cribl_control_plane/models/inputgrafana_union.py +35 -81
  27. cribl_control_plane/models/inputhttp.py +10 -27
  28. cribl_control_plane/models/inputhttpraw.py +10 -27
  29. cribl_control_plane/models/inputjournalfiles.py +6 -16
  30. cribl_control_plane/models/inputkafka.py +16 -45
  31. cribl_control_plane/models/inputkinesis.py +16 -42
  32. cribl_control_plane/models/inputkubeevents.py +6 -13
  33. cribl_control_plane/models/inputkubelogs.py +10 -18
  34. cribl_control_plane/models/inputkubemetrics.py +10 -18
  35. cribl_control_plane/models/inputloki.py +12 -33
  36. cribl_control_plane/models/inputmetrics.py +10 -25
  37. cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
  38. cribl_control_plane/models/inputmsk.py +18 -52
  39. cribl_control_plane/models/inputnetflow.py +6 -15
  40. cribl_control_plane/models/inputoffice365mgmt.py +16 -37
  41. cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
  42. cribl_control_plane/models/inputoffice365service.py +18 -39
  43. cribl_control_plane/models/inputopentelemetry.py +18 -42
  44. cribl_control_plane/models/inputprometheus.py +20 -54
  45. cribl_control_plane/models/inputprometheusrw.py +12 -34
  46. cribl_control_plane/models/inputrawudp.py +6 -15
  47. cribl_control_plane/models/inputs3.py +10 -23
  48. cribl_control_plane/models/inputs3inventory.py +12 -28
  49. cribl_control_plane/models/inputsecuritylake.py +12 -29
  50. cribl_control_plane/models/inputsnmp.py +8 -20
  51. cribl_control_plane/models/inputsplunk.py +14 -37
  52. cribl_control_plane/models/inputsplunkhec.py +12 -33
  53. cribl_control_plane/models/inputsplunksearch.py +16 -37
  54. cribl_control_plane/models/inputsqs.py +12 -31
  55. cribl_control_plane/models/inputsyslog_union.py +29 -53
  56. cribl_control_plane/models/inputsystemmetrics.py +26 -50
  57. cribl_control_plane/models/inputsystemstate.py +10 -18
  58. cribl_control_plane/models/inputtcp.py +12 -33
  59. cribl_control_plane/models/inputtcpjson.py +12 -33
  60. cribl_control_plane/models/inputwef.py +20 -45
  61. cribl_control_plane/models/inputwindowsmetrics.py +26 -46
  62. cribl_control_plane/models/inputwineventlogs.py +12 -22
  63. cribl_control_plane/models/inputwiz.py +10 -25
  64. cribl_control_plane/models/inputzscalerhec.py +12 -33
  65. cribl_control_plane/models/output.py +3 -6
  66. cribl_control_plane/models/outputazureblob.py +20 -52
  67. cribl_control_plane/models/outputazuredataexplorer.py +30 -77
  68. cribl_control_plane/models/outputazureeventhub.py +20 -44
  69. cribl_control_plane/models/outputazurelogs.py +14 -37
  70. cribl_control_plane/models/outputclickhouse.py +22 -59
  71. cribl_control_plane/models/outputcloudwatch.py +12 -33
  72. cribl_control_plane/models/outputconfluentcloud.py +32 -75
  73. cribl_control_plane/models/outputcriblhttp.py +18 -46
  74. cribl_control_plane/models/outputcribllake.py +18 -48
  75. cribl_control_plane/models/outputcribltcp.py +20 -47
  76. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
  77. cribl_control_plane/models/outputdatadog.py +22 -50
  78. cribl_control_plane/models/outputdataset.py +20 -48
  79. cribl_control_plane/models/outputdefault.py +2 -5
  80. cribl_control_plane/models/outputdevnull.py +2 -5
  81. cribl_control_plane/models/outputdiskspool.py +4 -9
  82. cribl_control_plane/models/outputdls3.py +26 -72
  83. cribl_control_plane/models/outputdynatracehttp.py +22 -57
  84. cribl_control_plane/models/outputdynatraceotlp.py +24 -59
  85. cribl_control_plane/models/outputelastic.py +20 -45
  86. cribl_control_plane/models/outputelasticcloud.py +14 -40
  87. cribl_control_plane/models/outputexabeam.py +12 -33
  88. cribl_control_plane/models/outputfilesystem.py +16 -41
  89. cribl_control_plane/models/outputgooglechronicle.py +18 -54
  90. cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
  91. cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
  92. cribl_control_plane/models/outputgooglepubsub.py +16 -39
  93. cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
  94. cribl_control_plane/models/outputgraphite.py +16 -35
  95. cribl_control_plane/models/outputhoneycomb.py +14 -37
  96. cribl_control_plane/models/outputhumiohec.py +18 -47
  97. cribl_control_plane/models/outputinfluxdb.py +18 -44
  98. cribl_control_plane/models/outputkafka.py +28 -73
  99. cribl_control_plane/models/outputkinesis.py +18 -44
  100. cribl_control_plane/models/outputloki.py +18 -43
  101. cribl_control_plane/models/outputminio.py +26 -69
  102. cribl_control_plane/models/outputmsk.py +30 -81
  103. cribl_control_plane/models/outputnetflow.py +2 -5
  104. cribl_control_plane/models/outputnewrelic.py +20 -45
  105. cribl_control_plane/models/outputnewrelicevents.py +16 -45
  106. cribl_control_plane/models/outputopentelemetry.py +28 -69
  107. cribl_control_plane/models/outputprometheus.py +14 -37
  108. cribl_control_plane/models/outputring.py +10 -21
  109. cribl_control_plane/models/outputrouter.py +2 -5
  110. cribl_control_plane/models/outputs3.py +28 -72
  111. cribl_control_plane/models/outputsecuritylake.py +20 -56
  112. cribl_control_plane/models/outputsentinel.py +20 -49
  113. cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
  114. cribl_control_plane/models/outputservicenow.py +26 -64
  115. cribl_control_plane/models/outputsignalfx.py +16 -39
  116. cribl_control_plane/models/outputsnmp.py +2 -5
  117. cribl_control_plane/models/outputsns.py +16 -40
  118. cribl_control_plane/models/outputsplunk.py +26 -64
  119. cribl_control_plane/models/outputsplunkhec.py +14 -37
  120. cribl_control_plane/models/outputsplunklb.py +36 -83
  121. cribl_control_plane/models/outputsqs.py +18 -45
  122. cribl_control_plane/models/outputstatsd.py +16 -34
  123. cribl_control_plane/models/outputstatsdext.py +14 -33
  124. cribl_control_plane/models/outputsumologic.py +14 -37
  125. cribl_control_plane/models/outputsyslog.py +26 -60
  126. cribl_control_plane/models/outputtcpjson.py +22 -54
  127. cribl_control_plane/models/outputwavefront.py +14 -37
  128. cribl_control_plane/models/outputwebhook.py +24 -60
  129. cribl_control_plane/models/outputxsiam.py +16 -37
  130. {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/METADATA +1 -1
  131. cribl_control_plane-0.0.17.dist-info/RECORD +215 -0
  132. cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
  133. {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/WHEEL +0 -0
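The change repeated across the model files below is the removal of Speakeasy's open-enum machinery: each generated enum drops metaclass=utils.OpenEnumMeta, and each enum-typed field drops its PlainValidator(validate_open_enum(...)) wrapper, leaving a plain closed Enum. The following sketch is not SDK code; it approximates the before/after validation behavior under the assumption that validate_open_enum passed unrecognized values through as raw strings (the usual contract of Speakeasy open enums), while a closed enum makes pydantic reject them.

from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Compression(str, Enum):
    # Stand-in for e.g. OutputClickHouseCompression
    NONE = "none"
    GZIP = "gzip"


def _open_enum(value: object) -> object:
    # Approximation of the removed validate_open_enum(False) helper:
    # coerce known values to enum members, pass unknown values through.
    try:
        return Compression(value)
    except ValueError:
        return value


class Model016(pydantic.BaseModel):
    # 0.0.16 shape: open enum, tolerant of unknown values
    pq_compress: Annotated[Optional[Compression], PlainValidator(_open_enum)] = (
        Compression.NONE
    )


class Model017(pydantic.BaseModel):
    # 0.0.17 shape: plain closed enum, strictly validated
    pq_compress: Optional[Compression] = Compression.NONE


print(Model016(pq_compress="zstd").pq_compress)  # 'zstd' passes through unchanged
try:
    Model017(pq_compress="zstd")  # "zstd" is a hypothetical unknown codec
except pydantic.ValidationError:
    print("0.0.17 rejects enum values the SDK does not define")

If that assumption about validate_open_enum holds, configurations that round-tripped values outside the generated enums under 0.0.16 would start failing validation under 0.0.17.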
--- a/cribl_control_plane/models/outputclickhouse.py
+++ b/cribl_control_plane/models/outputclickhouse.py
@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputClickHouseType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseType(str, Enum):
     CLICK_HOUSE = "click_house"


-class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseAuthenticationType(str, Enum):
     NONE = "none"
     BASIC = "basic"
     CREDENTIALS_SECRET = "credentialsSecret"
@@ -25,28 +22,28 @@ class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta
     OAUTH = "oauth"


-class OutputClickHouseFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseFormat(str, Enum):
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

     JSON_COMPACT_EACH_ROW_WITH_NAMES = "json-compact-each-row-with-names"
     JSON_EACH_ROW = "json-each-row"


-class MappingType(str, Enum, metaclass=utils.OpenEnumMeta):
+class MappingType(str, Enum):
     r"""How event fields are mapped to ClickHouse columns."""

     AUTOMATIC = "automatic"
     CUSTOM = "custom"


-class OutputClickHouseMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputClickHouseMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -95,19 +92,11 @@ class OutputClickHouseTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputClickHouseMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputClickHouseMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


@@ -122,7 +111,7 @@ class OutputClickHouseExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputClickHouseFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -184,7 +173,7 @@ class OutputClickHouseTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputClickHouseBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -244,21 +233,21 @@ class ColumnMapping(BaseModel):
     r"""Type of the column in the ClickHouse database"""


-class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -396,9 +385,7 @@ class OutputClickHouse(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputClickHouseType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputClickHouseType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -415,24 +402,16 @@ class OutputClickHouse(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputClickHouseAuthenticationType],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputClickHouseAuthenticationType], pydantic.Field(alias="authType")
     ] = OutputClickHouseAuthenticationType.NONE

     format_: Annotated[
-        Annotated[
-            Optional[OutputClickHouseFormat], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputClickHouseFormat], pydantic.Field(alias="format")
     ] = OutputClickHouseFormat.JSON_COMPACT_EACH_ROW_WITH_NAMES
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

     mapping_type: Annotated[
-        Annotated[Optional[MappingType], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="mappingType"),
+        Optional[MappingType], pydantic.Field(alias="mappingType")
     ] = MappingType.AUTOMATIC
     r"""How event fields are mapped to ClickHouse columns."""

@@ -487,10 +466,7 @@ class OutputClickHouse(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputClickHouseFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputClickHouseFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputClickHouseFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -522,10 +498,7 @@ class OutputClickHouse(BaseModel):
     r"""Log the most recent event that fails to match the table schema"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputClickHouseBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputClickHouseBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputClickHouseBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -620,28 +593,18 @@ class OutputClickHouse(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputClickHouseCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputClickHouseCompression], pydantic.Field(alias="pqCompress")
     ] = OutputClickHouseCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputClickHouseQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputClickHouseQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputClickHouseQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputClickHouseMode], pydantic.Field(alias="pqMode")
     ] = OutputClickHouseMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
--- a/cribl_control_plane/models/outputcloudwatch.py
+++ b/cribl_control_plane/models/outputcloudwatch.py
@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputCloudwatchType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchType(str, Enum):
     CLOUDWATCH = "cloudwatch"


-class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -23,7 +20,7 @@ class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMe
     SECRET = "secret"


-class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -31,21 +28,21 @@ class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMe
     QUEUE = "queue"


-class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -136,9 +133,7 @@ class OutputCloudwatch(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputCloudwatchType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputCloudwatchType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -155,10 +150,7 @@ class OutputCloudwatch(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCloudwatchAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputCloudwatchAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -214,10 +206,7 @@ class OutputCloudwatch(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCloudwatchBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCloudwatchBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -243,28 +232,18 @@ class OutputCloudwatch(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputCloudwatchCompression], pydantic.Field(alias="pqCompress")
     ] = OutputCloudwatchCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCloudwatchQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputCloudwatchQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputCloudwatchMode], pydantic.Field(alias="pqMode")
     ] = OutputCloudwatchMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
--- a/cribl_control_plane/models/outputconfluentcloud.py
+++ b/cribl_control_plane/models/outputconfluentcloud.py
@@ -1,28 +1,25 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputConfluentCloudType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudType(str, Enum):
     CONFLUENT_CLOUD = "confluent_cloud"


-class OutputConfluentCloudMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputConfluentCloudMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -82,23 +79,17 @@ class OutputConfluentCloudTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None


-class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudAcknowledgments(int, Enum):
     r"""Control the number of required acknowledgments."""

     ONE = 1
@@ -106,7 +97,7 @@ class OutputConfluentCloudAcknowledgments(int, Enum, metaclass=utils.OpenEnumMet
     MINUS_1 = -1


-class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudRecordDataFormat(str, Enum):
     r"""Format to use to serialize events before writing to Kafka."""

     JSON = "json"
@@ -114,7 +105,7 @@ class OutputConfluentCloudRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMe
     PROTOBUF = "protobuf"


-class OutputConfluentCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudCompression(str, Enum):
     r"""Codec to use to compress the data before sending to Kafka"""

     NONE = "none"
@@ -142,18 +133,14 @@ class OutputConfluentCloudAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""


-class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -213,18 +200,12 @@ class OutputConfluentCloudKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudKafkaSchemaRegistryMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudKafkaSchemaRegistryMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None

@@ -287,7 +268,7 @@ class OutputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


-class OutputConfluentCloudSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudSASLMechanism(str, Enum):
     PLAIN = "plain"
     SCRAM_SHA_256 = "scram-sha-256"
     SCRAM_SHA_512 = "scram-sha-512"
@@ -306,13 +287,12 @@ class OutputConfluentCloudAuthentication(BaseModel):

     disabled: Optional[bool] = True

-    mechanism: Annotated[
-        Optional[OutputConfluentCloudSASLMechanism],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputConfluentCloudSASLMechanism.PLAIN
+    mechanism: Optional[OutputConfluentCloudSASLMechanism] = (
+        OutputConfluentCloudSASLMechanism.PLAIN
+    )


-class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -320,23 +300,21 @@ class OutputConfluentCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEn
     QUEUE = "queue"


-class OutputConfluentCloudPqCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputConfluentCloudPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputConfluentCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputConfluentCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputConfluentCloudMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -432,9 +410,7 @@ class OutputConfluentCloud(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputConfluentCloudType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputConfluentCloudType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -452,25 +428,19 @@ class OutputConfluentCloud(BaseModel):

     tls: Optional[OutputConfluentCloudTLSSettingsClientSide] = None

-    ack: Annotated[
-        Optional[OutputConfluentCloudAcknowledgments],
-        PlainValidator(validate_open_enum(True)),
-    ] = OutputConfluentCloudAcknowledgments.ONE
+    ack: Optional[OutputConfluentCloudAcknowledgments] = (
+        OutputConfluentCloudAcknowledgments.ONE
+    )
     r"""Control the number of required acknowledgments."""

     format_: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudRecordDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputConfluentCloudRecordDataFormat], pydantic.Field(alias="format")
     ] = OutputConfluentCloudRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""

-    compression: Annotated[
-        Optional[OutputConfluentCloudCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputConfluentCloudCompression.GZIP
+    compression: Optional[OutputConfluentCloudCompression] = (
+        OutputConfluentCloudCompression.GZIP
+    )
     r"""Codec to use to compress the data before sending to Kafka"""

     max_record_size_kb: Annotated[
@@ -531,10 +501,7 @@ class OutputConfluentCloud(BaseModel):
     r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputConfluentCloudBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -560,29 +527,19 @@ class OutputConfluentCloud(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputConfluentCloudPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputConfluentCloudQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputConfluentCloudQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputConfluentCloudMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputConfluentCloudMode], pydantic.Field(alias="pqMode")
     ] = OutputConfluentCloudMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
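A detail specific to the outputconfluentcloud.py hunks above: ack is an int-valued enum, and it was the only field in these hunks wrapped with validate_open_enum(True) rather than False; in 0.0.17 it becomes a plain closed enum like the rest. The parenthesized default assignments (mechanism, ack, compression) are formatter line-wrapping only, not behavioral changes. A hedged sketch of the stricter ack handling, using stand-in classes trimmed to the members the hunks show:

from enum import Enum
from typing import Optional

import pydantic


class Acknowledgments(int, Enum):
    # Stand-in for OutputConfluentCloudAcknowledgments; the hunks show
    # only these members, so any others are omitted here.
    ONE = 1
    MINUS_1 = -1


class ConfluentOutput(pydantic.BaseModel):
    ack: Optional[Acknowledgments] = Acknowledgments.ONE


print(ConfluentOutput(ack=-1).ack)  # OK: -1 is a defined member
try:
    ConfluentOutput(ack=2)  # 2 is a hypothetical out-of-set ack count
except pydantic.ValidationError:
    print("0.0.17: ack values outside the enum now fail validation")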