cribl-control-plane 0.0.46__py3-none-any.whl → 0.0.48a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (167)
  1. cribl_control_plane/_version.py +4 -6
  2. cribl_control_plane/errors/apierror.py +2 -0
  3. cribl_control_plane/errors/criblcontrolplaneerror.py +11 -7
  4. cribl_control_plane/errors/error.py +4 -2
  5. cribl_control_plane/errors/healthstatus_error.py +12 -4
  6. cribl_control_plane/errors/no_response_error.py +5 -1
  7. cribl_control_plane/errors/responsevalidationerror.py +2 -0
  8. cribl_control_plane/models/__init__.py +12 -12
  9. cribl_control_plane/models/cacheconnection.py +10 -2
  10. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  11. cribl_control_plane/models/cloudprovider.py +2 -1
  12. cribl_control_plane/models/configgroup.py +7 -2
  13. cribl_control_plane/models/configgroupcloud.py +6 -2
  14. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  15. cribl_control_plane/models/cribllakedataset.py +8 -2
  16. cribl_control_plane/models/datasetmetadata.py +8 -2
  17. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  18. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  19. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  20. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  21. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  22. cribl_control_plane/models/getsummaryop.py +7 -2
  23. cribl_control_plane/models/hbcriblinfo.py +19 -3
  24. cribl_control_plane/models/healthstatus.py +7 -4
  25. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  26. cribl_control_plane/models/inputappscope.py +34 -14
  27. cribl_control_plane/models/inputazureblob.py +17 -6
  28. cribl_control_plane/models/inputcollection.py +11 -4
  29. cribl_control_plane/models/inputconfluentcloud.py +47 -20
  30. cribl_control_plane/models/inputcribl.py +11 -4
  31. cribl_control_plane/models/inputcriblhttp.py +23 -8
  32. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  33. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  34. cribl_control_plane/models/inputcribltcp.py +23 -8
  35. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  36. cribl_control_plane/models/inputdatadogagent.py +24 -8
  37. cribl_control_plane/models/inputdatagen.py +11 -4
  38. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  39. cribl_control_plane/models/inputelastic.py +40 -14
  40. cribl_control_plane/models/inputeventhub.py +15 -6
  41. cribl_control_plane/models/inputexec.py +14 -6
  42. cribl_control_plane/models/inputfile.py +15 -6
  43. cribl_control_plane/models/inputfirehose.py +23 -8
  44. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  45. cribl_control_plane/models/inputgrafana.py +67 -24
  46. cribl_control_plane/models/inputhttp.py +23 -8
  47. cribl_control_plane/models/inputhttpraw.py +23 -8
  48. cribl_control_plane/models/inputjournalfiles.py +12 -4
  49. cribl_control_plane/models/inputkafka.py +46 -16
  50. cribl_control_plane/models/inputkinesis.py +38 -14
  51. cribl_control_plane/models/inputkubeevents.py +11 -4
  52. cribl_control_plane/models/inputkubelogs.py +16 -8
  53. cribl_control_plane/models/inputkubemetrics.py +16 -8
  54. cribl_control_plane/models/inputloki.py +29 -10
  55. cribl_control_plane/models/inputmetrics.py +23 -8
  56. cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
  57. cribl_control_plane/models/inputmsk.py +53 -18
  58. cribl_control_plane/models/inputnetflow.py +11 -4
  59. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  60. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  61. cribl_control_plane/models/inputoffice365service.py +35 -16
  62. cribl_control_plane/models/inputopentelemetry.py +38 -16
  63. cribl_control_plane/models/inputprometheus.py +50 -18
  64. cribl_control_plane/models/inputprometheusrw.py +30 -10
  65. cribl_control_plane/models/inputrawudp.py +11 -4
  66. cribl_control_plane/models/inputs3.py +21 -8
  67. cribl_control_plane/models/inputs3inventory.py +26 -10
  68. cribl_control_plane/models/inputsecuritylake.py +27 -10
  69. cribl_control_plane/models/inputsnmp.py +16 -6
  70. cribl_control_plane/models/inputsplunk.py +33 -12
  71. cribl_control_plane/models/inputsplunkhec.py +29 -10
  72. cribl_control_plane/models/inputsplunksearch.py +33 -14
  73. cribl_control_plane/models/inputsqs.py +27 -10
  74. cribl_control_plane/models/inputsyslog.py +43 -16
  75. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  76. cribl_control_plane/models/inputsystemstate.py +16 -8
  77. cribl_control_plane/models/inputtcp.py +29 -10
  78. cribl_control_plane/models/inputtcpjson.py +29 -10
  79. cribl_control_plane/models/inputwef.py +37 -14
  80. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  81. cribl_control_plane/models/inputwineventlogs.py +20 -10
  82. cribl_control_plane/models/inputwiz.py +21 -8
  83. cribl_control_plane/models/inputwizwebhook.py +23 -8
  84. cribl_control_plane/models/inputzscalerhec.py +29 -10
  85. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  86. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  87. cribl_control_plane/models/masterworkerentry.py +7 -2
  88. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  89. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  90. cribl_control_plane/models/nodeprovidedinfo.py +3 -0
  91. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  92. cribl_control_plane/models/nodeupgradestate.py +2 -1
  93. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  94. cribl_control_plane/models/outputazureblob.py +48 -18
  95. cribl_control_plane/models/outputazuredataexplorer.py +73 -28
  96. cribl_control_plane/models/outputazureeventhub.py +40 -18
  97. cribl_control_plane/models/outputazurelogs.py +35 -12
  98. cribl_control_plane/models/outputclickhouse.py +55 -20
  99. cribl_control_plane/models/outputcloudwatch.py +29 -10
  100. cribl_control_plane/models/outputconfluentcloud.py +77 -32
  101. cribl_control_plane/models/outputcriblhttp.py +44 -16
  102. cribl_control_plane/models/outputcribllake.py +46 -16
  103. cribl_control_plane/models/outputcribltcp.py +45 -18
  104. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
  105. cribl_control_plane/models/outputdatadog.py +48 -20
  106. cribl_control_plane/models/outputdataset.py +46 -18
  107. cribl_control_plane/models/outputdiskspool.py +7 -2
  108. cribl_control_plane/models/outputdls3.py +68 -24
  109. cribl_control_plane/models/outputdynatracehttp.py +53 -20
  110. cribl_control_plane/models/outputdynatraceotlp.py +55 -22
  111. cribl_control_plane/models/outputelastic.py +43 -18
  112. cribl_control_plane/models/outputelasticcloud.py +36 -12
  113. cribl_control_plane/models/outputexabeam.py +29 -10
  114. cribl_control_plane/models/outputfilesystem.py +39 -14
  115. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  116. cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
  117. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  118. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  119. cribl_control_plane/models/outputgrafanacloud.py +97 -32
  120. cribl_control_plane/models/outputgraphite.py +31 -14
  121. cribl_control_plane/models/outputhoneycomb.py +35 -12
  122. cribl_control_plane/models/outputhumiohec.py +43 -16
  123. cribl_control_plane/models/outputinfluxdb.py +42 -16
  124. cribl_control_plane/models/outputkafka.py +74 -28
  125. cribl_control_plane/models/outputkinesis.py +40 -16
  126. cribl_control_plane/models/outputloki.py +41 -16
  127. cribl_control_plane/models/outputminio.py +65 -24
  128. cribl_control_plane/models/outputmsk.py +82 -30
  129. cribl_control_plane/models/outputnewrelic.py +43 -18
  130. cribl_control_plane/models/outputnewrelicevents.py +41 -14
  131. cribl_control_plane/models/outputopentelemetry.py +67 -26
  132. cribl_control_plane/models/outputprometheus.py +35 -12
  133. cribl_control_plane/models/outputring.py +19 -8
  134. cribl_control_plane/models/outputs3.py +68 -26
  135. cribl_control_plane/models/outputsecuritylake.py +52 -18
  136. cribl_control_plane/models/outputsentinel.py +45 -18
  137. cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
  138. cribl_control_plane/models/outputservicenow.py +60 -24
  139. cribl_control_plane/models/outputsignalfx.py +37 -14
  140. cribl_control_plane/models/outputsns.py +36 -14
  141. cribl_control_plane/models/outputsplunk.py +60 -24
  142. cribl_control_plane/models/outputsplunkhec.py +35 -12
  143. cribl_control_plane/models/outputsplunklb.py +77 -30
  144. cribl_control_plane/models/outputsqs.py +41 -16
  145. cribl_control_plane/models/outputstatsd.py +30 -14
  146. cribl_control_plane/models/outputstatsdext.py +29 -12
  147. cribl_control_plane/models/outputsumologic.py +35 -12
  148. cribl_control_plane/models/outputsyslog.py +58 -24
  149. cribl_control_plane/models/outputtcpjson.py +52 -20
  150. cribl_control_plane/models/outputwavefront.py +35 -12
  151. cribl_control_plane/models/outputwebhook.py +58 -22
  152. cribl_control_plane/models/outputxsiam.py +35 -14
  153. cribl_control_plane/models/productscore.py +2 -1
  154. cribl_control_plane/models/rbacresource.py +2 -1
  155. cribl_control_plane/models/resourcepolicy.py +4 -2
  156. cribl_control_plane/models/routeconf.py +3 -4
  157. cribl_control_plane/models/runnablejobcollection.py +30 -13
  158. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  159. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  160. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  161. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  162. cribl_control_plane/models/workertypes.py +2 -1
  163. {cribl_control_plane-0.0.46.dist-info → cribl_control_plane-0.0.48a1.dist-info}/METADATA +1 -1
  164. {cribl_control_plane-0.0.46.dist-info → cribl_control_plane-0.0.48a1.dist-info}/RECORD +165 -167
  165. {cribl_control_plane-0.0.46.dist-info → cribl_control_plane-0.0.48a1.dist-info}/WHEEL +1 -1
  166. cribl_control_plane/models/appmode.py +0 -13
  167. cribl_control_plane/models/routecloneconf.py +0 -13
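
Nearly every model change in this release is the same mechanical edit, visible in the four representative files diffed below: each generated Enum gains metaclass=utils.OpenEnumMeta, and each enum-typed field gains a PlainValidator(validate_open_enum(...)) annotation. The net effect is to turn closed enums into "open" ones, so a config value introduced by a newer Cribl server no longer breaks deserialization in an older SDK. The snippet below is a minimal, hypothetical reconstruction of the metaclass half of the pattern — the wheel's actual utils.OpenEnumMeta may be implemented differently — but it shows the behavior the hunks are wiring up:

    # Hypothetical sketch of an "open" enum; the real
    # cribl_control_plane.utils.OpenEnumMeta may differ.
    from enum import Enum, EnumMeta

    class OpenEnumMeta(EnumMeta):
        def __call__(cls, value, *args, **kwargs):
            try:
                # Known values resolve to members as usual
                return super().__call__(value, *args, **kwargs)
            except ValueError:
                # Unknown values pass through instead of raising
                return value

    class Compression(str, Enum, metaclass=OpenEnumMeta):
        NONE = "none"
        GZIP = "gzip"

    assert Compression("gzip") is Compression.GZIP  # member lookup unchanged
    assert Compression("zstd") == "zstd"            # unknown value survives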
cribl_control_plane/models/outputazureeventhub.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputAzureEventhubType(str, Enum):
     AZURE_EVENTHUB = "azure_eventhub"
 
 
-class OutputAzureEventhubAcknowledgments(int, Enum):
+class OutputAzureEventhubAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments"""
 
     ONE = 1
@@ -20,14 +23,14 @@ class OutputAzureEventhubAcknowledgments(int, Enum):
     MINUS_1 = -1
 
 
-class OutputAzureEventhubRecordDataFormat(str, Enum):
+class OutputAzureEventhubRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""
 
     JSON = "json"
     RAW = "raw"
 
 
-class OutputAzureEventhubSASLMechanism(str, Enum):
+class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
     PLAIN = "plain"
     OAUTHBEARER = "oauthbearer"
 
@@ -44,9 +47,10 @@ class OutputAzureEventhubAuthentication(BaseModel):
 
     disabled: Optional[bool] = False
 
-    mechanism: Optional[OutputAzureEventhubSASLMechanism] = (
-        OutputAzureEventhubSASLMechanism.PLAIN
-    )
+    mechanism: Annotated[
+        Optional[OutputAzureEventhubSASLMechanism],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputAzureEventhubSASLMechanism.PLAIN
 
 
 class OutputAzureEventhubTLSSettingsClientSideTypedDict(TypedDict):
@@ -64,7 +68,7 @@ class OutputAzureEventhubTLSSettingsClientSide(BaseModel):
     r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""
 
 
-class OutputAzureEventhubBackpressureBehavior(str, Enum):
+class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -72,21 +76,21 @@ class OutputAzureEventhubBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputAzureEventhubCompression(str, Enum):
+class OutputAzureEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureEventhubQueueFullBehavior(str, Enum):
+class OutputAzureEventhubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureEventhubMode(str, Enum):
+class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -191,13 +195,18 @@ class OutputAzureEventhub(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    ack: Optional[OutputAzureEventhubAcknowledgments] = (
-        OutputAzureEventhubAcknowledgments.ONE
-    )
+    ack: Annotated[
+        Optional[OutputAzureEventhubAcknowledgments],
+        PlainValidator(validate_open_enum(True)),
+    ] = OutputAzureEventhubAcknowledgments.ONE
     r"""Control the number of required acknowledgments"""
 
     format_: Annotated[
-        Optional[OutputAzureEventhubRecordDataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputAzureEventhubRecordDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputAzureEventhubRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""
 
@@ -256,7 +265,10 @@ class OutputAzureEventhub(BaseModel):
     tls: Optional[OutputAzureEventhubTLSSettingsClientSide] = None
 
     on_backpressure: Annotated[
-        Optional[OutputAzureEventhubBackpressureBehavior],
+        Annotated[
+            Optional[OutputAzureEventhubBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureEventhubBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -277,18 +289,28 @@ class OutputAzureEventhub(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputAzureEventhubCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputAzureEventhubCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
    ] = OutputAzureEventhubCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputAzureEventhubQueueFullBehavior],
+        Annotated[
+            Optional[OutputAzureEventhubQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureEventhubQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputAzureEventhubMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputAzureEventhubMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputAzureEventhubMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
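The field-level half of the pattern is the doubled Annotated seen above: the inner Annotated attaches the PlainValidator, the outer one keeps the pydantic.Field alias. A standalone pydantic v2 sketch of the same shape (the model and validator names here are illustrative, not part of the SDK):

    from enum import Enum
    from typing import Optional

    import pydantic
    from pydantic.functional_validators import PlainValidator
    from typing_extensions import Annotated

    class Compression(str, Enum):
        NONE = "none"
        GZIP = "gzip"

    def open_enum(value):
        # Plays the role of validate_open_enum(False): coerce when the member
        # exists, otherwise keep the raw wire value instead of failing.
        try:
            return Compression(value)
        except ValueError:
            return value

    class PqDemo(pydantic.BaseModel):
        pq_compress: Annotated[
            Annotated[Optional[Compression], PlainValidator(open_enum)],
            pydantic.Field(alias="pqCompress"),
        ] = Compression.NONE

    print(PqDemo.model_validate({"pqCompress": "gzip"}).pq_compress)  # Compression.GZIP
    print(PqDemo.model_validate({"pqCompress": "zstd"}).pq_compress)  # zstd (kept raw)

Under 0.0.46 the second payload would have failed enum validation; letting it through as a raw string is presumably the point of these annotations.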
cribl_control_plane/models/outputazurelogs.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -23,7 +26,7 @@ class OutputAzureLogsExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputAzureLogsFailedRequestLoggingMode(str, Enum):
+class OutputAzureLogsFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -85,7 +88,7 @@ class OutputAzureLogsTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputAzureLogsBackpressureBehavior(str, Enum):
+class OutputAzureLogsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -93,28 +96,28 @@ class OutputAzureLogsBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputAzureLogsAuthenticationMethod(str, Enum):
+class OutputAzureLogsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter workspace ID and workspace key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputAzureLogsCompression(str, Enum):
+class OutputAzureLogsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputAzureLogsQueueFullBehavior(str, Enum):
+class OutputAzureLogsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputAzureLogsMode(str, Enum):
+class OutputAzureLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -274,7 +277,10 @@ class OutputAzureLogs(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputAzureLogsFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputAzureLogsFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputAzureLogsFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -306,13 +312,20 @@ class OutputAzureLogs(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputAzureLogsBackpressureBehavior],
+        Annotated[
+            Optional[OutputAzureLogsBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureLogsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputAzureLogsAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputAzureLogsAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputAzureLogsAuthenticationMethod.MANUAL
     r"""Enter workspace ID and workspace key directly, or select a stored secret"""
 
@@ -332,18 +345,28 @@ class OutputAzureLogs(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputAzureLogsCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputAzureLogsCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
    ] = OutputAzureLogsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputAzureLogsQueueFullBehavior],
+        Annotated[
+            Optional[OutputAzureLogsQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureLogsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputAzureLogsMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputAzureLogsMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputAzureLogsMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
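Almost every call site in these hunks passes validate_open_enum(False); the one exception so far is the Event Hub ack field above, which passes True for its int-valued OutputAzureEventhubAcknowledgments enum. A plausible, unverified reading of that flag is int coercion before lookup:

    from enum import Enum

    class Acks(int, Enum):  # stand-in for OutputAzureEventhubAcknowledgments
        ONE = 1
        ZERO = 0
        MINUS_1 = -1

    def open_int_enum(value):
        # Hypothetical int flavor of validate_open_enum(True)
        try:
            return Acks(int(value))   # known counts resolve to members
        except (ValueError, TypeError):
            return value              # anything else passes through

    assert open_int_enum("1") is Acks.ONE  # numeric strings normalize
    assert open_int_enum(5) == 5           # unknown ack count survives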
cribl_control_plane/models/outputclickhouse.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputClickHouseType(str, Enum):
     CLICK_HOUSE = "click_house"
 
 
-class OutputClickHouseAuthenticationType(str, Enum):
+class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"
     BASIC = "basic"
     CREDENTIALS_SECRET = "credentialsSecret"
@@ -22,28 +25,28 @@ class OutputClickHouseAuthenticationType(str, Enum):
     OAUTH = "oauth"
 
 
-class OutputClickHouseFormat(str, Enum):
+class OutputClickHouseFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""
 
     JSON_COMPACT_EACH_ROW_WITH_NAMES = "json-compact-each-row-with-names"
     JSON_EACH_ROW = "json-each-row"
 
 
-class MappingType(str, Enum):
+class MappingType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How event fields are mapped to ClickHouse columns."""
 
     AUTOMATIC = "automatic"
     CUSTOM = "custom"
 
 
-class OutputClickHouseMinimumTLSVersion(str, Enum):
+class OutputClickHouseMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputClickHouseMaximumTLSVersion(str, Enum):
+class OutputClickHouseMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -92,11 +95,19 @@ class OutputClickHouseTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputClickHouseMinimumTLSVersion], pydantic.Field(alias="minVersion")
+        Annotated[
+            Optional[OutputClickHouseMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputClickHouseMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+        Annotated[
+            Optional[OutputClickHouseMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="maxVersion"),
     ] = None
 
 
@@ -111,7 +122,7 @@ class OutputClickHouseExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputClickHouseFailedRequestLoggingMode(str, Enum):
+class OutputClickHouseFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -173,7 +184,7 @@ class OutputClickHouseTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputClickHouseBackpressureBehavior(str, Enum):
+class OutputClickHouseBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -233,21 +244,21 @@ class ColumnMapping(BaseModel):
     r"""Type of the column in the ClickHouse database"""
 
 
-class OutputClickHouseCompression(str, Enum):
+class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputClickHouseQueueFullBehavior(str, Enum):
+class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputClickHouseMode(str, Enum):
+class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -402,16 +413,24 @@ class OutputClickHouse(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""
 
     auth_type: Annotated[
-        Optional[OutputClickHouseAuthenticationType], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputClickHouseAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputClickHouseAuthenticationType.NONE
 
     format_: Annotated[
-        Optional[OutputClickHouseFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputClickHouseFormat], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputClickHouseFormat.JSON_COMPACT_EACH_ROW_WITH_NAMES
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""
 
     mapping_type: Annotated[
-        Optional[MappingType], pydantic.Field(alias="mappingType")
+        Annotated[Optional[MappingType], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="mappingType"),
     ] = MappingType.AUTOMATIC
     r"""How event fields are mapped to ClickHouse columns."""
 
@@ -466,7 +485,10 @@ class OutputClickHouse(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputClickHouseFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputClickHouseFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputClickHouseFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -498,7 +520,10 @@ class OutputClickHouse(BaseModel):
     r"""Log the most recent event that fails to match the table schema"""
 
     on_backpressure: Annotated[
-        Optional[OutputClickHouseBackpressureBehavior],
+        Annotated[
+            Optional[OutputClickHouseBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputClickHouseBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -593,18 +618,28 @@ class OutputClickHouse(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputClickHouseCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputClickHouseCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputClickHouseCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputClickHouseQueueFullBehavior],
+        Annotated[
+            Optional[OutputClickHouseQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputClickHouseQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputClickHouseMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputClickHouseMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
cribl_control_plane/models/outputcloudwatch.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputCloudwatchType(str, Enum):
     CLOUDWATCH = "cloudwatch"
 
 
-class OutputCloudwatchAuthenticationMethod(str, Enum):
+class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -20,7 +23,7 @@ class OutputCloudwatchAuthenticationMethod(str, Enum):
     SECRET = "secret"
 
 
-class OutputCloudwatchBackpressureBehavior(str, Enum):
+class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -28,21 +31,21 @@ class OutputCloudwatchBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputCloudwatchCompression(str, Enum):
+class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputCloudwatchQueueFullBehavior(str, Enum):
+class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputCloudwatchMode(str, Enum):
+class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -150,7 +153,10 @@ class OutputCloudwatch(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""
 
     aws_authentication_method: Annotated[
-        Optional[OutputCloudwatchAuthenticationMethod],
+        Annotated[
+            Optional[OutputCloudwatchAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputCloudwatchAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -206,7 +212,10 @@ class OutputCloudwatch(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""
 
     on_backpressure: Annotated[
-        Optional[OutputCloudwatchBackpressureBehavior],
+        Annotated[
+            Optional[OutputCloudwatchBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCloudwatchBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -232,18 +241,28 @@ class OutputCloudwatch(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputCloudwatchCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputCloudwatchCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputCloudwatchCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputCloudwatchQueueFullBehavior],
+        Annotated[
+            Optional[OutputCloudwatchQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputCloudwatchQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputCloudwatchMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
    ] = OutputCloudwatchMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
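
One consequence for SDK consumers, across all of the files listed above: a field that previously always held an enum member may now hold a raw value the server sent. A defensive sketch (assuming the module path shown in the file list, and an already-fetched destination config):

    from cribl_control_plane.models.outputcloudwatch import OutputCloudwatchMode

    def describe_pq_mode(mode) -> str:
        # mode may be an OutputCloudwatchMode member, or a raw string for a
        # value this SDK version does not know about
        if isinstance(mode, OutputCloudwatchMode):
            return f"known PQ mode: {mode.value}"
        return f"unrecognized PQ mode from server: {mode!r}"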