cribl-control-plane 0.0.16 → 0.0.18 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +4365 -4124
- cribl_control_plane/models/createinputop.py +1734 -2771
- cribl_control_plane/models/createoutputop.py +2153 -4314
- cribl_control_plane/models/createversioncommitop.py +24 -0
- cribl_control_plane/models/createversionpushop.py +23 -0
- cribl_control_plane/models/createversionrevertop.py +47 -0
- cribl_control_plane/models/createversionsyncop.py +23 -0
- cribl_control_plane/models/createversionundoop.py +37 -0
- cribl_control_plane/models/getversionbranchop.py +23 -0
- cribl_control_plane/models/getversioncountop.py +47 -0
- cribl_control_plane/models/getversioncurrentbranchop.py +23 -0
- cribl_control_plane/models/getversiondiffop.py +63 -0
- cribl_control_plane/models/getversionfilesop.py +48 -0
- cribl_control_plane/models/getversioninfoop.py +24 -0
- cribl_control_plane/models/getversionshowop.py +63 -0
- cribl_control_plane/models/getversionstatusop.py +38 -0
- cribl_control_plane/models/gitcommitparams.py +23 -0
- cribl_control_plane/models/gitcommitsummary.py +68 -0
- cribl_control_plane/models/gitfile.py +20 -0
- cribl_control_plane/models/gitfilesresponse.py +22 -0
- cribl_control_plane/models/gitinfo.py +23 -0
- cribl_control_plane/models/gitrevertparams.py +20 -0
- cribl_control_plane/models/gitrevertresult.py +48 -0
- cribl_control_plane/models/gitstatusresult.py +73 -0
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +16 -36
- cribl_control_plane/models/inputazureblob.py +8 -19
- cribl_control_plane/models/inputcollection.py +6 -15
- cribl_control_plane/models/inputconfluentcloud.py +20 -45
- cribl_control_plane/models/inputcribl.py +6 -13
- cribl_control_plane/models/inputcriblhttp.py +10 -27
- cribl_control_plane/models/inputcribllakehttp.py +12 -26
- cribl_control_plane/models/inputcriblmetrics.py +6 -14
- cribl_control_plane/models/inputcribltcp.py +10 -27
- cribl_control_plane/models/inputcrowdstrike.py +12 -28
- cribl_control_plane/models/inputdatadogagent.py +10 -28
- cribl_control_plane/models/inputdatagen.py +6 -13
- cribl_control_plane/models/inputedgeprometheus.py +31 -64
- cribl_control_plane/models/inputelastic.py +16 -44
- cribl_control_plane/models/inputeventhub.py +8 -19
- cribl_control_plane/models/inputexec.py +8 -16
- cribl_control_plane/models/inputfile.py +8 -17
- cribl_control_plane/models/inputfirehose.py +10 -27
- cribl_control_plane/models/inputgooglepubsub.py +8 -23
- cribl_control_plane/models/inputgrafana_union.py +35 -81
- cribl_control_plane/models/inputhttp.py +10 -27
- cribl_control_plane/models/inputhttpraw.py +10 -27
- cribl_control_plane/models/inputjournalfiles.py +6 -16
- cribl_control_plane/models/inputkafka.py +16 -45
- cribl_control_plane/models/inputkinesis.py +16 -42
- cribl_control_plane/models/inputkubeevents.py +6 -13
- cribl_control_plane/models/inputkubelogs.py +10 -18
- cribl_control_plane/models/inputkubemetrics.py +10 -18
- cribl_control_plane/models/inputloki.py +12 -33
- cribl_control_plane/models/inputmetrics.py +10 -25
- cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
- cribl_control_plane/models/inputmsk.py +18 -52
- cribl_control_plane/models/inputnetflow.py +6 -15
- cribl_control_plane/models/inputoffice365mgmt.py +16 -37
- cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
- cribl_control_plane/models/inputoffice365service.py +18 -39
- cribl_control_plane/models/inputopentelemetry.py +18 -42
- cribl_control_plane/models/inputprometheus.py +20 -54
- cribl_control_plane/models/inputprometheusrw.py +12 -34
- cribl_control_plane/models/inputrawudp.py +6 -15
- cribl_control_plane/models/inputs3.py +10 -23
- cribl_control_plane/models/inputs3inventory.py +12 -28
- cribl_control_plane/models/inputsecuritylake.py +12 -29
- cribl_control_plane/models/inputsnmp.py +8 -20
- cribl_control_plane/models/inputsplunk.py +14 -37
- cribl_control_plane/models/inputsplunkhec.py +12 -33
- cribl_control_plane/models/inputsplunksearch.py +16 -37
- cribl_control_plane/models/inputsqs.py +12 -31
- cribl_control_plane/models/inputsyslog_union.py +29 -53
- cribl_control_plane/models/inputsystemmetrics.py +26 -50
- cribl_control_plane/models/inputsystemstate.py +10 -18
- cribl_control_plane/models/inputtcp.py +12 -33
- cribl_control_plane/models/inputtcpjson.py +12 -33
- cribl_control_plane/models/inputwef.py +20 -45
- cribl_control_plane/models/inputwindowsmetrics.py +26 -46
- cribl_control_plane/models/inputwineventlogs.py +12 -22
- cribl_control_plane/models/inputwiz.py +10 -25
- cribl_control_plane/models/inputzscalerhec.py +12 -33
- cribl_control_plane/models/output.py +3 -6
- cribl_control_plane/models/outputazureblob.py +20 -52
- cribl_control_plane/models/outputazuredataexplorer.py +30 -77
- cribl_control_plane/models/outputazureeventhub.py +20 -44
- cribl_control_plane/models/outputazurelogs.py +14 -37
- cribl_control_plane/models/outputclickhouse.py +22 -59
- cribl_control_plane/models/outputcloudwatch.py +12 -33
- cribl_control_plane/models/outputconfluentcloud.py +32 -75
- cribl_control_plane/models/outputcriblhttp.py +18 -46
- cribl_control_plane/models/outputcribllake.py +18 -48
- cribl_control_plane/models/outputcribltcp.py +20 -47
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
- cribl_control_plane/models/outputdatadog.py +22 -50
- cribl_control_plane/models/outputdataset.py +20 -48
- cribl_control_plane/models/outputdefault.py +2 -5
- cribl_control_plane/models/outputdevnull.py +2 -5
- cribl_control_plane/models/outputdiskspool.py +4 -9
- cribl_control_plane/models/outputdls3.py +26 -72
- cribl_control_plane/models/outputdynatracehttp.py +22 -57
- cribl_control_plane/models/outputdynatraceotlp.py +24 -59
- cribl_control_plane/models/outputelastic.py +20 -45
- cribl_control_plane/models/outputelasticcloud.py +14 -40
- cribl_control_plane/models/outputexabeam.py +12 -33
- cribl_control_plane/models/outputfilesystem.py +16 -41
- cribl_control_plane/models/outputgooglechronicle.py +18 -54
- cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
- cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
- cribl_control_plane/models/outputgooglepubsub.py +16 -39
- cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
- cribl_control_plane/models/outputgraphite.py +16 -35
- cribl_control_plane/models/outputhoneycomb.py +14 -37
- cribl_control_plane/models/outputhumiohec.py +18 -47
- cribl_control_plane/models/outputinfluxdb.py +18 -44
- cribl_control_plane/models/outputkafka.py +28 -73
- cribl_control_plane/models/outputkinesis.py +18 -44
- cribl_control_plane/models/outputloki.py +18 -43
- cribl_control_plane/models/outputminio.py +26 -69
- cribl_control_plane/models/outputmsk.py +30 -81
- cribl_control_plane/models/outputnetflow.py +2 -5
- cribl_control_plane/models/outputnewrelic.py +20 -45
- cribl_control_plane/models/outputnewrelicevents.py +16 -45
- cribl_control_plane/models/outputopentelemetry.py +28 -69
- cribl_control_plane/models/outputprometheus.py +14 -37
- cribl_control_plane/models/outputring.py +10 -21
- cribl_control_plane/models/outputrouter.py +2 -5
- cribl_control_plane/models/outputs3.py +28 -72
- cribl_control_plane/models/outputsecuritylake.py +20 -56
- cribl_control_plane/models/outputsentinel.py +20 -49
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
- cribl_control_plane/models/outputservicenow.py +26 -64
- cribl_control_plane/models/outputsignalfx.py +16 -39
- cribl_control_plane/models/outputsnmp.py +2 -5
- cribl_control_plane/models/outputsns.py +16 -40
- cribl_control_plane/models/outputsplunk.py +26 -64
- cribl_control_plane/models/outputsplunkhec.py +14 -37
- cribl_control_plane/models/outputsplunklb.py +36 -83
- cribl_control_plane/models/outputsqs.py +18 -45
- cribl_control_plane/models/outputstatsd.py +16 -34
- cribl_control_plane/models/outputstatsdext.py +14 -33
- cribl_control_plane/models/outputsumologic.py +14 -37
- cribl_control_plane/models/outputsyslog.py +26 -60
- cribl_control_plane/models/outputtcpjson.py +22 -54
- cribl_control_plane/models/outputwavefront.py +14 -37
- cribl_control_plane/models/outputwebhook.py +24 -60
- cribl_control_plane/models/outputxsiam.py +16 -37
- cribl_control_plane/sdk.py +4 -0
- cribl_control_plane/versioning.py +2309 -0
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.18.dist-info}/METADATA +18 -2
- cribl_control_plane-0.0.18.dist-info/RECORD +237 -0
- cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
- {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.18.dist-info}/WHEEL +0 -0
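The headline changes in this release are (1) a new Git-style versioning surface (versioning.py plus the createversion*op.py, getversion*op.py, and git*.py models listed above) and (2) a sizable shrink across every input and output model. The hunks below show where the shrink comes from: 0.0.16 generated its enums as "open" (metaclass=utils.OpenEnumMeta, with a PlainValidator(validate_open_enum(False)) wrapper on each field), while 0.0.18 generates plain closed Enum classes. The practical effect for SDK consumers is stricter validation. A minimal sketch of the 0.0.18 behavior, using stand-in names rather than SDK imports:

from enum import Enum
from typing import Optional

from pydantic import BaseModel, ValidationError


class Compression(str, Enum):
    # Closed enum, as generated in 0.0.18: unknown values are rejected.
    NONE = "none"
    GZIP = "gzip"


class Output(BaseModel):
    pq_compress: Optional[Compression] = Compression.NONE


Output(pq_compress="gzip")  # coerced to Compression.GZIP

try:
    Output(pq_compress="zstd")  # not a member: now a ValidationError
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # "enum" (pydantic v2's enum error type)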
cribl_control_plane/models/outputazureeventhub.py +20 -44

@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputAzureEventhubType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubType(str, Enum):
     AZURE_EVENTHUB = "azure_eventhub"


-class OutputAzureEventhubAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubAcknowledgments(int, Enum):
     r"""Control the number of required acknowledgments"""

     ONE = 1
@@ -23,14 +20,14 @@ class OutputAzureEventhubAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     MINUS_1 = -1


-class OutputAzureEventhubRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubRecordDataFormat(str, Enum):
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""

     JSON = "json"
     RAW = "raw"


-class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubSASLMechanism(str, Enum):
     PLAIN = "plain"
     OAUTHBEARER = "oauthbearer"

@@ -47,10 +44,9 @@ class OutputAzureEventhubAuthentication(BaseModel):

     disabled: Optional[bool] = False

-    mechanism: Annotated[
-        Optional[OutputAzureEventhubSASLMechanism],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputAzureEventhubSASLMechanism.PLAIN
+    mechanism: Optional[OutputAzureEventhubSASLMechanism] = (
+        OutputAzureEventhubSASLMechanism.PLAIN
+    )


 class OutputAzureEventhubTLSSettingsClientSideTypedDict(TypedDict):
@@ -68,7 +64,7 @@ class OutputAzureEventhubTLSSettingsClientSide(BaseModel):
     r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""


-class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -76,21 +72,21 @@ class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputAzureEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputAzureEventhubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureEventhubMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -179,9 +175,7 @@ class OutputAzureEventhub(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputAzureEventhubType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputAzureEventhubType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -197,18 +191,13 @@ class OutputAzureEventhub(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    ack: Annotated[
-        Optional[OutputAzureEventhubAcknowledgments],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputAzureEventhubAcknowledgments.ONE
+    ack: Optional[OutputAzureEventhubAcknowledgments] = (
+        OutputAzureEventhubAcknowledgments.ONE
+    )
     r"""Control the number of required acknowledgments"""

     format_: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubRecordDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputAzureEventhubRecordDataFormat], pydantic.Field(alias="format")
     ] = OutputAzureEventhubRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""

@@ -267,10 +256,7 @@ class OutputAzureEventhub(BaseModel):
     tls: Optional[OutputAzureEventhubTLSSettingsClientSide] = None

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureEventhubBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureEventhubBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -291,28 +277,18 @@ class OutputAzureEventhub(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputAzureEventhubCompression], pydantic.Field(alias="pqCompress")
     ] = OutputAzureEventhubCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureEventhubQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureEventhubQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureEventhubMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputAzureEventhubMode], pydantic.Field(alias="pqMode")
     ] = OutputAzureEventhubMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

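For contrast, the removed 0.0.16 pattern kept these fields "open": the PlainValidator bypassed pydantic's own enum check, and the validate_open_enum helper (SDK-internal; the version below is an assumed reimplementation for illustration only) let unrecognized values pass through as raw strings instead of raising:

from enum import Enum
from typing import Optional

from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Mechanism(str, Enum):
    PLAIN = "plain"
    OAUTHBEARER = "oauthbearer"


def lenient(value):
    # Assumed behavior of validate_open_enum(False): return the member
    # when the value is known, otherwise pass the raw value through.
    try:
        return Mechanism(value)
    except ValueError:
        return value


class Auth(BaseModel):
    # PlainValidator replaces the normal enum validation entirely.
    mechanism: Annotated[Optional[Mechanism], PlainValidator(lenient)] = (
        Mechanism.PLAIN
    )


print(Auth(mechanism="scram-sha-256").mechanism)  # passes through, no error

After this release, the same construction raises a ValidationError, so callers that relied on forward-compatible enum values should pin 0.0.16 or switch to members the SDK knows about.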
cribl_control_plane/models/outputazurelogs.py +14 -37

@@ -1,17 +1,14 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputAzureLogsType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsType(str, Enum):
     AZURE_LOGS = "azure_logs"


@@ -26,7 +23,7 @@ class OutputAzureLogsExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputAzureLogsFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -88,7 +85,7 @@ class OutputAzureLogsTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputAzureLogsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -96,28 +93,28 @@ class OutputAzureLogsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputAzureLogsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsAuthenticationMethod(str, Enum):
     r"""Enter workspace ID and workspace key directly, or select a stored secret"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputAzureLogsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputAzureLogsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputAzureLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputAzureLogsMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -209,7 +206,7 @@ class OutputAzureLogsTypedDict(TypedDict):


 class OutputAzureLogs(BaseModel):
-    type: Annotated[OutputAzureLogsType, PlainValidator(validate_open_enum(False))]
+    type: OutputAzureLogsType

     id: Optional[str] = None
     r"""Unique ID for this output"""
@@ -277,10 +274,7 @@ class OutputAzureLogs(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureLogsFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputAzureLogsFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -312,20 +306,13 @@ class OutputAzureLogs(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureLogsBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputAzureLogsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputAzureLogsAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputAzureLogsAuthenticationMethod.MANUAL
     r"""Enter workspace ID and workspace key directly, or select a stored secret"""

@@ -345,28 +332,18 @@ class OutputAzureLogs(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputAzureLogsCompression], pydantic.Field(alias="pqCompress")
     ] = OutputAzureLogsCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputAzureLogsQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureLogsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputAzureLogsMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputAzureLogsMode], pydantic.Field(alias="pqMode")
     ] = OutputAzureLogsMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

cribl_control_plane/models/outputclickhouse.py +22 -59

@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputClickHouseType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseType(str, Enum):
     CLICK_HOUSE = "click_house"


-class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseAuthenticationType(str, Enum):
     NONE = "none"
     BASIC = "basic"
     CREDENTIALS_SECRET = "credentialsSecret"
@@ -25,28 +22,28 @@ class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     OAUTH = "oauth"


-class OutputClickHouseFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseFormat(str, Enum):
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

     JSON_COMPACT_EACH_ROW_WITH_NAMES = "json-compact-each-row-with-names"
     JSON_EACH_ROW = "json-each-row"


-class MappingType(str, Enum, metaclass=utils.OpenEnumMeta):
+class MappingType(str, Enum):
     r"""How event fields are mapped to ClickHouse columns."""

     AUTOMATIC = "automatic"
     CUSTOM = "custom"


-class OutputClickHouseMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputClickHouseMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -95,19 +92,11 @@ class OutputClickHouseTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputClickHouseMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputClickHouseMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None


@@ -122,7 +111,7 @@ class OutputClickHouseExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputClickHouseFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -184,7 +173,7 @@ class OutputClickHouseTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputClickHouseBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -244,21 +233,21 @@ class ColumnMapping(BaseModel):
     r"""Type of the column in the ClickHouse database"""


-class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputClickHouseMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -396,9 +385,7 @@ class OutputClickHouse(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputClickHouseType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputClickHouseType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -415,24 +402,16 @@ class OutputClickHouse(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputClickHouseAuthenticationType],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputClickHouseAuthenticationType], pydantic.Field(alias="authType")
     ] = OutputClickHouseAuthenticationType.NONE

     format_: Annotated[
-        Annotated[
-            Optional[OutputClickHouseFormat], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputClickHouseFormat], pydantic.Field(alias="format")
     ] = OutputClickHouseFormat.JSON_COMPACT_EACH_ROW_WITH_NAMES
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

     mapping_type: Annotated[
-        Annotated[Optional[MappingType], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="mappingType"),
+        Optional[MappingType], pydantic.Field(alias="mappingType")
     ] = MappingType.AUTOMATIC
     r"""How event fields are mapped to ClickHouse columns."""

@@ -487,10 +466,7 @@ class OutputClickHouse(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputClickHouseFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputClickHouseFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputClickHouseFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -522,10 +498,7 @@ class OutputClickHouse(BaseModel):
     r"""Log the most recent event that fails to match the table schema"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputClickHouseBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputClickHouseBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputClickHouseBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -620,28 +593,18 @@ class OutputClickHouse(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputClickHouseCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputClickHouseCompression], pydantic.Field(alias="pqCompress")
     ] = OutputClickHouseCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputClickHouseQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputClickHouseQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputClickHouseQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputClickHouseMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputClickHouseMode], pydantic.Field(alias="pqMode")
     ] = OutputClickHouseMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

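The ClickHouse file above and the CloudWatch file below repeat the same flattening: 0.0.16 nested two Annotated layers per field (the inner one carrying the open-enum PlainValidator, the outer one the pydantic Field alias), while 0.0.18 keeps a single layer with just the camelCase wire alias. A minimal sketch of the surviving field shape, again with stand-in names rather than SDK imports:

from enum import Enum
from typing import Optional

import pydantic
from pydantic import BaseModel
from typing_extensions import Annotated


class Compression(str, Enum):
    NONE = "none"
    GZIP = "gzip"


class Queue(BaseModel):
    # One Annotated layer: only the wire-name alias remains.
    pq_compress: Annotated[
        Optional[Compression], pydantic.Field(alias="pqCompress")
    ] = Compression.NONE


q = Queue.model_validate({"pqCompress": "gzip"})  # wire name maps to snake_case
print(q.pq_compress)  # Compression.GZIP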
cribl_control_plane/models/outputcloudwatch.py +12 -33

@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputCloudwatchType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchType(str, Enum):
     CLOUDWATCH = "cloudwatch"


-class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -23,7 +20,7 @@ class OutputCloudwatchAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"


-class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -31,21 +28,21 @@ class OutputCloudwatchBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"


-class OutputCloudwatchCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputCloudwatchQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputCloudwatchMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputCloudwatchMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -136,9 +133,7 @@ class OutputCloudwatch(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputCloudwatchType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputCloudwatchType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -155,10 +150,7 @@ class OutputCloudwatch(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCloudwatchAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputCloudwatchAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -214,10 +206,7 @@ class OutputCloudwatch(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCloudwatchBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputCloudwatchBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -243,28 +232,18 @@ class OutputCloudwatch(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputCloudwatchCompression], pydantic.Field(alias="pqCompress")
     ] = OutputCloudwatchCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputCloudwatchQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputCloudwatchQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputCloudwatchMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputCloudwatchMode], pydantic.Field(alias="pqMode")
     ] = OutputCloudwatchMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

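One rename in the file list also deserves a migration note: outputgrafanacloud_union.py became outputgrafanacloud.py. The module paths below come straight from that listing; whether your symbols live in these modules or in the models package root (models/__init__.py appears to re-export most names, judging by its size in this diff) is the part to verify for your codebase. A hedged sketch for code that must load the module across both versions:

import importlib

try:
    # 0.0.18 module name, from the rename in the file list above
    grafana = importlib.import_module(
        "cribl_control_plane.models.outputgrafanacloud"
    )
except ModuleNotFoundError:
    # 0.0.16 module name
    grafana = importlib.import_module(
        "cribl_control_plane.models.outputgrafanacloud_union"
    )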