cribl-control-plane 0.0.44a2__py3-none-any.whl → 0.0.45__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/models/__init__.py +3 -3
- cribl_control_plane/models/appmode.py +1 -2
- cribl_control_plane/models/cacheconnection.py +2 -10
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
- cribl_control_plane/models/cloudprovider.py +1 -2
- cribl_control_plane/models/configgroup.py +2 -7
- cribl_control_plane/models/configgroupcloud.py +2 -6
- cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
- cribl_control_plane/models/cribllakedataset.py +2 -8
- cribl_control_plane/models/datasetmetadata.py +2 -8
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
- cribl_control_plane/models/getsummaryop.py +2 -7
- cribl_control_plane/models/hbcriblinfo.py +6 -6
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/inputappscope.py +14 -34
- cribl_control_plane/models/inputazureblob.py +6 -17
- cribl_control_plane/models/inputcollection.py +4 -11
- cribl_control_plane/models/inputconfluentcloud.py +20 -47
- cribl_control_plane/models/inputcribl.py +4 -11
- cribl_control_plane/models/inputcriblhttp.py +8 -23
- cribl_control_plane/models/inputcribllakehttp.py +10 -22
- cribl_control_plane/models/inputcriblmetrics.py +4 -12
- cribl_control_plane/models/inputcribltcp.py +8 -23
- cribl_control_plane/models/inputcrowdstrike.py +10 -26
- cribl_control_plane/models/inputdatadogagent.py +8 -24
- cribl_control_plane/models/inputdatagen.py +4 -11
- cribl_control_plane/models/inputedgeprometheus.py +24 -58
- cribl_control_plane/models/inputelastic.py +14 -40
- cribl_control_plane/models/inputeventhub.py +6 -15
- cribl_control_plane/models/inputexec.py +6 -14
- cribl_control_plane/models/inputfile.py +6 -15
- cribl_control_plane/models/inputfirehose.py +8 -23
- cribl_control_plane/models/inputgooglepubsub.py +6 -19
- cribl_control_plane/models/inputgrafana.py +24 -67
- cribl_control_plane/models/inputhttp.py +8 -23
- cribl_control_plane/models/inputhttpraw.py +8 -23
- cribl_control_plane/models/inputjournalfiles.py +4 -12
- cribl_control_plane/models/inputkafka.py +16 -46
- cribl_control_plane/models/inputkinesis.py +14 -38
- cribl_control_plane/models/inputkubeevents.py +4 -11
- cribl_control_plane/models/inputkubelogs.py +8 -16
- cribl_control_plane/models/inputkubemetrics.py +8 -16
- cribl_control_plane/models/inputloki.py +10 -29
- cribl_control_plane/models/inputmetrics.py +8 -23
- cribl_control_plane/models/inputmodeldriventelemetry.py +10 -27
- cribl_control_plane/models/inputmsk.py +18 -53
- cribl_control_plane/models/inputnetflow.py +4 -11
- cribl_control_plane/models/inputoffice365mgmt.py +14 -33
- cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
- cribl_control_plane/models/inputoffice365service.py +16 -35
- cribl_control_plane/models/inputopentelemetry.py +16 -38
- cribl_control_plane/models/inputprometheus.py +18 -50
- cribl_control_plane/models/inputprometheusrw.py +10 -30
- cribl_control_plane/models/inputrawudp.py +4 -11
- cribl_control_plane/models/inputs3.py +8 -21
- cribl_control_plane/models/inputs3inventory.py +10 -26
- cribl_control_plane/models/inputsecuritylake.py +10 -27
- cribl_control_plane/models/inputsnmp.py +6 -16
- cribl_control_plane/models/inputsplunk.py +12 -33
- cribl_control_plane/models/inputsplunkhec.py +10 -29
- cribl_control_plane/models/inputsplunksearch.py +14 -33
- cribl_control_plane/models/inputsqs.py +10 -27
- cribl_control_plane/models/inputsyslog.py +16 -43
- cribl_control_plane/models/inputsystemmetrics.py +24 -48
- cribl_control_plane/models/inputsystemstate.py +8 -16
- cribl_control_plane/models/inputtcp.py +10 -29
- cribl_control_plane/models/inputtcpjson.py +10 -29
- cribl_control_plane/models/inputwef.py +14 -37
- cribl_control_plane/models/inputwindowsmetrics.py +24 -44
- cribl_control_plane/models/inputwineventlogs.py +10 -20
- cribl_control_plane/models/inputwiz.py +8 -21
- cribl_control_plane/models/inputwizwebhook.py +8 -23
- cribl_control_plane/models/inputzscalerhec.py +10 -29
- cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
- cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
- cribl_control_plane/models/masterworkerentry.py +2 -7
- cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
- cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeupgradestate.py +1 -2
- cribl_control_plane/models/nodeupgradestatus.py +5 -13
- cribl_control_plane/models/outputazureblob.py +18 -48
- cribl_control_plane/models/outputazuredataexplorer.py +28 -73
- cribl_control_plane/models/outputazureeventhub.py +18 -40
- cribl_control_plane/models/outputazurelogs.py +12 -35
- cribl_control_plane/models/outputclickhouse.py +20 -55
- cribl_control_plane/models/outputcloudwatch.py +10 -29
- cribl_control_plane/models/outputconfluentcloud.py +32 -77
- cribl_control_plane/models/outputcriblhttp.py +16 -44
- cribl_control_plane/models/outputcribllake.py +16 -46
- cribl_control_plane/models/outputcribltcp.py +18 -45
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
- cribl_control_plane/models/outputdatadog.py +20 -48
- cribl_control_plane/models/outputdataset.py +18 -46
- cribl_control_plane/models/outputdiskspool.py +2 -7
- cribl_control_plane/models/outputdls3.py +24 -68
- cribl_control_plane/models/outputdynatracehttp.py +20 -53
- cribl_control_plane/models/outputdynatraceotlp.py +22 -55
- cribl_control_plane/models/outputelastic.py +18 -43
- cribl_control_plane/models/outputelasticcloud.py +12 -36
- cribl_control_plane/models/outputexabeam.py +10 -29
- cribl_control_plane/models/outputfilesystem.py +14 -39
- cribl_control_plane/models/outputgooglechronicle.py +16 -50
- cribl_control_plane/models/outputgooglecloudlogging.py +14 -41
- cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
- cribl_control_plane/models/outputgooglepubsub.py +10 -31
- cribl_control_plane/models/outputgrafanacloud.py +32 -97
- cribl_control_plane/models/outputgraphite.py +14 -31
- cribl_control_plane/models/outputhoneycomb.py +12 -35
- cribl_control_plane/models/outputhumiohec.py +16 -43
- cribl_control_plane/models/outputinfluxdb.py +16 -42
- cribl_control_plane/models/outputkafka.py +28 -74
- cribl_control_plane/models/outputkinesis.py +16 -40
- cribl_control_plane/models/outputloki.py +16 -41
- cribl_control_plane/models/outputminio.py +24 -65
- cribl_control_plane/models/outputmsk.py +30 -82
- cribl_control_plane/models/outputnewrelic.py +18 -43
- cribl_control_plane/models/outputnewrelicevents.py +14 -41
- cribl_control_plane/models/outputopentelemetry.py +26 -67
- cribl_control_plane/models/outputprometheus.py +12 -35
- cribl_control_plane/models/outputring.py +8 -19
- cribl_control_plane/models/outputs3.py +26 -68
- cribl_control_plane/models/outputsecuritylake.py +18 -52
- cribl_control_plane/models/outputsentinel.py +18 -45
- cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
- cribl_control_plane/models/outputservicenow.py +24 -60
- cribl_control_plane/models/outputsignalfx.py +14 -37
- cribl_control_plane/models/outputsns.py +14 -36
- cribl_control_plane/models/outputsplunk.py +24 -60
- cribl_control_plane/models/outputsplunkhec.py +12 -35
- cribl_control_plane/models/outputsplunklb.py +30 -77
- cribl_control_plane/models/outputsqs.py +16 -41
- cribl_control_plane/models/outputstatsd.py +14 -30
- cribl_control_plane/models/outputstatsdext.py +12 -29
- cribl_control_plane/models/outputsumologic.py +12 -35
- cribl_control_plane/models/outputsyslog.py +24 -58
- cribl_control_plane/models/outputtcpjson.py +20 -52
- cribl_control_plane/models/outputwavefront.py +12 -35
- cribl_control_plane/models/outputwebhook.py +22 -58
- cribl_control_plane/models/outputxsiam.py +14 -35
- cribl_control_plane/models/productscore.py +1 -2
- cribl_control_plane/models/rbacresource.py +1 -2
- cribl_control_plane/models/resourcepolicy.py +2 -4
- cribl_control_plane/models/runnablejobcollection.py +13 -30
- cribl_control_plane/models/runnablejobexecutor.py +4 -13
- cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
- cribl_control_plane/models/workertypes.py +1 -2
- {cribl_control_plane-0.0.44a2.dist-info → cribl_control_plane-0.0.45.dist-info}/METADATA +1 -1
- {cribl_control_plane-0.0.44a2.dist-info → cribl_control_plane-0.0.45.dist-info}/RECORD +158 -158
- {cribl_control_plane-0.0.44a2.dist-info → cribl_control_plane-0.0.45.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputsentineloneaisiem.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,7 +12,7 @@ class OutputSentinelOneAiSiemType(str, Enum):
     SENTINEL_ONE_AI_SIEM = "sentinel_one_ai_siem"
 
 
-class OutputSentinelOneAiSiemRegion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSentinelOneAiSiemRegion(str, Enum):
     r"""The SentinelOne region to send events to. In most cases you can find the region by either looking at your SentinelOne URL or knowing what geographic region your SentinelOne instance is contained in."""
 
     US = "US"
@@ -27,7 +24,7 @@ class OutputSentinelOneAiSiemRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     CUSTOM = "Custom"
 
 
-class AISIEMEndpointPath(str, Enum, metaclass=utils.OpenEnumMeta):
+class AISIEMEndpointPath(str, Enum):
     r"""Endpoint to send events to. Use /services/collector/event for structured JSON payloads with standard HEC top-level fields. Use /services/collector/raw for unstructured log lines (plain text)."""
 
     ROOT_SERVICES_COLLECTOR_EVENT = "/services/collector/event"
@@ -45,9 +42,7 @@ class OutputSentinelOneAiSiemExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputSentinelOneAiSiemFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputSentinelOneAiSiemFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -55,9 +50,7 @@ class OutputSentinelOneAiSiemFailedRequestLoggingMode(
     NONE = "none"
 
 
-class OutputSentinelOneAiSiemAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputSentinelOneAiSiemAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
     MANUAL = "manual"
@@ -118,9 +111,7 @@ class OutputSentinelOneAiSiemTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputSentinelOneAiSiemBackpressureBehavior(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputSentinelOneAiSiemBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -128,21 +119,21 @@ class OutputSentinelOneAiSiemBackpressureBehavior(
     QUEUE = "queue"
 
 
-class OutputSentinelOneAiSiemCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSentinelOneAiSiemCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputSentinelOneAiSiemQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSentinelOneAiSiemQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputSentinelOneAiSiemMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSentinelOneAiSiemMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -284,15 +275,12 @@ class OutputSentinelOneAiSiem(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    region: Annotated[
-        Optional[OutputSentinelOneAiSiemRegion],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputSentinelOneAiSiemRegion.US
+    region: Optional[OutputSentinelOneAiSiemRegion] = OutputSentinelOneAiSiemRegion.US
     r"""The SentinelOne region to send events to. In most cases you can find the region by either looking at your SentinelOne URL or knowing what geographic region your SentinelOne instance is contained in."""
 
-    endpoint: Annotated[
-        Optional[AISIEMEndpointPath], PlainValidator(validate_open_enum(False))
-    ] = AISIEMEndpointPath.ROOT_SERVICES_COLLECTOR_EVENT
+    endpoint: Optional[AISIEMEndpointPath] = (
+        AISIEMEndpointPath.ROOT_SERVICES_COLLECTOR_EVENT
+    )
     r"""Endpoint to send events to. Use /services/collector/event for structured JSON payloads with standard HEC top-level fields. Use /services/collector/raw for unstructured log lines (plain text)."""
 
     concurrency: Optional[float] = 5
@@ -334,10 +322,7 @@ class OutputSentinelOneAiSiem(BaseModel):
     r"""Headers to add to all events"""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputSentinelOneAiSiemFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSentinelOneAiSiemFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputSentinelOneAiSiemFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -348,10 +333,7 @@ class OutputSentinelOneAiSiem(BaseModel):
     r"""List of headers that are safe to log in plain text"""
 
     auth_type: Annotated[
-        Annotated[
-            Optional[OutputSentinelOneAiSiemAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSentinelOneAiSiemAuthenticationMethod],
        pydantic.Field(alias="authType"),
     ] = OutputSentinelOneAiSiemAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
@@ -373,10 +355,7 @@ class OutputSentinelOneAiSiem(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSentinelOneAiSiemBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSentinelOneAiSiemBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSentinelOneAiSiemBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -472,29 +451,18 @@ class OutputSentinelOneAiSiem(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSentinelOneAiSiemCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSentinelOneAiSiemCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSentinelOneAiSiemCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSentinelOneAiSiemQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSentinelOneAiSiemQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSentinelOneAiSiemQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSentinelOneAiSiemMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputSentinelOneAiSiemMode], pydantic.Field(alias="pqMode")
     ] = OutputSentinelOneAiSiemMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
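The same edit repeats across the model files listed above: enum helper classes drop metaclass=utils.OpenEnumMeta, and the fields that used them drop PlainValidator(validate_open_enum(False)). As a rough illustration of what that means for callers, the sketch below is not taken from the SDK; Region and Output are illustrative stand-ins for generated classes such as OutputSentinelOneAiSiemRegion, and it assumes the removed open-enum plumbing previously let unrecognized string values pass through, whereas a plain str Enum field now fails pydantic validation for non-member values.

# Minimal sketch, not from this package: Region and Output stand in for
# generated classes such as OutputSentinelOneAiSiemRegion / OutputSentinelOneAiSiem.
from enum import Enum
from typing import Optional

import pydantic


class Region(str, Enum):
    US = "US"
    CUSTOM = "Custom"


class Output(pydantic.BaseModel):
    region: Optional[Region] = Region.US


print(Output(region="US").region)  # Region.US
try:
    Output(region="EU-DEV")  # not a member of the closed enum
except pydantic.ValidationError as err:
    print("rejected:", err.error_count(), "validation error(s)")

Under the previous open-enum pattern the same unrecognized value would presumably have been accepted and carried through as a string; with 0.0.45's closed enums it is rejected at model validation.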
cribl_control_plane/models/outputservicenow.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,20 +12,20 @@ class OutputServiceNowType(str, Enum):
     SERVICE_NOW = "service_now"
 
 
-class OutputServiceNowOTLPVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowOTLPVersion(str, Enum):
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""
 
     ONE_DOT_3_DOT_1 = "1.3.1"
 
 
-class OutputServiceNowProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowProtocol(str, Enum):
     r"""Select a transport option for OpenTelemetry"""
 
     GRPC = "grpc"
     HTTP = "http"
 
 
-class OutputServiceNowCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowCompressCompression(str, Enum):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
 
     NONE = "none"
@@ -36,7 +33,7 @@ class OutputServiceNowCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     GZIP = "gzip"
 
 
-class OutputServiceNowHTTPCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowHTTPCompressCompression(str, Enum):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
 
     NONE = "none"
@@ -54,7 +51,7 @@ class OutputServiceNowMetadatum(BaseModel):
     key: Optional[str] = ""
 
 
-class OutputServiceNowFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -62,7 +59,7 @@ class OutputServiceNowFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     NONE = "none"
 
 
-class OutputServiceNowBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -135,14 +132,14 @@ class OutputServiceNowTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputServiceNowMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputServiceNowMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -197,37 +194,29 @@ class OutputServiceNowTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[OutputServiceNowMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputServiceNowMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[OutputServiceNowMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputServiceNowMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None
 
 
-class OutputServiceNowPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputServiceNowQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputServiceNowMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputServiceNowMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -360,11 +349,7 @@ class OutputServiceNow(BaseModel):
     )
 
     otlp_version: Annotated[
-        Annotated[
-            Optional[OutputServiceNowOTLPVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="otlpVersion"),
+        Optional[OutputServiceNowOTLPVersion], pydantic.Field(alias="otlpVersion")
     ] = OutputServiceNowOTLPVersion.ONE_DOT_3_DOT_1
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""
 
@@ -373,22 +358,16 @@ class OutputServiceNow(BaseModel):
     ] = 2048
     r"""Maximum size, in KB, of the request body"""
 
-    protocol: Annotated[
-        Optional[OutputServiceNowProtocol], PlainValidator(validate_open_enum(False))
-    ] = OutputServiceNowProtocol.GRPC
+    protocol: Optional[OutputServiceNowProtocol] = OutputServiceNowProtocol.GRPC
     r"""Select a transport option for OpenTelemetry"""
 
-    compress: Annotated[
-        Optional[OutputServiceNowCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputServiceNowCompressCompression.GZIP
+    compress: Optional[OutputServiceNowCompressCompression] = (
+        OutputServiceNowCompressCompression.GZIP
+    )
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
 
     http_compress: Annotated[
-        Annotated[
-            Optional[OutputServiceNowHTTPCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputServiceNowHTTPCompressCompression],
         pydantic.Field(alias="httpCompress"),
     ] = OutputServiceNowHTTPCompressCompression.GZIP
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
@@ -423,10 +402,7 @@ class OutputServiceNow(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputServiceNowFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputServiceNowFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputServiceNowFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -445,10 +421,7 @@ class OutputServiceNow(BaseModel):
     r"""Disable to close the connection immediately after sending the outgoing request"""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputServiceNowBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputServiceNowBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputServiceNowBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -511,28 +484,19 @@ class OutputServiceNow(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputServiceNowPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputServiceNowPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputServiceNowPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputServiceNowQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputServiceNowQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputServiceNowQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputServiceNowMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputServiceNowMode], pydantic.Field(alias="pqMode")
     ] = OutputServiceNowMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
cribl_control_plane/models/outputsignalfx.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,7 +12,7 @@ class OutputSignalfxType(str, Enum):
     SIGNALFX = "signalfx"
 
 
-class OutputSignalfxAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxAuthenticationMethod(str, Enum):
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
     MANUAL = "manual"
@@ -33,7 +30,7 @@ class OutputSignalfxExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputSignalfxFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -95,7 +92,7 @@ class OutputSignalfxTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputSignalfxBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -103,21 +100,21 @@ class OutputSignalfxBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputSignalfxCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputSignalfxQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputSignalfxMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSignalfxMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -224,11 +221,7 @@ class OutputSignalfx(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""
 
     auth_type: Annotated[
-        Annotated[
-            Optional[OutputSignalfxAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputSignalfxAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputSignalfxAuthenticationMethod.MANUAL
     r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
 
@@ -279,10 +272,7 @@ class OutputSignalfx(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputSignalfxFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSignalfxFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputSignalfxFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -309,10 +299,7 @@ class OutputSignalfx(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSignalfxBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSignalfxBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputSignalfxBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -339,29 +326,19 @@ class OutputSignalfx(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSignalfxCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSignalfxCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSignalfxCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSignalfxQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSignalfxQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputSignalfxQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputSignalfxMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSignalfxMode.ERROR
+    pq_mode: Annotated[Optional[OutputSignalfxMode], pydantic.Field(alias="pqMode")] = (
+        OutputSignalfxMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
cribl_control_plane/models/outputsns.py

@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,7 +12,7 @@ class OutputSnsType(str, Enum):
     SNS = "sns"
 
 
-class OutputSnsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -23,14 +20,14 @@ class OutputSnsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"
 
 
-class OutputSnsSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsSignatureVersion(str, Enum):
     r"""Signature version to use for signing SNS requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputSnsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -38,21 +35,21 @@ class OutputSnsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputSnsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputSnsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputSnsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputSnsMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -158,10 +155,7 @@ class OutputSns(BaseModel):
     r"""Maximum number of retries before the output returns an error. Note that not all errors are retryable. The retries use an exponential backoff policy."""
 
     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputSnsAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputSnsAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputSnsAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -177,11 +171,7 @@ class OutputSns(BaseModel):
     r"""SNS service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to SNS-compatible endpoint."""
 
     signature_version: Annotated[
-        Annotated[
-            Optional[OutputSnsSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="signatureVersion"),
+        Optional[OutputSnsSignatureVersion], pydantic.Field(alias="signatureVersion")
     ] = OutputSnsSignatureVersion.V4
     r"""Signature version to use for signing SNS requests"""
 
@@ -216,11 +206,7 @@ class OutputSns(BaseModel):
     r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSnsBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="onBackpressure"),
+        Optional[OutputSnsBackpressureBehavior], pydantic.Field(alias="onBackpressure")
     ] = OutputSnsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
@@ -245,26 +231,18 @@ class OutputSns(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputSnsCompression], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputSnsCompression], pydantic.Field(alias="pqCompress")
     ] = OutputSnsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputSnsQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqOnBackpressure"),
+        Optional[OutputSnsQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
     ] = OutputSnsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[Optional[OutputSnsMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputSnsMode.ERROR
+    pq_mode: Annotated[Optional[OutputSnsMode], pydantic.Field(alias="pqMode")] = (
+        OutputSnsMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[