cribl-control-plane 0.0.50rc2__py3-none-any.whl → 0.0.51__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane has been flagged as possibly problematic.
- cribl_control_plane/_version.py +5 -3
- cribl_control_plane/errors/healthstatus_error.py +2 -8
- cribl_control_plane/groups_sdk.py +4 -4
- cribl_control_plane/health.py +2 -6
- cribl_control_plane/models/__init__.py +31 -56
- cribl_control_plane/models/appmode.py +13 -0
- cribl_control_plane/models/cacheconnection.py +2 -10
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
- cribl_control_plane/models/cloudprovider.py +1 -2
- cribl_control_plane/models/configgroup.py +4 -24
- cribl_control_plane/models/configgroupcloud.py +2 -6
- cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
- cribl_control_plane/models/createinputhectokenbyidop.py +5 -6
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/cribllakedataset.py +2 -8
- cribl_control_plane/models/datasetmetadata.py +2 -8
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
- cribl_control_plane/models/getsummaryop.py +2 -7
- cribl_control_plane/models/getversionshowop.py +5 -6
- cribl_control_plane/models/gitinfo.py +3 -14
- cribl_control_plane/models/hbcriblinfo.py +3 -24
- cribl_control_plane/models/healthstatus.py +4 -7
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/input.py +63 -65
- cribl_control_plane/models/inputappscope.py +14 -34
- cribl_control_plane/models/inputazureblob.py +6 -17
- cribl_control_plane/models/inputcollection.py +4 -11
- cribl_control_plane/models/inputconfluentcloud.py +32 -41
- cribl_control_plane/models/inputcribl.py +4 -11
- cribl_control_plane/models/inputcriblhttp.py +8 -23
- cribl_control_plane/models/inputcribllakehttp.py +10 -22
- cribl_control_plane/models/inputcriblmetrics.py +4 -12
- cribl_control_plane/models/inputcribltcp.py +8 -23
- cribl_control_plane/models/inputcrowdstrike.py +10 -26
- cribl_control_plane/models/inputdatadogagent.py +8 -24
- cribl_control_plane/models/inputdatagen.py +4 -11
- cribl_control_plane/models/inputedgeprometheus.py +24 -58
- cribl_control_plane/models/inputelastic.py +14 -40
- cribl_control_plane/models/inputeventhub.py +6 -15
- cribl_control_plane/models/inputexec.py +6 -14
- cribl_control_plane/models/inputfile.py +6 -15
- cribl_control_plane/models/inputfirehose.py +8 -23
- cribl_control_plane/models/inputgooglepubsub.py +6 -19
- cribl_control_plane/models/inputgrafana.py +24 -67
- cribl_control_plane/models/inputhttp.py +8 -23
- cribl_control_plane/models/inputhttpraw.py +8 -23
- cribl_control_plane/models/inputjournalfiles.py +4 -12
- cribl_control_plane/models/inputkafka.py +28 -41
- cribl_control_plane/models/inputkinesis.py +14 -38
- cribl_control_plane/models/inputkubeevents.py +4 -11
- cribl_control_plane/models/inputkubelogs.py +8 -16
- cribl_control_plane/models/inputkubemetrics.py +8 -16
- cribl_control_plane/models/inputloki.py +10 -29
- cribl_control_plane/models/inputmetrics.py +8 -23
- cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
- cribl_control_plane/models/inputmsk.py +30 -48
- cribl_control_plane/models/inputnetflow.py +4 -11
- cribl_control_plane/models/inputoffice365mgmt.py +14 -33
- cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
- cribl_control_plane/models/inputoffice365service.py +16 -35
- cribl_control_plane/models/inputopentelemetry.py +16 -38
- cribl_control_plane/models/inputprometheus.py +18 -50
- cribl_control_plane/models/inputprometheusrw.py +10 -30
- cribl_control_plane/models/inputrawudp.py +4 -11
- cribl_control_plane/models/inputs3.py +8 -21
- cribl_control_plane/models/inputs3inventory.py +10 -26
- cribl_control_plane/models/inputsecuritylake.py +10 -27
- cribl_control_plane/models/inputsnmp.py +6 -16
- cribl_control_plane/models/inputsplunk.py +12 -33
- cribl_control_plane/models/inputsplunkhec.py +10 -29
- cribl_control_plane/models/inputsplunksearch.py +14 -33
- cribl_control_plane/models/inputsqs.py +10 -27
- cribl_control_plane/models/inputsyslog.py +16 -43
- cribl_control_plane/models/inputsystemmetrics.py +24 -48
- cribl_control_plane/models/inputsystemstate.py +8 -16
- cribl_control_plane/models/inputtcp.py +10 -29
- cribl_control_plane/models/inputtcpjson.py +10 -29
- cribl_control_plane/models/inputwef.py +14 -37
- cribl_control_plane/models/inputwindowsmetrics.py +24 -44
- cribl_control_plane/models/inputwineventlogs.py +10 -20
- cribl_control_plane/models/inputwiz.py +8 -21
- cribl_control_plane/models/inputwizwebhook.py +8 -23
- cribl_control_plane/models/inputzscalerhec.py +10 -29
- cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
- cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
- cribl_control_plane/models/masterworkerentry.py +2 -7
- cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
- cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeprovidedinfo.py +0 -3
- cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
- cribl_control_plane/models/nodeupgradestate.py +1 -2
- cribl_control_plane/models/nodeupgradestatus.py +5 -13
- cribl_control_plane/models/output.py +79 -84
- cribl_control_plane/models/outputazureblob.py +18 -48
- cribl_control_plane/models/outputazuredataexplorer.py +28 -73
- cribl_control_plane/models/outputazureeventhub.py +18 -40
- cribl_control_plane/models/outputazurelogs.py +12 -35
- cribl_control_plane/models/outputclickhouse.py +20 -55
- cribl_control_plane/models/outputcloudwatch.py +10 -29
- cribl_control_plane/models/outputconfluentcloud.py +44 -71
- cribl_control_plane/models/outputcriblhttp.py +16 -44
- cribl_control_plane/models/outputcribllake.py +16 -46
- cribl_control_plane/models/outputcribltcp.py +18 -45
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
- cribl_control_plane/models/outputdatadog.py +20 -48
- cribl_control_plane/models/outputdataset.py +18 -46
- cribl_control_plane/models/outputdiskspool.py +2 -7
- cribl_control_plane/models/outputdls3.py +24 -68
- cribl_control_plane/models/outputdynatracehttp.py +20 -53
- cribl_control_plane/models/outputdynatraceotlp.py +22 -55
- cribl_control_plane/models/outputelastic.py +18 -43
- cribl_control_plane/models/outputelasticcloud.py +12 -36
- cribl_control_plane/models/outputexabeam.py +10 -29
- cribl_control_plane/models/outputfilesystem.py +14 -39
- cribl_control_plane/models/outputgooglechronicle.py +16 -50
- cribl_control_plane/models/outputgooglecloudlogging.py +18 -50
- cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
- cribl_control_plane/models/outputgooglepubsub.py +10 -31
- cribl_control_plane/models/outputgrafanacloud.py +32 -97
- cribl_control_plane/models/outputgraphite.py +14 -31
- cribl_control_plane/models/outputhoneycomb.py +12 -35
- cribl_control_plane/models/outputhumiohec.py +16 -43
- cribl_control_plane/models/outputinfluxdb.py +16 -42
- cribl_control_plane/models/outputkafka.py +40 -69
- cribl_control_plane/models/outputkinesis.py +16 -40
- cribl_control_plane/models/outputloki.py +16 -41
- cribl_control_plane/models/outputminio.py +24 -65
- cribl_control_plane/models/outputmsk.py +42 -77
- cribl_control_plane/models/outputnewrelic.py +18 -43
- cribl_control_plane/models/outputnewrelicevents.py +14 -41
- cribl_control_plane/models/outputopentelemetry.py +26 -67
- cribl_control_plane/models/outputprometheus.py +12 -35
- cribl_control_plane/models/outputring.py +8 -19
- cribl_control_plane/models/outputs3.py +26 -68
- cribl_control_plane/models/outputsecuritylake.py +18 -52
- cribl_control_plane/models/outputsentinel.py +18 -45
- cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
- cribl_control_plane/models/outputservicenow.py +24 -60
- cribl_control_plane/models/outputsignalfx.py +14 -37
- cribl_control_plane/models/outputsns.py +14 -36
- cribl_control_plane/models/outputsplunk.py +24 -60
- cribl_control_plane/models/outputsplunkhec.py +12 -35
- cribl_control_plane/models/outputsplunklb.py +30 -77
- cribl_control_plane/models/outputsqs.py +16 -41
- cribl_control_plane/models/outputstatsd.py +14 -30
- cribl_control_plane/models/outputstatsdext.py +12 -29
- cribl_control_plane/models/outputsumologic.py +12 -35
- cribl_control_plane/models/outputsyslog.py +24 -58
- cribl_control_plane/models/outputtcpjson.py +20 -52
- cribl_control_plane/models/outputwavefront.py +12 -35
- cribl_control_plane/models/outputwebhook.py +22 -58
- cribl_control_plane/models/outputxsiam.py +14 -35
- cribl_control_plane/models/productscore.py +1 -2
- cribl_control_plane/models/rbacresource.py +1 -2
- cribl_control_plane/models/resourcepolicy.py +2 -4
- cribl_control_plane/models/routecloneconf.py +13 -0
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/runnablejobcollection.py +13 -30
- cribl_control_plane/models/runnablejobexecutor.py +4 -13
- cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +5 -6
- cribl_control_plane/models/workertypes.py +1 -2
- {cribl_control_plane-0.0.50rc2.dist-info → cribl_control_plane-0.0.51.dist-info}/METADATA +14 -5
- cribl_control_plane-0.0.51.dist-info/RECORD +325 -0
- cribl_control_plane/models/error.py +0 -16
- cribl_control_plane/models/gethealthinfoop.py +0 -17
- cribl_control_plane/models/gitshowresult.py +0 -19
- cribl_control_plane/models/outputdatabricks.py +0 -282
- cribl_control_plane-0.0.50rc2.dist-info/RECORD +0 -327
- {cribl_control_plane-0.0.50rc2.dist-info → cribl_control_plane-0.0.51.dist-info}/WHEEL +0 -0
--- a/cribl_control_plane/models/outputmsk.py
+++ b/cribl_control_plane/models/outputmsk.py
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,7 +12,7 @@ class OutputMskType(str, Enum):
     MSK = "msk"
 
 
-class OutputMskAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskAcknowledgments(int, Enum):
     r"""Control the number of required acknowledgments."""
 
     ONE = 1
@@ -23,7 +20,7 @@ class OutputMskAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     MINUS_1 = -1
 
 
-class OutputMskRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskRecordDataFormat(str, Enum):
     r"""Format to use to serialize events before writing to Kafka."""
 
     JSON = "json"
@@ -31,7 +28,7 @@ class OutputMskRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PROTOBUF = "protobuf"
 
 
-class OutputMskCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskCompression(str, Enum):
     r"""Codec to use to compress the data before sending to Kafka"""
 
     NONE = "none"
@@ -40,6 +37,13 @@ class OutputMskCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     LZ4 = "lz4"
 
 
+class OutputMskSchemaType(str, Enum):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class OutputMskAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -59,18 +63,14 @@ class OutputMskAuth(BaseModel):
     r"""Select or create a secret that references your credentials"""
 
 
-class OutputMskKafkaSchemaRegistryMinimumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputMskKafkaSchemaRegistryMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputMskKafkaSchemaRegistryMaximumTLSVersion(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputMskKafkaSchemaRegistryMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -130,18 +130,12 @@ class OutputMskKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[OutputMskKafkaSchemaRegistryMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputMskKafkaSchemaRegistryMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[OutputMskKafkaSchemaRegistryMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputMskKafkaSchemaRegistryMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
@@ -150,6 +144,8 @@ class OutputMskKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[OutputMskSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -173,6 +169,11 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
+    schema_type: Annotated[
+        Optional[OutputMskSchemaType], pydantic.Field(alias="schemaType")
+    ] = OutputMskSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
@@ -202,7 +203,7 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
     r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""
 
 
-class OutputMskAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskAuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -210,21 +211,21 @@ class OutputMskAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"
 
 
-class OutputMskSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskSignatureVersion(str, Enum):
     r"""Signature version to use for signing MSK cluster requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputMskMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputMskMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -284,23 +285,15 @@ class OutputMskTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Annotated[
-            Optional[OutputMskMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="minVersion"),
+        Optional[OutputMskMinimumTLSVersion], pydantic.Field(alias="minVersion")
     ] = None
 
     max_version: Annotated[
-        Annotated[
-            Optional[OutputMskMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="maxVersion"),
+        Optional[OutputMskMaximumTLSVersion], pydantic.Field(alias="maxVersion")
     ] = None
 
 
-class OutputMskBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -308,21 +301,21 @@ class OutputMskBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputMskPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputMskQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputMskMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputMskMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -459,23 +452,15 @@ class OutputMsk(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    ack: Annotated[
-        Optional[OutputMskAcknowledgments], PlainValidator(validate_open_enum(True))
-    ] = OutputMskAcknowledgments.ONE
+    ack: Optional[OutputMskAcknowledgments] = OutputMskAcknowledgments.ONE
     r"""Control the number of required acknowledgments."""
 
     format_: Annotated[
-        Annotated[
-            Optional[OutputMskRecordDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputMskRecordDataFormat], pydantic.Field(alias="format")
     ] = OutputMskRecordDataFormat.JSON
     r"""Format to use to serialize events before writing to Kafka."""
 
-    compression: Annotated[
-        Optional[OutputMskCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputMskCompression.GZIP
+    compression: Optional[OutputMskCompression] = OutputMskCompression.GZIP
     r"""Codec to use to compress the data before sending to Kafka"""
 
     max_record_size_kb: Annotated[
@@ -533,10 +518,7 @@ class OutputMsk(BaseModel):
     r"""Specifies a time window during which @{product} can reauthenticate if needed. Creates the window measuring backward from the moment when credentials are set to expire."""
 
     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputMskAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputMskAuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputMskAuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -549,11 +531,7 @@ class OutputMsk(BaseModel):
     r"""MSK cluster service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to MSK cluster-compatible endpoint."""
 
     signature_version: Annotated[
-        Annotated[
-            Optional[OutputMskSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="signatureVersion"),
+        Optional[OutputMskSignatureVersion], pydantic.Field(alias="signatureVersion")
     ] = OutputMskSignatureVersion.V4
     r"""Signature version to use for signing MSK cluster requests"""
 
@@ -590,11 +568,7 @@ class OutputMsk(BaseModel):
     tls: Optional[OutputMskTLSSettingsClientSide] = None
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputMskBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="onBackpressure"),
+        Optional[OutputMskBackpressureBehavior], pydantic.Field(alias="onBackpressure")
     ] = OutputMskBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
@@ -624,27 +598,18 @@ class OutputMsk(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputMskPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputMskPqCompressCompression], pydantic.Field(alias="pqCompress")
     ] = OutputMskPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputMskQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqOnBackpressure"),
+        Optional[OutputMskQueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
    ] = OutputMskQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[Optional[OutputMskMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputMskMode.ERROR
+    pq_mode: Annotated[Optional[OutputMskMode], pydantic.Field(alias="pqMode")] = (
+        OutputMskMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
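The dominant change in this diff (and in the other two shown below) is the move from "open" enums, built with `metaclass=utils.OpenEnumMeta` plus `PlainValidator(validate_open_enum(...))`, to plain closed `Enum` classes. A minimal sketch of the behavioral difference, assuming pydantic v2; `Compression` and `Example` are illustrative stand-ins, not classes from the package:

from enum import Enum
from typing import Optional

import pydantic


class Compression(str, Enum):
    # Closed enum, mirroring the 0.0.51 shape of e.g. OutputMskCompression
    NONE = "none"
    GZIP = "gzip"


class Example(pydantic.BaseModel):
    compression: Optional[Compression] = Compression.GZIP


# Known members still coerce from their string values:
print(Example(compression="gzip").compression)  # Compression.GZIP

try:
    # An unlisted value such as "zstd" would have passed through an
    # open enum; a closed Enum rejects it at validation time.
    Example(compression="zstd")
except pydantic.ValidationError as err:
    print(f"rejected: {err.error_count()} error(s)")

The practical consequence for SDK consumers appears to be that enum-typed config values outside the generated members now raise a ValidationError instead of round-tripping silently.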
--- a/cribl_control_plane/models/outputnewrelic.py
+++ b/cribl_control_plane/models/outputnewrelic.py
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,7 +12,7 @@ class OutputNewrelicType(str, Enum):
     NEWRELIC = "newrelic"
 
 
-class OutputNewrelicRegion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicRegion(str, Enum):
     r"""Which New Relic region endpoint to use."""
 
     US = "US"
@@ -23,7 +20,7 @@ class OutputNewrelicRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     CUSTOM = "Custom"
 
 
-class FieldName(str, Enum, metaclass=utils.OpenEnumMeta):
+class FieldName(str, Enum):
     SERVICE = "service"
     HOSTNAME = "hostname"
     TIMESTAMP = "timestamp"
@@ -37,7 +34,7 @@ class OutputNewrelicMetadatumTypedDict(TypedDict):
 
 
 class OutputNewrelicMetadatum(BaseModel):
-    name: Annotated[FieldName, PlainValidator(validate_open_enum(False))]
+    name: FieldName
 
     value: str
     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
@@ -54,7 +51,7 @@ class OutputNewrelicExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputNewrelicFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -116,7 +113,7 @@ class OutputNewrelicTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputNewrelicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -124,28 +121,28 @@ class OutputNewrelicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputNewrelicAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicAuthenticationMethod(str, Enum):
     r"""Enter API key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputNewrelicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputNewrelicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -260,9 +257,7 @@ class OutputNewrelic(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    region: Annotated[
-        Optional[OutputNewrelicRegion], PlainValidator(validate_open_enum(False))
-    ] = OutputNewrelicRegion.US
+    region: Optional[OutputNewrelicRegion] = OutputNewrelicRegion.US
     r"""Which New Relic region endpoint to use."""
 
     log_type: Annotated[Optional[str], pydantic.Field(alias="logType")] = ""
@@ -318,10 +313,7 @@ class OutputNewrelic(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputNewrelicFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -348,20 +340,13 @@ class OutputNewrelic(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputNewrelicBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputNewrelicBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Annotated[
-            Optional[OutputNewrelicAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputNewrelicAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputNewrelicAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""
 
@@ -388,29 +373,19 @@ class OutputNewrelic(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputNewrelicCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputNewrelicCompression], pydantic.Field(alias="pqCompress")
     ] = OutputNewrelicCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputNewrelicQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputNewrelicQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputNewrelicMode.ERROR
+    pq_mode: Annotated[Optional[OutputNewrelicMode], pydantic.Field(alias="pqMode")] = (
+        OutputNewrelicMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
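One place where the change tightens a required field rather than a default: `OutputNewrelicMetadatum.name` above is now typed directly as the closed `FieldName` enum. A short usage sketch; the value expression is made up, and the import path is the module shown in this diff:

from cribl_control_plane.models.outputnewrelic import (
    FieldName,
    OutputNewrelicMetadatum,
)

# `name` must now be one of the generated FieldName members;
# `value` remains a JavaScript expression string, per the field docstring.
meta = OutputNewrelicMetadatum(name=FieldName.SERVICE, value="'checkout-api'")
print(meta.model_dump())  # {'name': <FieldName.SERVICE: 'service'>, 'value': "'checkout-api'"}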
--- a/cribl_control_plane/models/outputnewrelicevents.py
+++ b/cribl_control_plane/models/outputnewrelicevents.py
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -15,7 +12,7 @@ class OutputNewrelicEventsType(str, Enum):
     NEWRELIC_EVENTS = "newrelic_events"
 
 
-class OutputNewrelicEventsRegion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsRegion(str, Enum):
     r"""Which New Relic region endpoint to use."""
 
     US = "US"
@@ -34,9 +31,7 @@ class OutputNewrelicEventsExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputNewrelicEventsFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputNewrelicEventsFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -98,7 +93,7 @@ class OutputNewrelicEventsTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputNewrelicEventsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -106,28 +101,28 @@ class OutputNewrelicEventsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     QUEUE = "queue"
 
 
-class OutputNewrelicEventsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsAuthenticationMethod(str, Enum):
     r"""Enter API key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputNewrelicEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputNewrelicEventsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputNewrelicEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -248,9 +243,7 @@ class OutputNewrelicEvents(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    region: Annotated[
-        Optional[OutputNewrelicEventsRegion], PlainValidator(validate_open_enum(False))
-    ] = OutputNewrelicEventsRegion.US
+    region: Optional[OutputNewrelicEventsRegion] = OutputNewrelicEventsRegion.US
     r"""Which New Relic region endpoint to use."""
 
     concurrency: Optional[float] = 5
@@ -297,10 +290,7 @@ class OutputNewrelicEvents(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicEventsFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputNewrelicEventsFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -327,19 +317,13 @@ class OutputNewrelicEvents(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicEventsBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputNewrelicEventsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicEventsAuthenticationMethod],
         pydantic.Field(alias="authType"),
     ] = OutputNewrelicEventsAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""
@@ -362,29 +346,18 @@ class OutputNewrelicEvents(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputNewrelicEventsCompression], pydantic.Field(alias="pqCompress")
     ] = OutputNewrelicEventsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicEventsQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputNewrelicEventsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputNewrelicEventsMode], pydantic.Field(alias="pqMode")
     ] = OutputNewrelicEventsMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
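Across all three files, the simplified fields keep their wire-format aliases via `pydantic.Field(alias=...)`; only the inner open-enum validator layer is removed. A self-contained sketch of how such an aliased enum field behaves, with illustrative `Mode`/`Sketch` names and pydantic v2 assumed:

from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class Mode(str, Enum):
    # Stands in for OutputNewrelicEventsMode and friends
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class Sketch(pydantic.BaseModel):
    # Same shape as the 0.0.51 pq_mode field, minus the removed
    # Annotated[..., PlainValidator(validate_open_enum(False))] wrapper
    pq_mode: Annotated[Optional[Mode], pydantic.Field(alias="pqMode")] = Mode.ERROR


s = Sketch.model_validate({"pqMode": "backpressure"})  # populate via the wire alias
print(s.pq_mode)                    # Mode.BACKPRESSURE
print(s.model_dump(by_alias=True))  # {'pqMode': <Mode.BACKPRESSURE: 'backpressure'>}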