cribl_control_plane-0.0.13-py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/__init__.py +17 -0
- cribl_control_plane/_hooks/__init__.py +5 -0
- cribl_control_plane/_hooks/clientcredentials.py +211 -0
- cribl_control_plane/_hooks/registration.py +13 -0
- cribl_control_plane/_hooks/sdkhooks.py +81 -0
- cribl_control_plane/_hooks/types.py +112 -0
- cribl_control_plane/_version.py +15 -0
- cribl_control_plane/auth_sdk.py +184 -0
- cribl_control_plane/basesdk.py +358 -0
- cribl_control_plane/errors/__init__.py +60 -0
- cribl_control_plane/errors/apierror.py +38 -0
- cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
- cribl_control_plane/errors/error.py +24 -0
- cribl_control_plane/errors/healthstatus_error.py +38 -0
- cribl_control_plane/errors/no_response_error.py +13 -0
- cribl_control_plane/errors/responsevalidationerror.py +25 -0
- cribl_control_plane/health.py +166 -0
- cribl_control_plane/httpclient.py +126 -0
- cribl_control_plane/models/__init__.py +7305 -0
- cribl_control_plane/models/addhectokenrequest.py +34 -0
- cribl_control_plane/models/authtoken.py +13 -0
- cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
- cribl_control_plane/models/createinputop.py +24 -0
- cribl_control_plane/models/createoutputop.py +24 -0
- cribl_control_plane/models/createoutputtestbyidop.py +46 -0
- cribl_control_plane/models/criblevent.py +14 -0
- cribl_control_plane/models/deleteinputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getinputbyidop.py +37 -0
- cribl_control_plane/models/getoutputbyidop.py +37 -0
- cribl_control_plane/models/getoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
- cribl_control_plane/models/healthstatus.py +36 -0
- cribl_control_plane/models/input.py +199 -0
- cribl_control_plane/models/inputappscope.py +448 -0
- cribl_control_plane/models/inputazureblob.py +308 -0
- cribl_control_plane/models/inputcollection.py +208 -0
- cribl_control_plane/models/inputconfluentcloud.py +585 -0
- cribl_control_plane/models/inputcribl.py +165 -0
- cribl_control_plane/models/inputcriblhttp.py +341 -0
- cribl_control_plane/models/inputcribllakehttp.py +342 -0
- cribl_control_plane/models/inputcriblmetrics.py +175 -0
- cribl_control_plane/models/inputcribltcp.py +299 -0
- cribl_control_plane/models/inputcrowdstrike.py +410 -0
- cribl_control_plane/models/inputdatadogagent.py +364 -0
- cribl_control_plane/models/inputdatagen.py +180 -0
- cribl_control_plane/models/inputedgeprometheus.py +551 -0
- cribl_control_plane/models/inputelastic.py +494 -0
- cribl_control_plane/models/inputeventhub.py +360 -0
- cribl_control_plane/models/inputexec.py +213 -0
- cribl_control_plane/models/inputfile.py +259 -0
- cribl_control_plane/models/inputfirehose.py +341 -0
- cribl_control_plane/models/inputgooglepubsub.py +247 -0
- cribl_control_plane/models/inputgrafana_union.py +1247 -0
- cribl_control_plane/models/inputhttp.py +403 -0
- cribl_control_plane/models/inputhttpraw.py +407 -0
- cribl_control_plane/models/inputjournalfiles.py +208 -0
- cribl_control_plane/models/inputkafka.py +581 -0
- cribl_control_plane/models/inputkinesis.py +363 -0
- cribl_control_plane/models/inputkubeevents.py +182 -0
- cribl_control_plane/models/inputkubelogs.py +256 -0
- cribl_control_plane/models/inputkubemetrics.py +233 -0
- cribl_control_plane/models/inputloki.py +468 -0
- cribl_control_plane/models/inputmetrics.py +290 -0
- cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
- cribl_control_plane/models/inputmsk.py +654 -0
- cribl_control_plane/models/inputnetflow.py +224 -0
- cribl_control_plane/models/inputoffice365mgmt.py +384 -0
- cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
- cribl_control_plane/models/inputoffice365service.py +377 -0
- cribl_control_plane/models/inputopentelemetry.py +516 -0
- cribl_control_plane/models/inputprometheus.py +464 -0
- cribl_control_plane/models/inputprometheusrw.py +470 -0
- cribl_control_plane/models/inputrawudp.py +207 -0
- cribl_control_plane/models/inputs3.py +416 -0
- cribl_control_plane/models/inputs3inventory.py +440 -0
- cribl_control_plane/models/inputsecuritylake.py +425 -0
- cribl_control_plane/models/inputsnmp.py +274 -0
- cribl_control_plane/models/inputsplunk.py +387 -0
- cribl_control_plane/models/inputsplunkhec.py +478 -0
- cribl_control_plane/models/inputsplunksearch.py +537 -0
- cribl_control_plane/models/inputsqs.py +320 -0
- cribl_control_plane/models/inputsyslog_union.py +759 -0
- cribl_control_plane/models/inputsystemmetrics.py +533 -0
- cribl_control_plane/models/inputsystemstate.py +417 -0
- cribl_control_plane/models/inputtcp.py +359 -0
- cribl_control_plane/models/inputtcpjson.py +334 -0
- cribl_control_plane/models/inputwef.py +498 -0
- cribl_control_plane/models/inputwindowsmetrics.py +457 -0
- cribl_control_plane/models/inputwineventlogs.py +222 -0
- cribl_control_plane/models/inputwiz.py +334 -0
- cribl_control_plane/models/inputzscalerhec.py +439 -0
- cribl_control_plane/models/listinputop.py +24 -0
- cribl_control_plane/models/listoutputop.py +24 -0
- cribl_control_plane/models/logininfo.py +16 -0
- cribl_control_plane/models/output.py +229 -0
- cribl_control_plane/models/outputazureblob.py +471 -0
- cribl_control_plane/models/outputazuredataexplorer.py +660 -0
- cribl_control_plane/models/outputazureeventhub.py +321 -0
- cribl_control_plane/models/outputazurelogs.py +386 -0
- cribl_control_plane/models/outputclickhouse.py +650 -0
- cribl_control_plane/models/outputcloudwatch.py +273 -0
- cribl_control_plane/models/outputconfluentcloud.py +591 -0
- cribl_control_plane/models/outputcriblhttp.py +494 -0
- cribl_control_plane/models/outputcribllake.py +396 -0
- cribl_control_plane/models/outputcribltcp.py +387 -0
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
- cribl_control_plane/models/outputdatadog.py +472 -0
- cribl_control_plane/models/outputdataset.py +437 -0
- cribl_control_plane/models/outputdefault.py +55 -0
- cribl_control_plane/models/outputdevnull.py +50 -0
- cribl_control_plane/models/outputdiskspool.py +89 -0
- cribl_control_plane/models/outputdls3.py +560 -0
- cribl_control_plane/models/outputdynatracehttp.py +454 -0
- cribl_control_plane/models/outputdynatraceotlp.py +486 -0
- cribl_control_plane/models/outputelastic.py +494 -0
- cribl_control_plane/models/outputelasticcloud.py +407 -0
- cribl_control_plane/models/outputexabeam.py +297 -0
- cribl_control_plane/models/outputfilesystem.py +357 -0
- cribl_control_plane/models/outputgooglechronicle.py +486 -0
- cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
- cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
- cribl_control_plane/models/outputgooglepubsub.py +274 -0
- cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
- cribl_control_plane/models/outputgraphite.py +225 -0
- cribl_control_plane/models/outputhoneycomb.py +369 -0
- cribl_control_plane/models/outputhumiohec.py +389 -0
- cribl_control_plane/models/outputinfluxdb.py +523 -0
- cribl_control_plane/models/outputkafka.py +581 -0
- cribl_control_plane/models/outputkinesis.py +312 -0
- cribl_control_plane/models/outputloki.py +425 -0
- cribl_control_plane/models/outputminio.py +512 -0
- cribl_control_plane/models/outputmsk.py +654 -0
- cribl_control_plane/models/outputnetflow.py +80 -0
- cribl_control_plane/models/outputnewrelic.py +424 -0
- cribl_control_plane/models/outputnewrelicevents.py +401 -0
- cribl_control_plane/models/outputopentelemetry.py +669 -0
- cribl_control_plane/models/outputprometheus.py +485 -0
- cribl_control_plane/models/outputring.py +121 -0
- cribl_control_plane/models/outputrouter.py +83 -0
- cribl_control_plane/models/outputs3.py +556 -0
- cribl_control_plane/models/outputsamplesresponse.py +14 -0
- cribl_control_plane/models/outputsecuritylake.py +505 -0
- cribl_control_plane/models/outputsentinel.py +488 -0
- cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
- cribl_control_plane/models/outputservicenow.py +543 -0
- cribl_control_plane/models/outputsignalfx.py +369 -0
- cribl_control_plane/models/outputsnmp.py +80 -0
- cribl_control_plane/models/outputsns.py +274 -0
- cribl_control_plane/models/outputsplunk.py +383 -0
- cribl_control_plane/models/outputsplunkhec.py +434 -0
- cribl_control_plane/models/outputsplunklb.py +558 -0
- cribl_control_plane/models/outputsqs.py +328 -0
- cribl_control_plane/models/outputstatsd.py +224 -0
- cribl_control_plane/models/outputstatsdext.py +225 -0
- cribl_control_plane/models/outputsumologic.py +378 -0
- cribl_control_plane/models/outputsyslog.py +415 -0
- cribl_control_plane/models/outputtcpjson.py +413 -0
- cribl_control_plane/models/outputtestrequest.py +15 -0
- cribl_control_plane/models/outputtestresponse.py +29 -0
- cribl_control_plane/models/outputwavefront.py +369 -0
- cribl_control_plane/models/outputwebhook.py +689 -0
- cribl_control_plane/models/outputxsiam.py +415 -0
- cribl_control_plane/models/schemeclientoauth.py +24 -0
- cribl_control_plane/models/security.py +36 -0
- cribl_control_plane/models/updatehectokenrequest.py +31 -0
- cribl_control_plane/models/updateinputbyidop.py +44 -0
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
- cribl_control_plane/models/updateoutputbyidop.py +44 -0
- cribl_control_plane/outputs.py +1615 -0
- cribl_control_plane/py.typed +1 -0
- cribl_control_plane/sdk.py +164 -0
- cribl_control_plane/sdkconfiguration.py +36 -0
- cribl_control_plane/sources.py +1355 -0
- cribl_control_plane/types/__init__.py +21 -0
- cribl_control_plane/types/basemodel.py +39 -0
- cribl_control_plane/utils/__init__.py +187 -0
- cribl_control_plane/utils/annotations.py +55 -0
- cribl_control_plane/utils/datetimes.py +23 -0
- cribl_control_plane/utils/enums.py +74 -0
- cribl_control_plane/utils/eventstreaming.py +238 -0
- cribl_control_plane/utils/forms.py +223 -0
- cribl_control_plane/utils/headers.py +136 -0
- cribl_control_plane/utils/logger.py +27 -0
- cribl_control_plane/utils/metadata.py +118 -0
- cribl_control_plane/utils/queryparams.py +205 -0
- cribl_control_plane/utils/requestbodies.py +66 -0
- cribl_control_plane/utils/retries.py +217 -0
- cribl_control_plane/utils/security.py +207 -0
- cribl_control_plane/utils/serializers.py +249 -0
- cribl_control_plane/utils/unmarshal_json_response.py +24 -0
- cribl_control_plane/utils/url.py +155 -0
- cribl_control_plane/utils/values.py +137 -0
- cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
- cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
- cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
cribl_control_plane/models/outputmsk.py
@@ -0,0 +1,654 @@
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from cribl_control_plane import utils
from cribl_control_plane.types import BaseModel
from cribl_control_plane.utils import validate_open_enum
from enum import Enum
import pydantic
from pydantic.functional_validators import PlainValidator
from typing import List, Optional
from typing_extensions import Annotated, NotRequired, TypedDict


class OutputMskType(str, Enum, metaclass=utils.OpenEnumMeta):
    MSK = "msk"


class OutputMskAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
    r"""Control the number of required acknowledgments."""

    ONE = 1
    ZERO = 0
    MINUS_1 = -1


class OutputMskRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Format to use to serialize events before writing to Kafka."""

    JSON = "json"
    RAW = "raw"
    PROTOBUF = "protobuf"


class OutputMskCompression(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Codec to use to compress the data before sending to Kafka"""

    NONE = "none"
    GZIP = "gzip"
    SNAPPY = "snappy"
    LZ4 = "lz4"


class OutputMskAuthTypedDict(TypedDict):
    r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""

    disabled: NotRequired[bool]
    credentials_secret: NotRequired[str]
    r"""Select or create a secret that references your credentials"""


class OutputMskAuth(BaseModel):
    r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""

    disabled: Optional[bool] = True

    credentials_secret: Annotated[
        Optional[str], pydantic.Field(alias="credentialsSecret")
    ] = None
    r"""Select or create a secret that references your credentials"""


class OutputMskKafkaSchemaRegistryMinimumTLSVersion(
    str, Enum, metaclass=utils.OpenEnumMeta
):
    TL_SV1 = "TLSv1"
    TL_SV1_1 = "TLSv1.1"
    TL_SV1_2 = "TLSv1.2"
    TL_SV1_3 = "TLSv1.3"


class OutputMskKafkaSchemaRegistryMaximumTLSVersion(
    str, Enum, metaclass=utils.OpenEnumMeta
):
    TL_SV1 = "TLSv1"
    TL_SV1_1 = "TLSv1.1"
    TL_SV1_2 = "TLSv1.2"
    TL_SV1_3 = "TLSv1.3"


class OutputMskKafkaSchemaRegistryTLSSettingsClientSideTypedDict(TypedDict):
    disabled: NotRequired[bool]
    reject_unauthorized: NotRequired[bool]
    r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another
    trusted CA (such as the system's). Defaults to Enabled. Overrides the toggle from Advanced Settings, when also present.
    """
    servername: NotRequired[str]
    r"""Server name for the SNI (Server Name Indication) TLS extension. It must be a host name, and not an IP address."""
    certificate_name: NotRequired[str]
    r"""The name of the predefined certificate"""
    ca_path: NotRequired[str]
    r"""Path on client in which to find CA certificates to verify the server's cert. PEM format. Can reference $ENV_VARS."""
    priv_key_path: NotRequired[str]
    r"""Path on client in which to find the private key to use. PEM format. Can reference $ENV_VARS."""
    cert_path: NotRequired[str]
    r"""Path on client in which to find certificates to use. PEM format. Can reference $ENV_VARS."""
    passphrase: NotRequired[str]
    r"""Passphrase to use to decrypt private key"""
    min_version: NotRequired[OutputMskKafkaSchemaRegistryMinimumTLSVersion]
    max_version: NotRequired[OutputMskKafkaSchemaRegistryMaximumTLSVersion]


class OutputMskKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
    disabled: Optional[bool] = True

    reject_unauthorized: Annotated[
        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
    ] = True
    r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another
    trusted CA (such as the system's). Defaults to Enabled. Overrides the toggle from Advanced Settings, when also present.
    """

    servername: Optional[str] = None
    r"""Server name for the SNI (Server Name Indication) TLS extension. It must be a host name, and not an IP address."""

    certificate_name: Annotated[
        Optional[str], pydantic.Field(alias="certificateName")
    ] = None
    r"""The name of the predefined certificate"""

    ca_path: Annotated[Optional[str], pydantic.Field(alias="caPath")] = None
    r"""Path on client in which to find CA certificates to verify the server's cert. PEM format. Can reference $ENV_VARS."""

    priv_key_path: Annotated[Optional[str], pydantic.Field(alias="privKeyPath")] = None
    r"""Path on client in which to find the private key to use. PEM format. Can reference $ENV_VARS."""

    cert_path: Annotated[Optional[str], pydantic.Field(alias="certPath")] = None
    r"""Path on client in which to find certificates to use. PEM format. Can reference $ENV_VARS."""

    passphrase: Optional[str] = None
    r"""Passphrase to use to decrypt private key"""

    min_version: Annotated[
        Annotated[
            Optional[OutputMskKafkaSchemaRegistryMinimumTLSVersion],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="minVersion"),
    ] = None

    max_version: Annotated[
        Annotated[
            Optional[OutputMskKafkaSchemaRegistryMaximumTLSVersion],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="maxVersion"),
    ] = None


class OutputMskKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
    disabled: NotRequired[bool]
    schema_registry_url: NotRequired[str]
    r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
    connection_timeout: NotRequired[float]
    r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
    request_timeout: NotRequired[float]
    r"""Maximum time to wait for the Schema Registry to respond to a request"""
    max_retries: NotRequired[float]
    r"""Maximum number of times to try fetching schemas from the Schema Registry"""
    auth: NotRequired[OutputMskAuthTypedDict]
    r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
    tls: NotRequired[OutputMskKafkaSchemaRegistryTLSSettingsClientSideTypedDict]
    default_key_schema_id: NotRequired[float]
    r"""Used when __keySchemaIdOut is not present, to transform key values, leave blank if key transformation is not required by default."""
    default_value_schema_id: NotRequired[float]
    r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
    disabled: Optional[bool] = True

    schema_registry_url: Annotated[
        Optional[str], pydantic.Field(alias="schemaRegistryURL")
    ] = "http://localhost:8081"
    r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""

    connection_timeout: Annotated[
        Optional[float], pydantic.Field(alias="connectionTimeout")
    ] = 30000
    r"""Maximum time to wait for a Schema Registry connection to complete successfully"""

    request_timeout: Annotated[
        Optional[float], pydantic.Field(alias="requestTimeout")
    ] = 30000
    r"""Maximum time to wait for the Schema Registry to respond to a request"""

    max_retries: Annotated[Optional[float], pydantic.Field(alias="maxRetries")] = 1
    r"""Maximum number of times to try fetching schemas from the Schema Registry"""

    auth: Optional[OutputMskAuth] = None
    r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""

    tls: Optional[OutputMskKafkaSchemaRegistryTLSSettingsClientSide] = None

    default_key_schema_id: Annotated[
        Optional[float], pydantic.Field(alias="defaultKeySchemaId")
    ] = None
    r"""Used when __keySchemaIdOut is not present, to transform key values, leave blank if key transformation is not required by default."""

    default_value_schema_id: Annotated[
        Optional[float], pydantic.Field(alias="defaultValueSchemaId")
    ] = None
    r"""Used when __valueSchemaIdOut is not present, to transform _raw, leave blank if value transformation is not required by default."""


class OutputMskAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""AWS authentication method. Choose Auto to use IAM roles."""

    AUTO = "auto"
    MANUAL = "manual"
    SECRET = "secret"


class OutputMskSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Signature version to use for signing MSK cluster requests"""

    V2 = "v2"
    V4 = "v4"


class OutputMskMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
    TL_SV1 = "TLSv1"
    TL_SV1_1 = "TLSv1.1"
    TL_SV1_2 = "TLSv1.2"
    TL_SV1_3 = "TLSv1.3"


class OutputMskMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
    TL_SV1 = "TLSv1"
    TL_SV1_1 = "TLSv1.1"
    TL_SV1_2 = "TLSv1.2"
    TL_SV1_3 = "TLSv1.3"


class OutputMskTLSSettingsClientSideTypedDict(TypedDict):
    disabled: NotRequired[bool]
    reject_unauthorized: NotRequired[bool]
    r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another
    trusted CA (such as the system's). Defaults to Enabled. Overrides the toggle from Advanced Settings, when also present.
    """
    servername: NotRequired[str]
    r"""Server name for the SNI (Server Name Indication) TLS extension. It must be a host name, and not an IP address."""
    certificate_name: NotRequired[str]
    r"""The name of the predefined certificate"""
    ca_path: NotRequired[str]
    r"""Path on client in which to find CA certificates to verify the server's cert. PEM format. Can reference $ENV_VARS."""
    priv_key_path: NotRequired[str]
    r"""Path on client in which to find the private key to use. PEM format. Can reference $ENV_VARS."""
    cert_path: NotRequired[str]
    r"""Path on client in which to find certificates to use. PEM format. Can reference $ENV_VARS."""
    passphrase: NotRequired[str]
    r"""Passphrase to use to decrypt private key"""
    min_version: NotRequired[OutputMskMinimumTLSVersion]
    max_version: NotRequired[OutputMskMaximumTLSVersion]


class OutputMskTLSSettingsClientSide(BaseModel):
    disabled: Optional[bool] = False

    reject_unauthorized: Annotated[
        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
    ] = True
    r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another
    trusted CA (such as the system's). Defaults to Enabled. Overrides the toggle from Advanced Settings, when also present.
    """

    servername: Optional[str] = None
    r"""Server name for the SNI (Server Name Indication) TLS extension. It must be a host name, and not an IP address."""

    certificate_name: Annotated[
        Optional[str], pydantic.Field(alias="certificateName")
    ] = None
    r"""The name of the predefined certificate"""

    ca_path: Annotated[Optional[str], pydantic.Field(alias="caPath")] = None
    r"""Path on client in which to find CA certificates to verify the server's cert. PEM format. Can reference $ENV_VARS."""

    priv_key_path: Annotated[Optional[str], pydantic.Field(alias="privKeyPath")] = None
    r"""Path on client in which to find the private key to use. PEM format. Can reference $ENV_VARS."""

    cert_path: Annotated[Optional[str], pydantic.Field(alias="certPath")] = None
    r"""Path on client in which to find certificates to use. PEM format. Can reference $ENV_VARS."""

    passphrase: Optional[str] = None
    r"""Passphrase to use to decrypt private key"""

    min_version: Annotated[
        Annotated[
            Optional[OutputMskMinimumTLSVersion],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="minVersion"),
    ] = None

    max_version: Annotated[
        Annotated[
            Optional[OutputMskMaximumTLSVersion],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="maxVersion"),
    ] = None


class OutputMskBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when all receivers are exerting backpressure"""

    BLOCK = "block"
    DROP = "drop"
    QUEUE = "queue"


class OutputMskPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Codec to use to compress the persisted data"""

    NONE = "none"
    GZIP = "gzip"


class OutputMskQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    BLOCK = "block"
    DROP = "drop"


class OutputMskMode(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class OutputMskPqControlsTypedDict(TypedDict):
    pass


class OutputMskPqControls(BaseModel):
    pass


class OutputMskTypedDict(TypedDict):
    brokers: List[str]
    r"""Enter each Kafka bootstrap server you want to use. Specify hostname and port, e.g., mykafkabroker:9092, or just hostname, in which case @{product} will assign port 9092."""
    topic: str
    r"""The topic to publish events to. Can be overridden using the __topicOut field."""
    region: str
    r"""Region where the MSK cluster is located"""
    id: NotRequired[str]
    r"""Unique ID for this output"""
    type: NotRequired[OutputMskType]
    pipeline: NotRequired[str]
    r"""Pipeline to process data before sending out to this output"""
    system_fields: NotRequired[List[str]]
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
    environment: NotRequired[str]
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
    streamtags: NotRequired[List[str]]
    r"""Tags for filtering and grouping in @{product}"""
    ack: NotRequired[OutputMskAcknowledgments]
    r"""Control the number of required acknowledgments."""
    format_: NotRequired[OutputMskRecordDataFormat]
    r"""Format to use to serialize events before writing to Kafka."""
    compression: NotRequired[OutputMskCompression]
    r"""Codec to use to compress the data before sending to Kafka"""
    max_record_size_kb: NotRequired[float]
    r"""Maximum size of each record batch before compression. The value must not exceed the Kafka brokers' message.max.bytes setting."""
    flush_event_count: NotRequired[float]
    r"""The maximum number of events you want the Destination to allow in a batch before forcing a flush"""
    flush_period_sec: NotRequired[float]
    r"""The maximum amount of time you want the Destination to wait before forcing a flush. Shorter intervals tend to result in smaller batches being sent."""
    kafka_schema_registry: NotRequired[
        OutputMskKafkaSchemaRegistryAuthenticationTypedDict
    ]
    connection_timeout: NotRequired[float]
    r"""Maximum time to wait for a connection to complete successfully"""
    request_timeout: NotRequired[float]
    r"""Maximum time to wait for Kafka to respond to a request"""
    max_retries: NotRequired[float]
    r"""If messages are failing, you can set the maximum number of retries as high as 100 to prevent loss of data"""
    max_back_off: NotRequired[float]
    r"""The maximum wait time for a retry, in milliseconds. Default (and minimum) is 30,000 ms (30 seconds); maximum is 180,000 ms (180 seconds)."""
    initial_backoff: NotRequired[float]
    r"""Initial value used to calculate the retry, in milliseconds. Maximum is 600,000 ms (10 minutes)."""
    backoff_rate: NotRequired[float]
    r"""Set the backoff multiplier (2-20) to control the retry frequency for failed messages. For faster retries, use a lower multiplier. For slower retries with more delay between attempts, use a higher multiplier. The multiplier is used in an exponential backoff formula; see the Kafka [documentation](https://kafka.js.org/docs/retry-detailed) for details."""
    authentication_timeout: NotRequired[float]
    r"""Maximum time to wait for Kafka to respond to an authentication request"""
    reauthentication_threshold: NotRequired[float]
    r"""Specifies a time window during which @{product} can reauthenticate if needed. Creates the window measuring backward from the moment when credentials are set to expire."""
    aws_authentication_method: NotRequired[OutputMskAuthenticationMethod]
    r"""AWS authentication method. Choose Auto to use IAM roles."""
    aws_secret_key: NotRequired[str]
    endpoint: NotRequired[str]
    r"""MSK cluster service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to MSK cluster-compatible endpoint."""
    signature_version: NotRequired[OutputMskSignatureVersion]
    r"""Signature version to use for signing MSK cluster requests"""
    reuse_connections: NotRequired[bool]
    r"""Reuse connections between requests, which can improve performance"""
    reject_unauthorized: NotRequired[bool]
    r"""Reject certificates that cannot be verified against a valid CA, such as self-signed certificates"""
    enable_assume_role: NotRequired[bool]
    r"""Use Assume Role credentials to access MSK"""
    assume_role_arn: NotRequired[str]
    r"""Amazon Resource Name (ARN) of the role to assume"""
    assume_role_external_id: NotRequired[str]
    r"""External ID to use when assuming role"""
    duration_seconds: NotRequired[float]
    r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""
    tls: NotRequired[OutputMskTLSSettingsClientSideTypedDict]
    on_backpressure: NotRequired[OutputMskBackpressureBehavior]
    r"""How to handle events when all receivers are exerting backpressure"""
    description: NotRequired[str]
    aws_api_key: NotRequired[str]
    aws_secret: NotRequired[str]
    r"""Select or create a stored secret that references your access key and secret key"""
    protobuf_library_id: NotRequired[str]
    r"""Select a set of Protobuf definitions for the events you want to send"""
    pq_max_file_size: NotRequired[str]
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
    pq_max_size: NotRequired[str]
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
    pq_path: NotRequired[str]
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
    pq_compress: NotRequired[OutputMskPqCompressCompression]
    r"""Codec to use to compress the persisted data"""
    pq_on_backpressure: NotRequired[OutputMskQueueFullBehavior]
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
    pq_mode: NotRequired[OutputMskMode]
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
    pq_controls: NotRequired[OutputMskPqControlsTypedDict]


class OutputMsk(BaseModel):
    brokers: List[str]
    r"""Enter each Kafka bootstrap server you want to use. Specify hostname and port, e.g., mykafkabroker:9092, or just hostname, in which case @{product} will assign port 9092."""

    topic: str
    r"""The topic to publish events to. Can be overridden using the __topicOut field."""

    region: str
    r"""Region where the MSK cluster is located"""

    id: Optional[str] = None
    r"""Unique ID for this output"""

    type: Annotated[
        Optional[OutputMskType], PlainValidator(validate_open_enum(False))
    ] = None

    pipeline: Optional[str] = None
    r"""Pipeline to process data before sending out to this output"""

    system_fields: Annotated[
        Optional[List[str]], pydantic.Field(alias="systemFields")
    ] = None
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""

    environment: Optional[str] = None
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""

    streamtags: Optional[List[str]] = None
    r"""Tags for filtering and grouping in @{product}"""

    ack: Annotated[
        Optional[OutputMskAcknowledgments], PlainValidator(validate_open_enum(True))
    ] = OutputMskAcknowledgments.ONE
    r"""Control the number of required acknowledgments."""

    format_: Annotated[
        Annotated[
            Optional[OutputMskRecordDataFormat],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="format"),
    ] = OutputMskRecordDataFormat.JSON
    r"""Format to use to serialize events before writing to Kafka."""

    compression: Annotated[
        Optional[OutputMskCompression], PlainValidator(validate_open_enum(False))
    ] = OutputMskCompression.GZIP
    r"""Codec to use to compress the data before sending to Kafka"""

    max_record_size_kb: Annotated[
        Optional[float], pydantic.Field(alias="maxRecordSizeKB")
    ] = 768
    r"""Maximum size of each record batch before compression. The value must not exceed the Kafka brokers' message.max.bytes setting."""

    flush_event_count: Annotated[
        Optional[float], pydantic.Field(alias="flushEventCount")
    ] = 1000
    r"""The maximum number of events you want the Destination to allow in a batch before forcing a flush"""

    flush_period_sec: Annotated[
        Optional[float], pydantic.Field(alias="flushPeriodSec")
    ] = 1
    r"""The maximum amount of time you want the Destination to wait before forcing a flush. Shorter intervals tend to result in smaller batches being sent."""

    kafka_schema_registry: Annotated[
        Optional[OutputMskKafkaSchemaRegistryAuthentication],
        pydantic.Field(alias="kafkaSchemaRegistry"),
    ] = None

    connection_timeout: Annotated[
        Optional[float], pydantic.Field(alias="connectionTimeout")
    ] = 10000
    r"""Maximum time to wait for a connection to complete successfully"""

    request_timeout: Annotated[
        Optional[float], pydantic.Field(alias="requestTimeout")
    ] = 60000
    r"""Maximum time to wait for Kafka to respond to a request"""

    max_retries: Annotated[Optional[float], pydantic.Field(alias="maxRetries")] = 5
    r"""If messages are failing, you can set the maximum number of retries as high as 100 to prevent loss of data"""

    max_back_off: Annotated[Optional[float], pydantic.Field(alias="maxBackOff")] = 30000
    r"""The maximum wait time for a retry, in milliseconds. Default (and minimum) is 30,000 ms (30 seconds); maximum is 180,000 ms (180 seconds)."""

    initial_backoff: Annotated[
        Optional[float], pydantic.Field(alias="initialBackoff")
    ] = 300
    r"""Initial value used to calculate the retry, in milliseconds. Maximum is 600,000 ms (10 minutes)."""

    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
    r"""Set the backoff multiplier (2-20) to control the retry frequency for failed messages. For faster retries, use a lower multiplier. For slower retries with more delay between attempts, use a higher multiplier. The multiplier is used in an exponential backoff formula; see the Kafka [documentation](https://kafka.js.org/docs/retry-detailed) for details."""

    authentication_timeout: Annotated[
        Optional[float], pydantic.Field(alias="authenticationTimeout")
    ] = 10000
    r"""Maximum time to wait for Kafka to respond to an authentication request"""

    reauthentication_threshold: Annotated[
        Optional[float], pydantic.Field(alias="reauthenticationThreshold")
    ] = 10000
    r"""Specifies a time window during which @{product} can reauthenticate if needed. Creates the window measuring backward from the moment when credentials are set to expire."""

    aws_authentication_method: Annotated[
        Annotated[
            Optional[OutputMskAuthenticationMethod],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="awsAuthenticationMethod"),
    ] = OutputMskAuthenticationMethod.AUTO
    r"""AWS authentication method. Choose Auto to use IAM roles."""

    aws_secret_key: Annotated[Optional[str], pydantic.Field(alias="awsSecretKey")] = (
        None
    )

    endpoint: Optional[str] = None
    r"""MSK cluster service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to MSK cluster-compatible endpoint."""

    signature_version: Annotated[
        Annotated[
            Optional[OutputMskSignatureVersion],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="signatureVersion"),
    ] = OutputMskSignatureVersion.V4
    r"""Signature version to use for signing MSK cluster requests"""

    reuse_connections: Annotated[
        Optional[bool], pydantic.Field(alias="reuseConnections")
    ] = True
    r"""Reuse connections between requests, which can improve performance"""

    reject_unauthorized: Annotated[
        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
    ] = True
    r"""Reject certificates that cannot be verified against a valid CA, such as self-signed certificates"""

    enable_assume_role: Annotated[
        Optional[bool], pydantic.Field(alias="enableAssumeRole")
    ] = False
    r"""Use Assume Role credentials to access MSK"""

    assume_role_arn: Annotated[Optional[str], pydantic.Field(alias="assumeRoleArn")] = (
        None
    )
    r"""Amazon Resource Name (ARN) of the role to assume"""

    assume_role_external_id: Annotated[
        Optional[str], pydantic.Field(alias="assumeRoleExternalId")
    ] = None
    r"""External ID to use when assuming role"""

    duration_seconds: Annotated[
        Optional[float], pydantic.Field(alias="durationSeconds")
    ] = 3600
    r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""

    tls: Optional[OutputMskTLSSettingsClientSide] = None

    on_backpressure: Annotated[
        Annotated[
            Optional[OutputMskBackpressureBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="onBackpressure"),
    ] = OutputMskBackpressureBehavior.BLOCK
    r"""How to handle events when all receivers are exerting backpressure"""

    description: Optional[str] = None

    aws_api_key: Annotated[Optional[str], pydantic.Field(alias="awsApiKey")] = None

    aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
    r"""Select or create a stored secret that references your access key and secret key"""

    protobuf_library_id: Annotated[
        Optional[str], pydantic.Field(alias="protobufLibraryId")
    ] = None
    r"""Select a set of Protobuf definitions for the events you want to send"""

    pq_max_file_size: Annotated[
        Optional[str], pydantic.Field(alias="pqMaxFileSize")
    ] = "1 MB"
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""

    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""

    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
        "$CRIBL_HOME/state/queues"
    )
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

    pq_compress: Annotated[
        Annotated[
            Optional[OutputMskPqCompressCompression],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqCompress"),
    ] = OutputMskPqCompressCompression.NONE
    r"""Codec to use to compress the persisted data"""

    pq_on_backpressure: Annotated[
        Annotated[
            Optional[OutputMskQueueFullBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqOnBackpressure"),
    ] = OutputMskQueueFullBehavior.BLOCK
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    pq_mode: Annotated[
        Annotated[Optional[OutputMskMode], PlainValidator(validate_open_enum(False))],
        pydantic.Field(alias="pqMode"),
    ] = OutputMskMode.ERROR
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    pq_controls: Annotated[
        Optional[OutputMskPqControls], pydantic.Field(alias="pqControls")
    ] = None