cribl_control_plane-0.0.13-py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/__init__.py +17 -0
- cribl_control_plane/_hooks/__init__.py +5 -0
- cribl_control_plane/_hooks/clientcredentials.py +211 -0
- cribl_control_plane/_hooks/registration.py +13 -0
- cribl_control_plane/_hooks/sdkhooks.py +81 -0
- cribl_control_plane/_hooks/types.py +112 -0
- cribl_control_plane/_version.py +15 -0
- cribl_control_plane/auth_sdk.py +184 -0
- cribl_control_plane/basesdk.py +358 -0
- cribl_control_plane/errors/__init__.py +60 -0
- cribl_control_plane/errors/apierror.py +38 -0
- cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
- cribl_control_plane/errors/error.py +24 -0
- cribl_control_plane/errors/healthstatus_error.py +38 -0
- cribl_control_plane/errors/no_response_error.py +13 -0
- cribl_control_plane/errors/responsevalidationerror.py +25 -0
- cribl_control_plane/health.py +166 -0
- cribl_control_plane/httpclient.py +126 -0
- cribl_control_plane/models/__init__.py +7305 -0
- cribl_control_plane/models/addhectokenrequest.py +34 -0
- cribl_control_plane/models/authtoken.py +13 -0
- cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
- cribl_control_plane/models/createinputop.py +24 -0
- cribl_control_plane/models/createoutputop.py +24 -0
- cribl_control_plane/models/createoutputtestbyidop.py +46 -0
- cribl_control_plane/models/criblevent.py +14 -0
- cribl_control_plane/models/deleteinputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getinputbyidop.py +37 -0
- cribl_control_plane/models/getoutputbyidop.py +37 -0
- cribl_control_plane/models/getoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
- cribl_control_plane/models/healthstatus.py +36 -0
- cribl_control_plane/models/input.py +199 -0
- cribl_control_plane/models/inputappscope.py +448 -0
- cribl_control_plane/models/inputazureblob.py +308 -0
- cribl_control_plane/models/inputcollection.py +208 -0
- cribl_control_plane/models/inputconfluentcloud.py +585 -0
- cribl_control_plane/models/inputcribl.py +165 -0
- cribl_control_plane/models/inputcriblhttp.py +341 -0
- cribl_control_plane/models/inputcribllakehttp.py +342 -0
- cribl_control_plane/models/inputcriblmetrics.py +175 -0
- cribl_control_plane/models/inputcribltcp.py +299 -0
- cribl_control_plane/models/inputcrowdstrike.py +410 -0
- cribl_control_plane/models/inputdatadogagent.py +364 -0
- cribl_control_plane/models/inputdatagen.py +180 -0
- cribl_control_plane/models/inputedgeprometheus.py +551 -0
- cribl_control_plane/models/inputelastic.py +494 -0
- cribl_control_plane/models/inputeventhub.py +360 -0
- cribl_control_plane/models/inputexec.py +213 -0
- cribl_control_plane/models/inputfile.py +259 -0
- cribl_control_plane/models/inputfirehose.py +341 -0
- cribl_control_plane/models/inputgooglepubsub.py +247 -0
- cribl_control_plane/models/inputgrafana_union.py +1247 -0
- cribl_control_plane/models/inputhttp.py +403 -0
- cribl_control_plane/models/inputhttpraw.py +407 -0
- cribl_control_plane/models/inputjournalfiles.py +208 -0
- cribl_control_plane/models/inputkafka.py +581 -0
- cribl_control_plane/models/inputkinesis.py +363 -0
- cribl_control_plane/models/inputkubeevents.py +182 -0
- cribl_control_plane/models/inputkubelogs.py +256 -0
- cribl_control_plane/models/inputkubemetrics.py +233 -0
- cribl_control_plane/models/inputloki.py +468 -0
- cribl_control_plane/models/inputmetrics.py +290 -0
- cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
- cribl_control_plane/models/inputmsk.py +654 -0
- cribl_control_plane/models/inputnetflow.py +224 -0
- cribl_control_plane/models/inputoffice365mgmt.py +384 -0
- cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
- cribl_control_plane/models/inputoffice365service.py +377 -0
- cribl_control_plane/models/inputopentelemetry.py +516 -0
- cribl_control_plane/models/inputprometheus.py +464 -0
- cribl_control_plane/models/inputprometheusrw.py +470 -0
- cribl_control_plane/models/inputrawudp.py +207 -0
- cribl_control_plane/models/inputs3.py +416 -0
- cribl_control_plane/models/inputs3inventory.py +440 -0
- cribl_control_plane/models/inputsecuritylake.py +425 -0
- cribl_control_plane/models/inputsnmp.py +274 -0
- cribl_control_plane/models/inputsplunk.py +387 -0
- cribl_control_plane/models/inputsplunkhec.py +478 -0
- cribl_control_plane/models/inputsplunksearch.py +537 -0
- cribl_control_plane/models/inputsqs.py +320 -0
- cribl_control_plane/models/inputsyslog_union.py +759 -0
- cribl_control_plane/models/inputsystemmetrics.py +533 -0
- cribl_control_plane/models/inputsystemstate.py +417 -0
- cribl_control_plane/models/inputtcp.py +359 -0
- cribl_control_plane/models/inputtcpjson.py +334 -0
- cribl_control_plane/models/inputwef.py +498 -0
- cribl_control_plane/models/inputwindowsmetrics.py +457 -0
- cribl_control_plane/models/inputwineventlogs.py +222 -0
- cribl_control_plane/models/inputwiz.py +334 -0
- cribl_control_plane/models/inputzscalerhec.py +439 -0
- cribl_control_plane/models/listinputop.py +24 -0
- cribl_control_plane/models/listoutputop.py +24 -0
- cribl_control_plane/models/logininfo.py +16 -0
- cribl_control_plane/models/output.py +229 -0
- cribl_control_plane/models/outputazureblob.py +471 -0
- cribl_control_plane/models/outputazuredataexplorer.py +660 -0
- cribl_control_plane/models/outputazureeventhub.py +321 -0
- cribl_control_plane/models/outputazurelogs.py +386 -0
- cribl_control_plane/models/outputclickhouse.py +650 -0
- cribl_control_plane/models/outputcloudwatch.py +273 -0
- cribl_control_plane/models/outputconfluentcloud.py +591 -0
- cribl_control_plane/models/outputcriblhttp.py +494 -0
- cribl_control_plane/models/outputcribllake.py +396 -0
- cribl_control_plane/models/outputcribltcp.py +387 -0
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
- cribl_control_plane/models/outputdatadog.py +472 -0
- cribl_control_plane/models/outputdataset.py +437 -0
- cribl_control_plane/models/outputdefault.py +55 -0
- cribl_control_plane/models/outputdevnull.py +50 -0
- cribl_control_plane/models/outputdiskspool.py +89 -0
- cribl_control_plane/models/outputdls3.py +560 -0
- cribl_control_plane/models/outputdynatracehttp.py +454 -0
- cribl_control_plane/models/outputdynatraceotlp.py +486 -0
- cribl_control_plane/models/outputelastic.py +494 -0
- cribl_control_plane/models/outputelasticcloud.py +407 -0
- cribl_control_plane/models/outputexabeam.py +297 -0
- cribl_control_plane/models/outputfilesystem.py +357 -0
- cribl_control_plane/models/outputgooglechronicle.py +486 -0
- cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
- cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
- cribl_control_plane/models/outputgooglepubsub.py +274 -0
- cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
- cribl_control_plane/models/outputgraphite.py +225 -0
- cribl_control_plane/models/outputhoneycomb.py +369 -0
- cribl_control_plane/models/outputhumiohec.py +389 -0
- cribl_control_plane/models/outputinfluxdb.py +523 -0
- cribl_control_plane/models/outputkafka.py +581 -0
- cribl_control_plane/models/outputkinesis.py +312 -0
- cribl_control_plane/models/outputloki.py +425 -0
- cribl_control_plane/models/outputminio.py +512 -0
- cribl_control_plane/models/outputmsk.py +654 -0
- cribl_control_plane/models/outputnetflow.py +80 -0
- cribl_control_plane/models/outputnewrelic.py +424 -0
- cribl_control_plane/models/outputnewrelicevents.py +401 -0
- cribl_control_plane/models/outputopentelemetry.py +669 -0
- cribl_control_plane/models/outputprometheus.py +485 -0
- cribl_control_plane/models/outputring.py +121 -0
- cribl_control_plane/models/outputrouter.py +83 -0
- cribl_control_plane/models/outputs3.py +556 -0
- cribl_control_plane/models/outputsamplesresponse.py +14 -0
- cribl_control_plane/models/outputsecuritylake.py +505 -0
- cribl_control_plane/models/outputsentinel.py +488 -0
- cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
- cribl_control_plane/models/outputservicenow.py +543 -0
- cribl_control_plane/models/outputsignalfx.py +369 -0
- cribl_control_plane/models/outputsnmp.py +80 -0
- cribl_control_plane/models/outputsns.py +274 -0
- cribl_control_plane/models/outputsplunk.py +383 -0
- cribl_control_plane/models/outputsplunkhec.py +434 -0
- cribl_control_plane/models/outputsplunklb.py +558 -0
- cribl_control_plane/models/outputsqs.py +328 -0
- cribl_control_plane/models/outputstatsd.py +224 -0
- cribl_control_plane/models/outputstatsdext.py +225 -0
- cribl_control_plane/models/outputsumologic.py +378 -0
- cribl_control_plane/models/outputsyslog.py +415 -0
- cribl_control_plane/models/outputtcpjson.py +413 -0
- cribl_control_plane/models/outputtestrequest.py +15 -0
- cribl_control_plane/models/outputtestresponse.py +29 -0
- cribl_control_plane/models/outputwavefront.py +369 -0
- cribl_control_plane/models/outputwebhook.py +689 -0
- cribl_control_plane/models/outputxsiam.py +415 -0
- cribl_control_plane/models/schemeclientoauth.py +24 -0
- cribl_control_plane/models/security.py +36 -0
- cribl_control_plane/models/updatehectokenrequest.py +31 -0
- cribl_control_plane/models/updateinputbyidop.py +44 -0
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
- cribl_control_plane/models/updateoutputbyidop.py +44 -0
- cribl_control_plane/outputs.py +1615 -0
- cribl_control_plane/py.typed +1 -0
- cribl_control_plane/sdk.py +164 -0
- cribl_control_plane/sdkconfiguration.py +36 -0
- cribl_control_plane/sources.py +1355 -0
- cribl_control_plane/types/__init__.py +21 -0
- cribl_control_plane/types/basemodel.py +39 -0
- cribl_control_plane/utils/__init__.py +187 -0
- cribl_control_plane/utils/annotations.py +55 -0
- cribl_control_plane/utils/datetimes.py +23 -0
- cribl_control_plane/utils/enums.py +74 -0
- cribl_control_plane/utils/eventstreaming.py +238 -0
- cribl_control_plane/utils/forms.py +223 -0
- cribl_control_plane/utils/headers.py +136 -0
- cribl_control_plane/utils/logger.py +27 -0
- cribl_control_plane/utils/metadata.py +118 -0
- cribl_control_plane/utils/queryparams.py +205 -0
- cribl_control_plane/utils/requestbodies.py +66 -0
- cribl_control_plane/utils/retries.py +217 -0
- cribl_control_plane/utils/security.py +207 -0
- cribl_control_plane/utils/serializers.py +249 -0
- cribl_control_plane/utils/unmarshal_json_response.py +24 -0
- cribl_control_plane/utils/url.py +155 -0
- cribl_control_plane/utils/values.py +137 -0
- cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
- cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
- cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
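The layout above is the usual shape of a Speakeasy-generated SDK: a root client in sdk.py, per-resource modules (sources.py, outputs.py, health.py, auth_sdk.py), pydantic models under models/, and an OAuth client-credentials hook under _hooks/. As a rough orientation only, a client call might look like the sketch below; the entry-point class name, constructor arguments, security shape, and method names are assumptions inferred from the file names, not confirmed by this diff.

# Hypothetical usage sketch. "CriblControlPlane", "Security(bearer_auth=...)",
# and "client.outputs.list_output" are assumed names based on the module layout
# above (sdk.py, outputs.py, models/security.py, models/listoutputop.py); the
# real generated API may differ.
from cribl_control_plane import CriblControlPlane, models  # assumed exports

with CriblControlPlane(
    server_url="https://example.cribl.cloud/api/v1",   # placeholder URL
    security=models.Security(bearer_auth="<token>"),   # assumed auth shape
) as client:
    outputs = client.outputs.list_output()             # assumed method name
    print(outputs)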
cribl_control_plane/models/outputazuredataexplorer.py

@@ -0,0 +1,660 @@
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from cribl_control_plane import utils
from cribl_control_plane.types import BaseModel
from cribl_control_plane.utils import validate_open_enum
from enum import Enum
import pydantic
from pydantic.functional_validators import PlainValidator
from typing import List, Optional
from typing_extensions import Annotated, NotRequired, TypedDict


class OutputAzureDataExplorerType(str, Enum, metaclass=utils.OpenEnumMeta):
    AZURE_DATA_EXPLORER = "azure_data_explorer"


class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
    BATCHING = "batching"
    STREAMING = "streaming"


class MicrosoftEntraIDAuthenticationEndpoint(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Endpoint used to acquire authentication tokens from Azure"""

    HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
    HTTPS_LOGIN_MICROSOFTONLINE_US = "https://login.microsoftonline.us"
    HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"


class OutputAzureDataExplorerAuthenticationMethod(
    str, Enum, metaclass=utils.OpenEnumMeta
):
    r"""The type of OAuth 2.0 client credentials grant flow to use"""

    CLIENT_SECRET = "clientSecret"
    CLIENT_TEXT_SECRET = "clientTextSecret"
    CERTIFICATE = "certificate"


class OutputAzureDataExplorerCertificateTypedDict(TypedDict):
    certificate_name: NotRequired[str]
    r"""The certificate you registered as credentials for your app in the Azure portal"""


class OutputAzureDataExplorerCertificate(BaseModel):
    certificate_name: Annotated[
        Optional[str], pydantic.Field(alias="certificateName")
    ] = None
    r"""The certificate you registered as credentials for your app in the Azure portal"""


class OutputAzureDataExplorerBackpressureBehavior(
    str, Enum, metaclass=utils.OpenEnumMeta
):
    r"""How to handle events when all receivers are exerting backpressure"""

    BLOCK = "block"
    DROP = "drop"
    QUEUE = "queue"


class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Format of the output data"""

    JSON = "json"
    RAW = "raw"
    PARQUET = "parquet"


class OutputAzureDataExplorerDiskSpaceProtection(
    str, Enum, metaclass=utils.OpenEnumMeta
):
    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

    BLOCK = "block"
    DROP = "drop"


class PrefixOptional(str, Enum, metaclass=utils.OpenEnumMeta):
    DROP_BY = "dropBy"
    INGEST_BY = "ingestBy"


class ExtentTagTypedDict(TypedDict):
    value: str
    prefix: NotRequired[PrefixOptional]


class ExtentTag(BaseModel):
    value: str

    prefix: Annotated[
        Optional[PrefixOptional], PlainValidator(validate_open_enum(False))
    ] = None


class IngestIfNotExistTypedDict(TypedDict):
    value: str


class IngestIfNotExist(BaseModel):
    value: str


class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Level of ingestion status reporting. Defaults to FailuresOnly."""

    FAILURES_ONLY = "failuresOnly"
    DO_NOT_REPORT = "doNotReport"
    FAILURES_AND_SUCCESSES = "failuresAndSuccesses"


class ReportMethod(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Target of the ingestion status reporting. Defaults to Queue."""

    QUEUE = "queue"
    TABLE = "table"
    QUEUE_AND_TABLE = "queueAndTable"


class AdditionalPropertyTypedDict(TypedDict):
    key: str
    value: str


class AdditionalProperty(BaseModel):
    key: str

    value: str


class OutputAzureDataExplorerResponseRetrySettingTypedDict(TypedDict):
    http_status: float
    r"""The HTTP response status code that will trigger retries"""
    initial_backoff: NotRequired[float]
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
    backoff_rate: NotRequired[float]
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
    max_backoff: NotRequired[float]
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputAzureDataExplorerResponseRetrySetting(BaseModel):
    http_status: Annotated[float, pydantic.Field(alias="httpStatus")]
    r"""The HTTP response status code that will trigger retries"""

    initial_backoff: Annotated[
        Optional[float], pydantic.Field(alias="initialBackoff")
    ] = 1000
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""

    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""

    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputAzureDataExplorerTimeoutRetrySettingsTypedDict(TypedDict):
    timeout_retry: NotRequired[bool]
    initial_backoff: NotRequired[float]
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
    backoff_rate: NotRequired[float]
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
    max_backoff: NotRequired[float]
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
    timeout_retry: Annotated[Optional[bool], pydantic.Field(alias="timeoutRetry")] = (
        False
    )

    initial_backoff: Annotated[
        Optional[float], pydantic.Field(alias="initialBackoff")
    ] = 1000
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""

    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""

    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputAzureDataExplorerCompressCompression(
    str, Enum, metaclass=utils.OpenEnumMeta
):
    r"""Data compression format to apply to HTTP content before it is delivered"""

    NONE = "none"
    GZIP = "gzip"


class OutputAzureDataExplorerPqCompressCompression(
    str, Enum, metaclass=utils.OpenEnumMeta
):
    r"""Codec to use to compress the persisted data"""

    NONE = "none"
    GZIP = "gzip"


class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    BLOCK = "block"
    DROP = "drop"


class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class OutputAzureDataExplorerPqControlsTypedDict(TypedDict):
    pass


class OutputAzureDataExplorerPqControls(BaseModel):
    pass


class OutputAzureDataExplorerTypedDict(TypedDict):
    cluster_url: str
    r"""The base URI for your cluster. Typically, `https://<cluster>.<region>.kusto.windows.net`."""
    database: str
    r"""Name of the database containing the table where data will be ingested"""
    table: str
    r"""Name of the table to ingest data into"""
    tenant_id: str
    r"""Directory ID (tenant identifier) in Azure Active Directory"""
    client_id: str
    r"""client_id to pass in the OAuth request parameter"""
    scope: str
    r"""Scope to pass in the OAuth request parameter"""
    id: NotRequired[str]
    r"""Unique ID for this output"""
    type: NotRequired[OutputAzureDataExplorerType]
    pipeline: NotRequired[str]
    r"""Pipeline to process data before sending out to this output"""
    system_fields: NotRequired[List[str]]
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
    environment: NotRequired[str]
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
    streamtags: NotRequired[List[str]]
    r"""Tags for filtering and grouping in @{product}"""
    validate_database_settings: NotRequired[bool]
    r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
    ingest_mode: NotRequired[IngestionMode]
    oauth_endpoint: NotRequired[MicrosoftEntraIDAuthenticationEndpoint]
    r"""Endpoint used to acquire authentication tokens from Azure"""
    oauth_type: NotRequired[OutputAzureDataExplorerAuthenticationMethod]
    r"""The type of OAuth 2.0 client credentials grant flow to use"""
    description: NotRequired[str]
    client_secret: NotRequired[str]
    r"""The client secret that you generated for your app in the Azure portal"""
    text_secret: NotRequired[str]
    r"""Select or create a stored text secret"""
    certificate: NotRequired[OutputAzureDataExplorerCertificateTypedDict]
    ingest_url: NotRequired[str]
    r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
    on_backpressure: NotRequired[OutputAzureDataExplorerBackpressureBehavior]
    r"""How to handle events when all receivers are exerting backpressure"""
    is_mapping_obj: NotRequired[bool]
    r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
    format_: NotRequired[OutputAzureDataExplorerDataFormat]
    r"""Format of the output data"""
    stage_path: NotRequired[str]
    r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant and stable storage."""
    file_name_suffix: NotRequired[str]
    r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
    max_file_size_mb: NotRequired[float]
    r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
    max_file_open_time_sec: NotRequired[float]
    r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
    max_file_idle_time_sec: NotRequired[float]
    r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
    max_open_files: NotRequired[float]
    r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
    max_concurrent_file_parts: NotRequired[float]
    r"""Maximum number of parts to upload in parallel per file"""
    on_disk_full_backpressure: NotRequired[OutputAzureDataExplorerDiskSpaceProtection]
    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
    add_id_to_stage_path: NotRequired[bool]
    r"""Add the Output ID value to staging location"""
    remove_empty_dirs: NotRequired[bool]
    r"""Remove empty staging directories after moving files"""
    deadletter_enabled: NotRequired[bool]
    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
    timeout_sec: NotRequired[float]
    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
    flush_immediately: NotRequired[bool]
    r"""Bypass the data management service's aggregation mechanism"""
    retain_blob_on_success: NotRequired[bool]
    r"""Prevent blob deletion after ingestion is complete"""
    extent_tags: NotRequired[List[ExtentTagTypedDict]]
    r"""Strings or tags associated with the extent (ingested data shard)"""
    ingest_if_not_exists: NotRequired[List[IngestIfNotExistTypedDict]]
    r"""Prevents duplicate ingestion by verifying whether an extent with the specified ingest-by tag already exists"""
    report_level: NotRequired[ReportLevel]
    r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
    report_method: NotRequired[ReportMethod]
    r"""Target of the ingestion status reporting. Defaults to Queue."""
    additional_properties: NotRequired[List[AdditionalPropertyTypedDict]]
    r"""Optionally, enter additional configuration properties to send to the ingestion service"""
    response_retry_settings: NotRequired[
        List[OutputAzureDataExplorerResponseRetrySettingTypedDict]
    ]
    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
    timeout_retry_settings: NotRequired[
        OutputAzureDataExplorerTimeoutRetrySettingsTypedDict
    ]
    response_honor_retry_after_header: NotRequired[bool]
    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
    compress: NotRequired[OutputAzureDataExplorerCompressCompression]
    r"""Data compression format to apply to HTTP content before it is delivered"""
    mapping_ref: NotRequired[str]
    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
    concurrency: NotRequired[float]
    r"""Maximum number of ongoing requests before blocking"""
    max_payload_size_kb: NotRequired[float]
    r"""Maximum size, in KB, of the request body"""
    max_payload_events: NotRequired[float]
    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
    flush_period_sec: NotRequired[float]
    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
    reject_unauthorized: NotRequired[bool]
    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
    Enabled by default. When this setting is also present in TLS Settings (Client Side),
    that value will take precedence.
    """
    use_round_robin_dns: NotRequired[bool]
    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
    keep_alive: NotRequired[bool]
    r"""Disable to close the connection immediately after sending the outgoing request"""
    pq_max_file_size: NotRequired[str]
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
    pq_max_size: NotRequired[str]
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
    pq_path: NotRequired[str]
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
    pq_compress: NotRequired[OutputAzureDataExplorerPqCompressCompression]
    r"""Codec to use to compress the persisted data"""
    pq_on_backpressure: NotRequired[OutputAzureDataExplorerQueueFullBehavior]
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
    pq_mode: NotRequired[OutputAzureDataExplorerMode]
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
    pq_controls: NotRequired[OutputAzureDataExplorerPqControlsTypedDict]


class OutputAzureDataExplorer(BaseModel):
    cluster_url: Annotated[str, pydantic.Field(alias="clusterUrl")]
    r"""The base URI for your cluster. Typically, `https://<cluster>.<region>.kusto.windows.net`."""

    database: str
    r"""Name of the database containing the table where data will be ingested"""

    table: str
    r"""Name of the table to ingest data into"""

    tenant_id: Annotated[str, pydantic.Field(alias="tenantId")]
    r"""Directory ID (tenant identifier) in Azure Active Directory"""

    client_id: Annotated[str, pydantic.Field(alias="clientId")]
    r"""client_id to pass in the OAuth request parameter"""

    scope: str
    r"""Scope to pass in the OAuth request parameter"""

    id: Optional[str] = None
    r"""Unique ID for this output"""

    type: Annotated[
        Optional[OutputAzureDataExplorerType], PlainValidator(validate_open_enum(False))
    ] = None

    pipeline: Optional[str] = None
    r"""Pipeline to process data before sending out to this output"""

    system_fields: Annotated[
        Optional[List[str]], pydantic.Field(alias="systemFields")
    ] = None
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""

    environment: Optional[str] = None
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""

    streamtags: Optional[List[str]] = None
    r"""Tags for filtering and grouping in @{product}"""

    validate_database_settings: Annotated[
        Optional[bool], pydantic.Field(alias="validateDatabaseSettings")
    ] = True
    r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""

    ingest_mode: Annotated[
        Annotated[Optional[IngestionMode], PlainValidator(validate_open_enum(False))],
        pydantic.Field(alias="ingestMode"),
    ] = IngestionMode.BATCHING

    oauth_endpoint: Annotated[
        Annotated[
            Optional[MicrosoftEntraIDAuthenticationEndpoint],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="oauthEndpoint"),
    ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
    r"""Endpoint used to acquire authentication tokens from Azure"""

    oauth_type: Annotated[
        Annotated[
            Optional[OutputAzureDataExplorerAuthenticationMethod],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="oauthType"),
    ] = OutputAzureDataExplorerAuthenticationMethod.CLIENT_SECRET
    r"""The type of OAuth 2.0 client credentials grant flow to use"""

    description: Optional[str] = None

    client_secret: Annotated[Optional[str], pydantic.Field(alias="clientSecret")] = None
    r"""The client secret that you generated for your app in the Azure portal"""

    text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
    r"""Select or create a stored text secret"""

    certificate: Optional[OutputAzureDataExplorerCertificate] = None

    ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
    r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""

    on_backpressure: Annotated[
        Annotated[
            Optional[OutputAzureDataExplorerBackpressureBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="onBackpressure"),
    ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
    r"""How to handle events when all receivers are exerting backpressure"""

    is_mapping_obj: Annotated[Optional[bool], pydantic.Field(alias="isMappingObj")] = (
        False
    )
    r"""Send a JSON mapping object instead of specifying an existing named data mapping"""

    format_: Annotated[
        Annotated[
            Optional[OutputAzureDataExplorerDataFormat],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="format"),
    ] = OutputAzureDataExplorerDataFormat.JSON
    r"""Format of the output data"""

    stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
        "$CRIBL_HOME/state/outputs/staging"
    )
    r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant and stable storage."""

    file_name_suffix: Annotated[
        Optional[str], pydantic.Field(alias="fileNameSuffix")
    ] = '`.${C.env["CRIBL_WORKER_ID"]}.${__format}${__compression === "gzip" ? ".gz" : ""}`'
    r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""

    max_file_size_mb: Annotated[
        Optional[float], pydantic.Field(alias="maxFileSizeMB")
    ] = 32
    r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""

    max_file_open_time_sec: Annotated[
        Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
    ] = 300
    r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""

    max_file_idle_time_sec: Annotated[
        Optional[float], pydantic.Field(alias="maxFileIdleTimeSec")
    ] = 30
    r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""

    max_open_files: Annotated[Optional[float], pydantic.Field(alias="maxOpenFiles")] = (
        100
    )
    r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""

    max_concurrent_file_parts: Annotated[
        Optional[float], pydantic.Field(alias="maxConcurrentFileParts")
    ] = 1
    r"""Maximum number of parts to upload in parallel per file"""

    on_disk_full_backpressure: Annotated[
        Annotated[
            Optional[OutputAzureDataExplorerDiskSpaceProtection],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="onDiskFullBackpressure"),
    ] = OutputAzureDataExplorerDiskSpaceProtection.BLOCK
    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

    add_id_to_stage_path: Annotated[
        Optional[bool], pydantic.Field(alias="addIdToStagePath")
    ] = True
    r"""Add the Output ID value to staging location"""

    remove_empty_dirs: Annotated[
        Optional[bool], pydantic.Field(alias="removeEmptyDirs")
    ] = True
    r"""Remove empty staging directories after moving files"""

    deadletter_enabled: Annotated[
        Optional[bool], pydantic.Field(alias="deadletterEnabled")
    ] = False
    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""

    timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 30
    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""

    flush_immediately: Annotated[
        Optional[bool], pydantic.Field(alias="flushImmediately")
    ] = False
    r"""Bypass the data management service's aggregation mechanism"""

    retain_blob_on_success: Annotated[
        Optional[bool], pydantic.Field(alias="retainBlobOnSuccess")
    ] = False
    r"""Prevent blob deletion after ingestion is complete"""

    extent_tags: Annotated[
        Optional[List[ExtentTag]], pydantic.Field(alias="extentTags")
    ] = None
    r"""Strings or tags associated with the extent (ingested data shard)"""

    ingest_if_not_exists: Annotated[
        Optional[List[IngestIfNotExist]], pydantic.Field(alias="ingestIfNotExists")
    ] = None
    r"""Prevents duplicate ingestion by verifying whether an extent with the specified ingest-by tag already exists"""

    report_level: Annotated[
        Annotated[Optional[ReportLevel], PlainValidator(validate_open_enum(False))],
        pydantic.Field(alias="reportLevel"),
    ] = ReportLevel.FAILURES_ONLY
    r"""Level of ingestion status reporting. Defaults to FailuresOnly."""

    report_method: Annotated[
        Annotated[Optional[ReportMethod], PlainValidator(validate_open_enum(False))],
        pydantic.Field(alias="reportMethod"),
    ] = ReportMethod.QUEUE
    r"""Target of the ingestion status reporting. Defaults to Queue."""

    additional_properties: Annotated[
        Optional[List[AdditionalProperty]], pydantic.Field(alias="additionalProperties")
    ] = None
    r"""Optionally, enter additional configuration properties to send to the ingestion service"""

    response_retry_settings: Annotated[
        Optional[List[OutputAzureDataExplorerResponseRetrySetting]],
        pydantic.Field(alias="responseRetrySettings"),
    ] = None
    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""

    timeout_retry_settings: Annotated[
        Optional[OutputAzureDataExplorerTimeoutRetrySettings],
        pydantic.Field(alias="timeoutRetrySettings"),
    ] = None

    response_honor_retry_after_header: Annotated[
        Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
    ] = False
    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

    compress: Annotated[
        Optional[OutputAzureDataExplorerCompressCompression],
        PlainValidator(validate_open_enum(False)),
    ] = OutputAzureDataExplorerCompressCompression.GZIP
    r"""Data compression format to apply to HTTP content before it is delivered"""

    mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""

    concurrency: Optional[float] = 5
    r"""Maximum number of ongoing requests before blocking"""

    max_payload_size_kb: Annotated[
        Optional[float], pydantic.Field(alias="maxPayloadSizeKB")
    ] = 4096
    r"""Maximum size, in KB, of the request body"""

    max_payload_events: Annotated[
        Optional[float], pydantic.Field(alias="maxPayloadEvents")
    ] = 0
    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""

    flush_period_sec: Annotated[
        Optional[float], pydantic.Field(alias="flushPeriodSec")
    ] = 1
    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""

    reject_unauthorized: Annotated[
        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
    ] = True
    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
    Enabled by default. When this setting is also present in TLS Settings (Client Side),
    that value will take precedence.
    """

    use_round_robin_dns: Annotated[
        Optional[bool], pydantic.Field(alias="useRoundRobinDns")
    ] = False
    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

    keep_alive: Annotated[Optional[bool], pydantic.Field(alias="keepAlive")] = True
    r"""Disable to close the connection immediately after sending the outgoing request"""

    pq_max_file_size: Annotated[
        Optional[str], pydantic.Field(alias="pqMaxFileSize")
    ] = "1 MB"
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""

    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""

    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
        "$CRIBL_HOME/state/queues"
    )
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

    pq_compress: Annotated[
        Annotated[
            Optional[OutputAzureDataExplorerPqCompressCompression],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqCompress"),
    ] = OutputAzureDataExplorerPqCompressCompression.NONE
    r"""Codec to use to compress the persisted data"""

    pq_on_backpressure: Annotated[
        Annotated[
            Optional[OutputAzureDataExplorerQueueFullBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqOnBackpressure"),
    ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    pq_mode: Annotated[
        Annotated[
            Optional[OutputAzureDataExplorerMode],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqMode"),
    ] = OutputAzureDataExplorerMode.ERROR
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    pq_controls: Annotated[
        Optional[OutputAzureDataExplorerPqControls], pydantic.Field(alias="pqControls")
    ] = None