cribl-control-plane 0.0.50__py3-none-any.whl → 0.0.50rc2__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of cribl-control-plane has been flagged by the registry.
- cribl_control_plane/_version.py +3 -5
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/groups_sdk.py +4 -4
- cribl_control_plane/health.py +6 -2
- cribl_control_plane/models/__init__.py +56 -31
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +24 -4
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/getversionshowop.py +6 -5
- cribl_control_plane/models/gitinfo.py +14 -3
- cribl_control_plane/models/gitshowresult.py +19 -0
- cribl_control_plane/models/hbcriblinfo.py +24 -3
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/heartbeatmetadata.py +3 -0
- cribl_control_plane/models/input.py +65 -63
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +41 -32
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +41 -28
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
- cribl_control_plane/models/inputmsk.py +48 -30
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeprovidedinfo.py +3 -0
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/output.py +84 -79
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +73 -28
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +35 -12
- cribl_control_plane/models/outputclickhouse.py +55 -20
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +71 -44
- cribl_control_plane/models/outputcriblhttp.py +44 -16
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
- cribl_control_plane/models/outputdatabricks.py +282 -0
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +53 -20
- cribl_control_plane/models/outputdynatraceotlp.py +55 -22
- cribl_control_plane/models/outputelastic.py +43 -18
- cribl_control_plane/models/outputelasticcloud.py +36 -12
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +50 -18
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +97 -32
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +35 -12
- cribl_control_plane/models/outputhumiohec.py +43 -16
- cribl_control_plane/models/outputinfluxdb.py +42 -16
- cribl_control_plane/models/outputkafka.py +69 -40
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +77 -42
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +41 -14
- cribl_control_plane/models/outputopentelemetry.py +67 -26
- cribl_control_plane/models/outputprometheus.py +35 -12
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
- cribl_control_plane/models/outputservicenow.py +60 -24
- cribl_control_plane/models/outputsignalfx.py +37 -14
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +35 -12
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +35 -12
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +35 -14
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/routeconf.py +3 -4
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/METADATA +1 -1
- cribl_control_plane-0.0.50rc2.dist-info/RECORD +327 -0
- cribl_control_plane/models/appmode.py +0 -13
- cribl_control_plane/models/routecloneconf.py +0 -13
- cribl_control_plane-0.0.50.dist-info/RECORD +0 -325
- {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/WHEEL +0 -0
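One note on the version pair above: under PEP 440, 0.0.50rc2 is a pre-release and sorts before 0.0.50, so a plain pip install will not select it unless the version is pinned or --pre is passed. A stdlib-only way to confirm which build is actually installed:

    # Report the installed build of the package this diff describes.
    import importlib.metadata

    print(importlib.metadata.version("cribl-control-plane"))  # "0.0.50" or "0.0.50rc2"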
cribl_control_plane/models/outputdatabricks.py (new file)
@@ -0,0 +1,282 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class OutputDatabricksType(str, Enum):
+    DATABRICKS = "databricks"
+
+
+class OutputDatabricksDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Format of the output data"""
+
+    JSON = "json"
+    RAW = "raw"
+    PARQUET = "parquet"
+
+
+class OutputDatabricksBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    BLOCK = "block"
+    DROP = "drop"
+
+
+class OutputDatabricksDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+    BLOCK = "block"
+    DROP = "drop"
+
+
+class OutputDatabricksAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
+
+    MANUAL = "manual"
+    SECRET = "secret"
+
+
+class OutputDatabricksTypedDict(TypedDict):
+    type: OutputDatabricksType
+    login_url: str
+    r"""URL for Unity Catalog OAuth token endpoint (example: 'https://your-workspace.cloud.databricks.com/oauth/token')"""
+    client_id: str
+    r"""JavaScript expression to compute the OAuth client ID for Unity Catalog authentication. Can be a constant."""
+    id: NotRequired[str]
+    r"""Unique ID for this output"""
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data before sending out to this output"""
+    system_fields: NotRequired[List[str]]
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    dest_path: NotRequired[str]
+    r"""Optional path to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myEventsVolumePath-${C.vars.myVar}`"""
+    stage_path: NotRequired[str]
+    r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
+    add_id_to_stage_path: NotRequired[bool]
+    r"""Add the Output ID value to staging location"""
+    remove_empty_dirs: NotRequired[bool]
+    r"""Remove empty staging directories after moving files"""
+    partition_expr: NotRequired[str]
+    r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
+    format_: NotRequired[OutputDatabricksDataFormat]
+    r"""Format of the output data"""
+    base_file_name: NotRequired[str]
+    r"""JavaScript expression to define the output filename prefix (can be constant)"""
+    file_name_suffix: NotRequired[str]
+    r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
+    max_file_size_mb: NotRequired[float]
+    r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+    max_file_open_time_sec: NotRequired[float]
+    r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+    max_file_idle_time_sec: NotRequired[float]
+    r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+    max_open_files: NotRequired[float]
+    r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+    header_line: NotRequired[str]
+    r"""If set, this line will be written to the beginning of each output file"""
+    write_high_water_mark: NotRequired[float]
+    r"""Buffer size used to write to a file"""
+    on_backpressure: NotRequired[OutputDatabricksBackpressureBehavior]
+    r"""How to handle events when all receivers are exerting backpressure"""
+    deadletter_enabled: NotRequired[bool]
+    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+    on_disk_full_backpressure: NotRequired[OutputDatabricksDiskSpaceProtection]
+    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    unity_auth_method: NotRequired[OutputDatabricksAuthenticationMethod]
+    r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
+    scope: NotRequired[str]
+    r"""OAuth scope for Unity Catalog authentication"""
+    token_timeout_secs: NotRequired[float]
+    r"""How often the OAuth token should be refreshed"""
+    default_catalog: NotRequired[str]
+    r"""Name of the catalog to use for the output"""
+    default_schema: NotRequired[str]
+    r"""Name of the catalog schema to use for the output"""
+    events_volume_name: NotRequired[str]
+    r"""Name of the events volume in Databricks"""
+    over_write_files: NotRequired[bool]
+    r"""Uploaded files should be overwritten if they already exist. If disabled, upload will fail if a file already exists."""
+    description: NotRequired[str]
+    client_secret: NotRequired[str]
+    r"""JavaScript expression to compute the OAuth client secret for Unity Catalog authentication. Can be a constant."""
+    client_text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+
+
+class OutputDatabricks(BaseModel):
+    type: OutputDatabricksType
+
+    login_url: Annotated[str, pydantic.Field(alias="loginUrl")]
+    r"""URL for Unity Catalog OAuth token endpoint (example: 'https://your-workspace.cloud.databricks.com/oauth/token')"""
+
+    client_id: Annotated[str, pydantic.Field(alias="clientId")]
+    r"""JavaScript expression to compute the OAuth client ID for Unity Catalog authentication. Can be a constant."""
+
+    id: Optional[str] = None
+    r"""Unique ID for this output"""
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data before sending out to this output"""
+
+    system_fields: Annotated[
+        Optional[List[str]], pydantic.Field(alias="systemFields")
+    ] = None
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    dest_path: Annotated[Optional[str], pydantic.Field(alias="destPath")] = ""
+    r"""Optional path to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myEventsVolumePath-${C.vars.myVar}`"""
+
+    stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
+        "$CRIBL_HOME/state/outputs/staging"
+    )
+    r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
+
+    add_id_to_stage_path: Annotated[
+        Optional[bool], pydantic.Field(alias="addIdToStagePath")
+    ] = True
+    r"""Add the Output ID value to staging location"""
+
+    remove_empty_dirs: Annotated[
+        Optional[bool], pydantic.Field(alias="removeEmptyDirs")
+    ] = True
+    r"""Remove empty staging directories after moving files"""
+
+    partition_expr: Annotated[Optional[str], pydantic.Field(alias="partitionExpr")] = (
+        "C.Time.strftime(_time ? _time : Date.now()/1000, '%Y/%m/%d')"
+    )
+    r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
+
+    format_: Annotated[
+        Annotated[
+            Optional[OutputDatabricksDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
+    ] = OutputDatabricksDataFormat.JSON
+    r"""Format of the output data"""
+
+    base_file_name: Annotated[Optional[str], pydantic.Field(alias="baseFileName")] = (
+        "`CriblOut`"
+    )
+    r"""JavaScript expression to define the output filename prefix (can be constant)"""
+
+    file_name_suffix: Annotated[
+        Optional[str], pydantic.Field(alias="fileNameSuffix")
+    ] = '`.${C.env["CRIBL_WORKER_ID"]}.${__format}${__compression === "gzip" ? ".gz" : ""}`'
+    r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
+
+    max_file_size_mb: Annotated[
+        Optional[float], pydantic.Field(alias="maxFileSizeMB")
+    ] = 32
+    r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+
+    max_file_open_time_sec: Annotated[
+        Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
+    ] = 300
+    r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+
+    max_file_idle_time_sec: Annotated[
+        Optional[float], pydantic.Field(alias="maxFileIdleTimeSec")
+    ] = 30
+    r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+
+    max_open_files: Annotated[Optional[float], pydantic.Field(alias="maxOpenFiles")] = (
+        100
+    )
+    r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+
+    header_line: Annotated[Optional[str], pydantic.Field(alias="headerLine")] = ""
+    r"""If set, this line will be written to the beginning of each output file"""
+
+    write_high_water_mark: Annotated[
+        Optional[float], pydantic.Field(alias="writeHighWaterMark")
+    ] = 64
+    r"""Buffer size used to write to a file"""
+
+    on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputDatabricksBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputDatabricksBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    deadletter_enabled: Annotated[
+        Optional[bool], pydantic.Field(alias="deadletterEnabled")
+    ] = False
+    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+
+    on_disk_full_backpressure: Annotated[
+        Annotated[
+            Optional[OutputDatabricksDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onDiskFullBackpressure"),
+    ] = OutputDatabricksDiskSpaceProtection.BLOCK
+    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+    unity_auth_method: Annotated[
+        Annotated[
+            Optional[OutputDatabricksAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="unityAuthMethod"),
+    ] = OutputDatabricksAuthenticationMethod.MANUAL
+    r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
+
+    scope: Optional[str] = "all-apis"
+    r"""OAuth scope for Unity Catalog authentication"""
+
+    token_timeout_secs: Annotated[
+        Optional[float], pydantic.Field(alias="tokenTimeoutSecs")
+    ] = 3600
+    r"""How often the OAuth token should be refreshed"""
+
+    default_catalog: Annotated[
+        Optional[str], pydantic.Field(alias="defaultCatalog")
+    ] = "main"
+    r"""Name of the catalog to use for the output"""
+
+    default_schema: Annotated[Optional[str], pydantic.Field(alias="defaultSchema")] = (
+        "external"
+    )
+    r"""Name of the catalog schema to use for the output"""
+
+    events_volume_name: Annotated[
+        Optional[str], pydantic.Field(alias="eventsVolumeName")
+    ] = "events"
+    r"""Name of the events volume in Databricks"""
+
+    over_write_files: Annotated[
+        Optional[bool], pydantic.Field(alias="overWriteFiles")
+    ] = False
+    r"""Uploaded files should be overwritten if they already exist. If disabled, upload will fail if a file already exists."""
+
+    description: Optional[str] = None
+
+    client_secret: Annotated[Optional[str], pydantic.Field(alias="clientSecret")] = None
+    r"""JavaScript expression to compute the OAuth client secret for Unity Catalog authentication. Can be a constant."""
+
+    client_text_secret: Annotated[
+        Optional[str], pydantic.Field(alias="clientTextSecret")
+    ] = None
+    r"""Select or create a stored text secret"""
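As a minimal sketch of driving the new destination model above: the field names, camelCase aliases, and defaults come straight from the generated file; the workspace URL is a placeholder, and the sketch assumes the standard pydantic v2 behavior of Speakeasy-generated models (validation accepts the wire-format aliases, and model_dump can emit them back).

    # Build the new Databricks destination from a wire-format dict. Only
    # type, loginUrl, and clientId are required; other fields fall back to
    # the generated defaults (format "json", defaultCatalog "main", etc.).
    from cribl_control_plane.models.outputdatabricks import OutputDatabricks

    out = OutputDatabricks.model_validate({
        "type": "databricks",
        "loginUrl": "https://your-workspace.cloud.databricks.com/oauth/token",  # placeholder
        "clientId": "'my-client-id'",  # a JavaScript expression; a quoted constant works
    })
    print(out.default_catalog)  # "main"
    print(out.model_dump(by_alias=True, exclude_none=True))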
cribl_control_plane/models/outputdatadog.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,14 +15,14 @@ class OutputDatadogType(str, Enum):
     DATADOG = "datadog"
 
 
-class SendLogsAs(str, Enum):
+class SendLogsAs(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The content type to use when sending logs"""
 
     TEXT = "text"
     JSON = "json"
 
 
-class OutputDatadogSeverity(str, Enum):
+class OutputDatadogSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""
 
     EMERGENCY = "emergency"
@@ -32,7 +35,7 @@ class OutputDatadogSeverity(str, Enum):
     DEBUG = "debug"
 
 
-class DatadogSite(str, Enum):
+class DatadogSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Datadog site to which events should be sent"""
 
     US = "us"
@@ -55,7 +58,7 @@ class OutputDatadogExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputDatadogFailedRequestLoggingMode(str, Enum):
+class OutputDatadogFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -117,7 +120,7 @@ class OutputDatadogTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputDatadogBackpressureBehavior(str, Enum):
+class OutputDatadogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -125,28 +128,28 @@ class OutputDatadogBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputDatadogAuthenticationMethod(str, Enum):
+class OutputDatadogAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter API key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputDatadogCompression(str, Enum):
+class OutputDatadogCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputDatadogQueueFullBehavior(str, Enum):
+class OutputDatadogQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputDatadogMode(str, Enum):
+class OutputDatadogMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -276,7 +279,8 @@ class OutputDatadog(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""
 
     content_type: Annotated[
-        Optional[SendLogsAs], pydantic.Field(alias="contentType")
+        Annotated[Optional[SendLogsAs], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="contentType"),
     ] = SendLogsAs.JSON
     r"""The content type to use when sending logs"""
 
@@ -303,10 +307,14 @@ class OutputDatadog(BaseModel):
     ] = False
     r"""Allow API key to be set from the event's '__agent_api_key' field"""
 
-    severity: Optional[OutputDatadogSeverity] = None
+    severity: Annotated[
+        Optional[OutputDatadogSeverity], PlainValidator(validate_open_enum(False))
+    ] = None
     r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""
 
-    site: Optional[DatadogSite] = DatadogSite.US
+    site: Annotated[
+        Optional[DatadogSite], PlainValidator(validate_open_enum(False))
+    ] = DatadogSite.US
     r"""Datadog site to which events should be sent"""
 
     send_counters_as_count: Annotated[
@@ -358,7 +366,10 @@ class OutputDatadog(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputDatadogFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputDatadogFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDatadogFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -385,13 +396,20 @@ class OutputDatadog(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputDatadogBackpressureBehavior],
+        Annotated[
+            Optional[OutputDatadogBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
        pydantic.Field(alias="onBackpressure"),
     ] = OutputDatadogBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputDatadogAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputDatadogAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputDatadogAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""
 
@@ -418,19 +436,29 @@ class OutputDatadog(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputDatadogCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputDatadogCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputDatadogCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputDatadogQueueFullBehavior],
+        Annotated[
+            Optional[OutputDatadogQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDatadogQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputDatadogMode], pydantic.Field(alias="pqMode")
-    ] = OutputDatadogMode.ERROR
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputDatadogMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputDatadogMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
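The outputdatadog.py hunks above make the one change that repeats across most files in this listing, and the outputdataset.py hunks below follow it verbatim: closed Enum classes become open enums via utils.OpenEnumMeta, and each enum-typed field is wrapped in PlainValidator(validate_open_enum(False)). The sketch below shows what that buys, assuming the usual Speakeasy open-enum contract (values the enum does not list are passed through rather than rejected; the exact semantics live in the SDK's utils module). The Demo model is a stand-in, not part of the SDK.

    # Stand-in model using the same open-enum pattern as the hunks above.
    from enum import Enum
    from typing import Optional

    from pydantic.functional_validators import PlainValidator
    from typing_extensions import Annotated

    from cribl_control_plane import utils
    from cribl_control_plane.types import BaseModel
    from cribl_control_plane.utils import validate_open_enum


    class Site(str, Enum, metaclass=utils.OpenEnumMeta):
        US = "us"
        EU = "eu"


    class Demo(BaseModel):
        site: Annotated[
            Optional[Site], PlainValidator(validate_open_enum(False))
        ] = Site.US


    # With a closed enum this would raise a ValidationError; an open enum
    # should carry the unlisted value through, so an older SDK keeps parsing
    # configs that use enum values introduced by a newer API.
    print(Demo.model_validate({"site": "us5"}).site)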
cribl_control_plane/models/outputdataset.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputDatasetType(str, Enum):
     DATASET = "dataset"
 
 
-class OutputDatasetSeverity(str, Enum):
+class OutputDatasetSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""
 
     FINEST = "finest"
@@ -78,7 +81,7 @@ class OutputDatasetTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class DataSetSite(str, Enum):
+class DataSetSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""DataSet site to which events should be sent"""
 
     US = "us"
@@ -97,7 +100,7 @@ class OutputDatasetExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputDatasetFailedRequestLoggingMode(str, Enum):
+class OutputDatasetFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -105,7 +108,7 @@ class OutputDatasetFailedRequestLoggingMode(str, Enum):
     NONE = "none"
 
 
-class OutputDatasetBackpressureBehavior(str, Enum):
+class OutputDatasetBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -113,28 +116,28 @@ class OutputDatasetBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputDatasetAuthenticationMethod(str, Enum):
+class OutputDatasetAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter API key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputDatasetCompression(str, Enum):
+class OutputDatasetCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputDatasetQueueFullBehavior(str, Enum):
+class OutputDatasetQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputDatasetMode(str, Enum):
+class OutputDatasetMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -272,7 +275,10 @@ class OutputDataset(BaseModel):
     r"""Name of the event field that contains the timestamp. If not specified, defaults to `ts`, `_time`, or `Date.now()`, in that order."""
 
     default_severity: Annotated[
-        Optional[OutputDatasetSeverity], pydantic.Field(alias="defaultSeverity")
+        Annotated[
+            Optional[OutputDatasetSeverity], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="defaultSeverity"),
     ] = OutputDatasetSeverity.INFO
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""
 
@@ -292,7 +298,9 @@ class OutputDataset(BaseModel):
     ] = False
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
-    site: Optional[DataSetSite] = DataSetSite.US
+    site: Annotated[
+        Optional[DataSetSite], PlainValidator(validate_open_enum(False))
+    ] = DataSetSite.US
     r"""DataSet site to which events should be sent"""
 
     concurrency: Optional[float] = 5
@@ -339,7 +347,10 @@ class OutputDataset(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputDatasetFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputDatasetFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDatasetFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -350,13 +361,20 @@ class OutputDataset(BaseModel):
     r"""List of headers that are safe to log in plain text"""
 
     on_backpressure: Annotated[
-        Optional[OutputDatasetBackpressureBehavior],
+        Annotated[
+            Optional[OutputDatasetBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDatasetBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputDatasetAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputDatasetAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
    ] = OutputDatasetAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""
 
@@ -383,19 +401,29 @@ class OutputDataset(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputDatasetCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputDatasetCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputDatasetCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputDatasetQueueFullBehavior],
+        Annotated[
+            Optional[OutputDatasetQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDatasetQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputDatasetMode], pydantic.Field(alias="pqMode")
-    ] = OutputDatasetMode.ERROR
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputDatasetMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputDatasetMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[