cribl-control-plane 0.2.0a1__py3-none-any.whl → 0.2.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -3
- cribl_control_plane/groups_sdk.py +4 -4
- cribl_control_plane/lakedatasets.py +12 -12
- cribl_control_plane/models/__init__.py +35 -38
- cribl_control_plane/models/configgroup.py +17 -2
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/distributedsummary.py +6 -0
- cribl_control_plane/models/hbcriblinfo.py +14 -3
- cribl_control_plane/models/heartbeatmetadata.py +3 -0
- cribl_control_plane/models/input.py +65 -63
- cribl_control_plane/models/inputappscope.py +4 -0
- cribl_control_plane/models/inputazureblob.py +4 -0
- cribl_control_plane/models/inputcollection.py +4 -0
- cribl_control_plane/models/inputconfluentcloud.py +8 -18
- cribl_control_plane/models/inputcribl.py +4 -0
- cribl_control_plane/models/inputcriblhttp.py +4 -0
- cribl_control_plane/models/inputcribllakehttp.py +4 -0
- cribl_control_plane/models/inputcriblmetrics.py +4 -0
- cribl_control_plane/models/inputcribltcp.py +4 -0
- cribl_control_plane/models/inputcrowdstrike.py +7 -0
- cribl_control_plane/models/inputdatadogagent.py +4 -0
- cribl_control_plane/models/inputdatagen.py +4 -0
- cribl_control_plane/models/inputedgeprometheus.py +12 -0
- cribl_control_plane/models/inputelastic.py +11 -0
- cribl_control_plane/models/inputeventhub.py +6 -0
- cribl_control_plane/models/inputexec.py +4 -0
- cribl_control_plane/models/inputfile.py +6 -0
- cribl_control_plane/models/inputfirehose.py +4 -0
- cribl_control_plane/models/inputgooglepubsub.py +7 -0
- cribl_control_plane/models/inputgrafana.py +8 -0
- cribl_control_plane/models/inputhttp.py +4 -0
- cribl_control_plane/models/inputhttpraw.py +4 -0
- cribl_control_plane/models/inputjournalfiles.py +4 -0
- cribl_control_plane/models/inputkafka.py +8 -17
- cribl_control_plane/models/inputkinesis.py +15 -0
- cribl_control_plane/models/inputkubeevents.py +4 -0
- cribl_control_plane/models/inputkubelogs.py +4 -0
- cribl_control_plane/models/inputkubemetrics.py +4 -0
- cribl_control_plane/models/inputloki.py +4 -0
- cribl_control_plane/models/inputmetrics.py +4 -0
- cribl_control_plane/models/inputmodeldriventelemetry.py +4 -0
- cribl_control_plane/models/inputmsk.py +7 -17
- cribl_control_plane/models/inputnetflow.py +4 -0
- cribl_control_plane/models/inputoffice365mgmt.py +11 -0
- cribl_control_plane/models/inputoffice365msgtrace.py +11 -0
- cribl_control_plane/models/inputoffice365service.py +11 -0
- cribl_control_plane/models/inputopentelemetry.py +8 -0
- cribl_control_plane/models/inputprometheus.py +10 -0
- cribl_control_plane/models/inputprometheusrw.py +4 -0
- cribl_control_plane/models/inputrawudp.py +4 -0
- cribl_control_plane/models/inputs3.py +7 -0
- cribl_control_plane/models/inputs3inventory.py +7 -0
- cribl_control_plane/models/inputsecuritylake.py +7 -0
- cribl_control_plane/models/inputsnmp.py +11 -0
- cribl_control_plane/models/inputsplunk.py +9 -0
- cribl_control_plane/models/inputsplunkhec.py +4 -0
- cribl_control_plane/models/inputsplunksearch.py +7 -0
- cribl_control_plane/models/inputsqs.py +9 -0
- cribl_control_plane/models/inputsyslog.py +8 -0
- cribl_control_plane/models/inputsystemmetrics.py +32 -0
- cribl_control_plane/models/inputsystemstate.py +4 -0
- cribl_control_plane/models/inputtcp.py +4 -0
- cribl_control_plane/models/inputtcpjson.py +4 -0
- cribl_control_plane/models/inputwef.py +6 -0
- cribl_control_plane/models/inputwindowsmetrics.py +28 -0
- cribl_control_plane/models/inputwineventlogs.py +8 -0
- cribl_control_plane/models/inputwiz.py +7 -0
- cribl_control_plane/models/inputwizwebhook.py +4 -0
- cribl_control_plane/models/inputzscalerhec.py +4 -0
- cribl_control_plane/models/jobinfo.py +4 -1
- cribl_control_plane/models/nodeprovidedinfo.py +7 -1
- cribl_control_plane/models/output.py +85 -80
- cribl_control_plane/models/outputazureblob.py +20 -0
- cribl_control_plane/models/outputazuredataexplorer.py +28 -0
- cribl_control_plane/models/outputazureeventhub.py +17 -0
- cribl_control_plane/models/outputazurelogs.py +13 -0
- cribl_control_plane/models/outputchronicle.py +13 -0
- cribl_control_plane/models/outputclickhouse.py +17 -0
- cribl_control_plane/models/outputcloudwatch.py +13 -0
- cribl_control_plane/models/outputconfluentcloud.py +24 -18
- cribl_control_plane/models/outputcriblhttp.py +15 -0
- cribl_control_plane/models/outputcribllake.py +21 -0
- cribl_control_plane/models/outputcribltcp.py +12 -0
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +15 -0
- cribl_control_plane/models/outputdatabricks.py +291 -0
- cribl_control_plane/models/outputdatadog.py +30 -0
- cribl_control_plane/models/outputdataset.py +23 -0
- cribl_control_plane/models/outputdls3.py +35 -0
- cribl_control_plane/models/outputdynatracehttp.py +22 -0
- cribl_control_plane/models/outputdynatraceotlp.py +22 -0
- cribl_control_plane/models/outputelastic.py +18 -0
- cribl_control_plane/models/outputelasticcloud.py +13 -0
- cribl_control_plane/models/outputexabeam.py +14 -0
- cribl_control_plane/models/outputfilesystem.py +15 -0
- cribl_control_plane/models/outputgooglechronicle.py +21 -0
- cribl_control_plane/models/outputgooglecloudlogging.py +19 -0
- cribl_control_plane/models/outputgooglecloudstorage.py +28 -0
- cribl_control_plane/models/outputgooglepubsub.py +13 -0
- cribl_control_plane/models/outputgrafanacloud.py +50 -0
- cribl_control_plane/models/outputgraphite.py +12 -0
- cribl_control_plane/models/outputhoneycomb.py +13 -0
- cribl_control_plane/models/outputhumiohec.py +15 -0
- cribl_control_plane/models/outputinfluxdb.py +19 -0
- cribl_control_plane/models/outputkafka.py +24 -17
- cribl_control_plane/models/outputkinesis.py +15 -0
- cribl_control_plane/models/outputloki.py +20 -0
- cribl_control_plane/models/outputminio.py +28 -0
- cribl_control_plane/models/outputmsk.py +23 -17
- cribl_control_plane/models/outputnewrelic.py +16 -0
- cribl_control_plane/models/outputnewrelicevents.py +16 -0
- cribl_control_plane/models/outputopentelemetry.py +22 -0
- cribl_control_plane/models/outputprometheus.py +13 -0
- cribl_control_plane/models/outputring.py +2 -0
- cribl_control_plane/models/outputs3.py +35 -0
- cribl_control_plane/models/outputsecuritylake.py +29 -0
- cribl_control_plane/models/outputsentinel.py +15 -0
- cribl_control_plane/models/outputsentineloneaisiem.py +13 -0
- cribl_control_plane/models/outputservicenow.py +21 -0
- cribl_control_plane/models/outputsignalfx.py +13 -0
- cribl_control_plane/models/outputsns.py +13 -0
- cribl_control_plane/models/outputsplunk.py +15 -0
- cribl_control_plane/models/outputsplunkhec.py +13 -0
- cribl_control_plane/models/outputsplunklb.py +15 -0
- cribl_control_plane/models/outputsqs.py +15 -0
- cribl_control_plane/models/outputstatsd.py +12 -0
- cribl_control_plane/models/outputstatsdext.py +12 -0
- cribl_control_plane/models/outputsumologic.py +15 -0
- cribl_control_plane/models/outputsyslog.py +24 -0
- cribl_control_plane/models/outputtcpjson.py +12 -0
- cribl_control_plane/models/outputwavefront.py +13 -0
- cribl_control_plane/models/outputwebhook.py +23 -0
- cribl_control_plane/models/outputxsiam.py +13 -0
- cribl_control_plane/models/packinfo.py +3 -0
- cribl_control_plane/models/packinstallinfo.py +3 -0
- cribl_control_plane/models/routeconf.py +3 -4
- cribl_control_plane/models/runnablejobcollection.py +4 -0
- cribl_control_plane/models/updatecribllakedatasetbylakeidandidop.py +5 -9
- {cribl_control_plane-0.2.0a1.dist-info → cribl_control_plane-0.2.0b1.dist-info}/METADATA +1 -1
- {cribl_control_plane-0.2.0a1.dist-info → cribl_control_plane-0.2.0b1.dist-info}/RECORD +140 -142
- cribl_control_plane/models/appmode.py +0 -14
- cribl_control_plane/models/cribllakedatasetupdate.py +0 -81
- cribl_control_plane/models/routecloneconf.py +0 -13
- {cribl_control_plane-0.2.0a1.dist-info → cribl_control_plane-0.2.0b1.dist-info}/WHEEL +0 -0
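One upgrade note falls out of the file list above: the modules appmode.py, cribllakedatasetupdate.py, and routecloneconf.py are deleted outright, so any code that imports them will fail on 0.2.0b1. A minimal defensive sketch, assuming nothing beyond the module paths shown above:

    # Probe for a module that 0.2.0b1 removes, instead of importing it
    # unconditionally; works on both the a1 and b1 wheels.
    import importlib.util

    LEGACY_MODULE = "cribl_control_plane.models.cribllakedatasetupdate"

    if importlib.util.find_spec(LEGACY_MODULE) is None:
        print("0.2.0b1 or later: legacy lake-dataset update model is gone")
    else:
        print("legacy model still present (0.2.0a1 or earlier)")

The same probe works for the other two removed modules; the changed updatecribllakedatasetbylakeidandidop.py (+5 -9) suggests the update operation now accepts a different model.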
cribl_control_plane/models/outputdatabricks.py (new file)
@@ -0,0 +1,291 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class OutputDatabricksType(str, Enum):
+    DATABRICKS = "databricks"
+
+
+class OutputDatabricksDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Format of the output data"""
+
+    # JSON
+    JSON = "json"
+    # Raw
+    RAW = "raw"
+    # Parquet
+    PARQUET = "parquet"
+
+
+class OutputDatabricksBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    # Block
+    BLOCK = "block"
+    # Drop
+    DROP = "drop"
+
+
+class OutputDatabricksDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+    # Block
+    BLOCK = "block"
+    # Drop
+    DROP = "drop"
+
+
+class OutputDatabricksAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
+
+    # Manual
+    MANUAL = "manual"
+    # Secret Key pair
+    SECRET = "secret"
+
+
+class OutputDatabricksTypedDict(TypedDict):
+    type: OutputDatabricksType
+    login_url: str
+    r"""URL for Unity Catalog OAuth token endpoint (example: 'https://your-workspace.cloud.databricks.com/oauth/token')"""
+    client_id: str
+    r"""JavaScript expression to compute the OAuth client ID for Unity Catalog authentication. Can be a constant."""
+    id: NotRequired[str]
+    r"""Unique ID for this output"""
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data before sending out to this output"""
+    system_fields: NotRequired[List[str]]
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    dest_path: NotRequired[str]
+    r"""Optional path to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myEventsVolumePath-${C.vars.myVar}`"""
+    stage_path: NotRequired[str]
+    r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
+    add_id_to_stage_path: NotRequired[bool]
+    r"""Add the Output ID value to staging location"""
+    remove_empty_dirs: NotRequired[bool]
+    r"""Remove empty staging directories after moving files"""
+    partition_expr: NotRequired[str]
+    r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
+    format_: NotRequired[OutputDatabricksDataFormat]
+    r"""Format of the output data"""
+    base_file_name: NotRequired[str]
+    r"""JavaScript expression to define the output filename prefix (can be constant)"""
+    file_name_suffix: NotRequired[str]
+    r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
+    max_file_size_mb: NotRequired[float]
+    r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+    max_file_open_time_sec: NotRequired[float]
+    r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+    max_file_idle_time_sec: NotRequired[float]
+    r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+    max_open_files: NotRequired[float]
+    r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+    header_line: NotRequired[str]
+    r"""If set, this line will be written to the beginning of each output file"""
+    write_high_water_mark: NotRequired[float]
+    r"""Buffer size used to write to a file"""
+    on_backpressure: NotRequired[OutputDatabricksBackpressureBehavior]
+    r"""How to handle events when all receivers are exerting backpressure"""
+    deadletter_enabled: NotRequired[bool]
+    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+    on_disk_full_backpressure: NotRequired[OutputDatabricksDiskSpaceProtection]
+    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+    unity_auth_method: NotRequired[OutputDatabricksAuthenticationMethod]
+    r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
+    scope: NotRequired[str]
+    r"""OAuth scope for Unity Catalog authentication"""
+    token_timeout_secs: NotRequired[float]
+    r"""How often the OAuth token should be refreshed"""
+    default_catalog: NotRequired[str]
+    r"""Name of the catalog to use for the output"""
+    default_schema: NotRequired[str]
+    r"""Name of the catalog schema to use for the output"""
+    events_volume_name: NotRequired[str]
+    r"""Name of the events volume in Databricks"""
+    over_write_files: NotRequired[bool]
+    r"""Uploaded files should be overwritten if they already exist. If disabled, upload will fail if a file already exists."""
+    description: NotRequired[str]
+    client_secret: NotRequired[str]
+    r"""JavaScript expression to compute the OAuth client secret for Unity Catalog authentication. Can be a constant."""
+    client_text_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+
+
+class OutputDatabricks(BaseModel):
+    type: OutputDatabricksType
+
+    login_url: Annotated[str, pydantic.Field(alias="loginUrl")]
+    r"""URL for Unity Catalog OAuth token endpoint (example: 'https://your-workspace.cloud.databricks.com/oauth/token')"""
+
+    client_id: Annotated[str, pydantic.Field(alias="clientId")]
+    r"""JavaScript expression to compute the OAuth client ID for Unity Catalog authentication. Can be a constant."""
+
+    id: Optional[str] = None
+    r"""Unique ID for this output"""
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data before sending out to this output"""
+
+    system_fields: Annotated[
+        Optional[List[str]], pydantic.Field(alias="systemFields")
+    ] = None
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    dest_path: Annotated[Optional[str], pydantic.Field(alias="destPath")] = ""
+    r"""Optional path to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myEventsVolumePath-${C.vars.myVar}`"""
+
+    stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
+        "$CRIBL_HOME/state/outputs/staging"
+    )
+    r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant, stable storage."""
+
+    add_id_to_stage_path: Annotated[
+        Optional[bool], pydantic.Field(alias="addIdToStagePath")
+    ] = True
+    r"""Add the Output ID value to staging location"""
+
+    remove_empty_dirs: Annotated[
+        Optional[bool], pydantic.Field(alias="removeEmptyDirs")
+    ] = True
+    r"""Remove empty staging directories after moving files"""
+
+    partition_expr: Annotated[Optional[str], pydantic.Field(alias="partitionExpr")] = (
+        "C.Time.strftime(_time ? _time : Date.now()/1000, '%Y/%m/%d')"
+    )
+    r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
+
+    format_: Annotated[
+        Annotated[
+            Optional[OutputDatabricksDataFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
+    ] = OutputDatabricksDataFormat.JSON
+    r"""Format of the output data"""
+
+    base_file_name: Annotated[Optional[str], pydantic.Field(alias="baseFileName")] = (
+        "`CriblOut`"
+    )
+    r"""JavaScript expression to define the output filename prefix (can be constant)"""
+
+    file_name_suffix: Annotated[
+        Optional[str], pydantic.Field(alias="fileNameSuffix")
+    ] = '`.${C.env["CRIBL_WORKER_ID"]}.${__format}${__compression === "gzip" ? ".gz" : ""}`'
+    r"""JavaScript expression to define the output filename suffix (can be constant). The `__format` variable refers to the value of the `Data format` field (`json` or `raw`). The `__compression` field refers to the kind of compression being used (`none` or `gzip`)."""
+
+    max_file_size_mb: Annotated[
+        Optional[float], pydantic.Field(alias="maxFileSizeMB")
+    ] = 32
+    r"""Maximum uncompressed output file size. Files of this size will be closed and moved to final output location."""
+
+    max_file_open_time_sec: Annotated[
+        Optional[float], pydantic.Field(alias="maxFileOpenTimeSec")
+    ] = 300
+    r"""Maximum amount of time to write to a file. Files open for longer than this will be closed and moved to final output location."""
+
+    max_file_idle_time_sec: Annotated[
+        Optional[float], pydantic.Field(alias="maxFileIdleTimeSec")
+    ] = 30
+    r"""Maximum amount of time to keep inactive files open. Files open for longer than this will be closed and moved to final output location."""
+
+    max_open_files: Annotated[Optional[float], pydantic.Field(alias="maxOpenFiles")] = (
+        100
+    )
+    r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
+
+    header_line: Annotated[Optional[str], pydantic.Field(alias="headerLine")] = ""
+    r"""If set, this line will be written to the beginning of each output file"""
+
+    write_high_water_mark: Annotated[
+        Optional[float], pydantic.Field(alias="writeHighWaterMark")
+    ] = 64
+    r"""Buffer size used to write to a file"""
+
+    on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputDatabricksBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputDatabricksBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    deadletter_enabled: Annotated[
+        Optional[bool], pydantic.Field(alias="deadletterEnabled")
+    ] = False
+    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+
+    on_disk_full_backpressure: Annotated[
+        Annotated[
+            Optional[OutputDatabricksDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onDiskFullBackpressure"),
+    ] = OutputDatabricksDiskSpaceProtection.BLOCK
+    r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
+
+    unity_auth_method: Annotated[
+        Annotated[
+            Optional[OutputDatabricksAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="unityAuthMethod"),
+    ] = OutputDatabricksAuthenticationMethod.MANUAL
+    r"""Unity Catalog authentication method. Choose Manual to enter credentials directly, or Secret to use a stored secret."""
+
+    scope: Optional[str] = "all-apis"
+    r"""OAuth scope for Unity Catalog authentication"""
+
+    token_timeout_secs: Annotated[
+        Optional[float], pydantic.Field(alias="tokenTimeoutSecs")
+    ] = 3600
+    r"""How often the OAuth token should be refreshed"""
+
+    default_catalog: Annotated[
+        Optional[str], pydantic.Field(alias="defaultCatalog")
+    ] = "main"
+    r"""Name of the catalog to use for the output"""
+
+    default_schema: Annotated[Optional[str], pydantic.Field(alias="defaultSchema")] = (
+        "external"
+    )
+    r"""Name of the catalog schema to use for the output"""
+
+    events_volume_name: Annotated[
+        Optional[str], pydantic.Field(alias="eventsVolumeName")
+    ] = "events"
+    r"""Name of the events volume in Databricks"""
+
+    over_write_files: Annotated[
+        Optional[bool], pydantic.Field(alias="overWriteFiles")
+    ] = False
+    r"""Uploaded files should be overwritten if they already exist. If disabled, upload will fail if a file already exists."""
+
+    description: Optional[str] = None
+
+    client_secret: Annotated[Optional[str], pydantic.Field(alias="clientSecret")] = None
+    r"""JavaScript expression to compute the OAuth client secret for Unity Catalog authentication. Can be a constant."""
+
+    client_text_secret: Annotated[
+        Optional[str], pydantic.Field(alias="clientTextSecret")
+    ] = None
+    r"""Select or create a stored text secret"""
cribl_control_plane/models/outputdatadog.py
@@ -18,32 +18,49 @@ class OutputDatadogType(str, Enum):
 class SendLogsAs(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The content type to use when sending logs"""
 
+    # text/plain
     TEXT = "text"
+    # application/json
     JSON = "json"
 
 
 class OutputDatadogSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""
 
+    # emergency
     EMERGENCY = "emergency"
+    # alert
     ALERT = "alert"
+    # critical
     CRITICAL = "critical"
+    # error
     ERROR = "error"
+    # warning
     WARNING = "warning"
+    # notice
     NOTICE = "notice"
+    # info
     INFO = "info"
+    # debug
     DEBUG = "debug"
 
 
 class DatadogSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Datadog site to which events should be sent"""
 
+    # US
     US = "us"
+    # US3
     US3 = "us3"
+    # US5
     US5 = "us5"
+    # Europe
     EU = "eu"
+    # US1-FED
     FED1 = "fed1"
+    # AP1
     AP1 = "ap1"
+    # Custom
     CUSTOM = "custom"
 
 
@@ -61,8 +78,11 @@ class OutputDatadogExtraHTTPHeader(BaseModel):
 class OutputDatadogFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"
 
 
@@ -123,8 +143,11 @@ class OutputDatadogTimeoutRetrySettings(BaseModel):
 class OutputDatadogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -138,22 +161,29 @@ class OutputDatadogAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta)
 class OutputDatadogCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputDatadogQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputDatadogMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
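A pattern worth calling out before the remaining hunks: the enums in these hunks are declared with metaclass=utils.OpenEnumMeta, and the corresponding model fields are validated with PlainValidator(validate_open_enum(False)), which marks them as open enums. The comments this release adds are display labels only. A hedged sketch of the caller-visible behavior, assuming the same models re-export as above and Speakeasy's usual open-enum semantics (unknown strings pass validation rather than raising):

    # Open-enum members still behave like ordinary str-valued enum members:
    from cribl_control_plane.models import OutputDatadogSeverity

    assert OutputDatadogSeverity.WARNING == "warning"
    assert OutputDatadogSeverity("notice") is OutputDatadogSeverity.NOTICE

    # Assumed (not verified here): a model field wrapped in
    # PlainValidator(validate_open_enum(False)) also tolerates values outside
    # this list, e.g. a severity added server-side in a newer Cribl release,
    # surfacing it as a plain string instead of raising a ValidationError.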
cribl_control_plane/models/outputdataset.py
@@ -18,12 +18,19 @@ class OutputDatasetType(str, Enum):
 class OutputDatasetSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""
 
+    # 0 - finest
     FINEST = "finest"
+    # 1 - finer
     FINER = "finer"
+    # 2 - fine
     FINE = "fine"
+    # 3 - info
     INFO = "info"
+    # 4 - warning
     WARNING = "warning"
+    # 5 - error
     ERROR = "error"
+    # 6 - fatal
     FATAL = "fatal"
 
 
@@ -84,8 +91,11 @@ class OutputDatasetTimeoutRetrySettings(BaseModel):
 class DataSetSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""DataSet site to which events should be sent"""
 
+    # US
     US = "us"
+    # Europe
     EU = "eu"
+    # Custom
     CUSTOM = "custom"
 
 
@@ -103,16 +113,22 @@ class OutputDatasetExtraHTTPHeader(BaseModel):
 class OutputDatasetFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"
 
 
 class OutputDatasetBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -126,22 +142,29 @@ class OutputDatasetAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta)
 class OutputDatasetCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputDatasetQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputDatasetMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"
cribl_control_plane/models/outputdls3.py
@@ -18,8 +18,11 @@ class OutputDlS3Type(str, Enum):
 class OutputDlS3AuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
+    # Auto
     AUTO = "auto"
+    # Manual
     MANUAL = "manual"
+    # Secret Key pair
     SECRET = "secret"
 
 
@@ -33,54 +36,78 @@ class OutputDlS3SignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputDlS3ObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""
 
+    # Private
     PRIVATE = "private"
+    # Public Read Only
     PUBLIC_READ = "public-read"
+    # Public Read/Write
     PUBLIC_READ_WRITE = "public-read-write"
+    # Authenticated Read Only
     AUTHENTICATED_READ = "authenticated-read"
+    # AWS EC2 AMI Read Only
     AWS_EXEC_READ = "aws-exec-read"
+    # Bucket Owner Read Only
     BUCKET_OWNER_READ = "bucket-owner-read"
+    # Bucket Owner Full Control
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
 
 
 class OutputDlS3StorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""
 
+    # Standard
     STANDARD = "STANDARD"
+    # Reduced Redundancy Storage
     REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
+    # Standard, Infrequent Access
     STANDARD_IA = "STANDARD_IA"
+    # One Zone, Infrequent Access
     ONEZONE_IA = "ONEZONE_IA"
+    # Intelligent Tiering
     INTELLIGENT_TIERING = "INTELLIGENT_TIERING"
+    # Glacier Flexible Retrieval
     GLACIER = "GLACIER"
+    # Glacier Instant Retrieval
     GLACIER_IR = "GLACIER_IR"
+    # Glacier Deep Archive
     DEEP_ARCHIVE = "DEEP_ARCHIVE"
 
 
 class OutputDlS3ServerSideEncryptionForUploadedObjects(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
+    # Amazon S3 Managed Key
     AES256 = "AES256"
+    # AWS KMS Managed Key
     AWS_KMS = "aws:kms"
 
 
 class OutputDlS3DataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""
 
+    # JSON
     JSON = "json"
+    # Raw
     RAW = "raw"
+    # Parquet
     PARQUET = "parquet"
 
 
 class OutputDlS3BackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
 
 
 class OutputDlS3DiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
 
 
@@ -94,23 +121,31 @@ class OutputDlS3Compression(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputDlS3CompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""
 
+    # Best Speed
     BEST_SPEED = "best_speed"
+    # Normal
     NORMAL = "normal"
+    # Best Compression
     BEST_COMPRESSION = "best_compression"
 
 
 class OutputDlS3ParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""
 
+    # 1.0
     PARQUET_1_0 = "PARQUET_1_0"
+    # 2.4
     PARQUET_2_4 = "PARQUET_2_4"
+    # 2.6
     PARQUET_2_6 = "PARQUET_2_6"
 
 
 class OutputDlS3DataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
 
+    # V1
     DATA_PAGE_V1 = "DATA_PAGE_V1"
+    # V2
     DATA_PAGE_V2 = "DATA_PAGE_V2"
cribl_control_plane/models/outputdynatracehttp.py
@@ -39,8 +39,11 @@ class OutputDynatraceHTTPFailedRequestLoggingMode(
 ):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"
 
 
@@ -101,53 +104,72 @@ class OutputDynatraceHTTPTimeoutRetrySettings(BaseModel):
 class OutputDynatraceHTTPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"
 
 
 class OutputDynatraceHTTPAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+    # Auth token
     TOKEN = "token"
+    # Token (text secret)
     TEXT_SECRET = "textSecret"
 
 
 class OutputDynatraceHTTPFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to format events before sending. Defaults to JSON. Plaintext is not currently supported."""
 
+    # JSON
     JSON_ARRAY = "json_array"
+    # Plaintext
     PLAINTEXT = "plaintext"
 
 
 class Endpoint(str, Enum, metaclass=utils.OpenEnumMeta):
+    # Cloud
     CLOUD = "cloud"
+    # ActiveGate
     ACTIVE_GATE = "activeGate"
+    # Manual
     MANUAL = "manual"
 
 
 class TelemetryType(str, Enum, metaclass=utils.OpenEnumMeta):
+    # Logs
     LOGS = "logs"
+    # Metrics
     METRICS = "metrics"
 
 
 class OutputDynatraceHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"
 
 
 class OutputDynatraceHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"
 
 
 class OutputDynatraceHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"