cribl-control-plane 0.0.49 → 0.1.0b1 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +4 -6
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/health.py +6 -2
- cribl_control_plane/models/__init__.py +68 -30
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +7 -2
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/getversionshowop.py +6 -5
- cribl_control_plane/models/gitshowresult.py +19 -0
- cribl_control_plane/models/hbcriblinfo.py +24 -3
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/heartbeatmetadata.py +3 -0
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +41 -32
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +41 -28
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
- cribl_control_plane/models/inputmsk.py +48 -30
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeprovidedinfo.py +3 -0
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/output.py +3 -0
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +73 -28
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +35 -12
- cribl_control_plane/models/outputclickhouse.py +55 -20
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +71 -44
- cribl_control_plane/models/outputcriblhttp.py +44 -16
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
- cribl_control_plane/models/outputdatabricks.py +439 -0
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +53 -20
- cribl_control_plane/models/outputdynatraceotlp.py +55 -22
- cribl_control_plane/models/outputelastic.py +43 -18
- cribl_control_plane/models/outputelasticcloud.py +36 -12
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +97 -32
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +35 -12
- cribl_control_plane/models/outputhumiohec.py +43 -16
- cribl_control_plane/models/outputinfluxdb.py +42 -16
- cribl_control_plane/models/outputkafka.py +69 -40
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +77 -42
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +41 -14
- cribl_control_plane/models/outputopentelemetry.py +67 -26
- cribl_control_plane/models/outputprometheus.py +35 -12
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
- cribl_control_plane/models/outputservicenow.py +60 -24
- cribl_control_plane/models/outputsignalfx.py +37 -14
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +35 -12
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +35 -12
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +35 -14
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/routeconf.py +3 -4
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0b1.dist-info}/METADATA +1 -1
- cribl_control_plane-0.1.0b1.dist-info/RECORD +327 -0
- cribl_control_plane/models/appmode.py +0 -13
- cribl_control_plane/models/routecloneconf.py +0 -13
- cribl_control_plane-0.0.49.dist-info/RECORD +0 -325
- {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.1.0b1.dist-info}/WHEEL +0 -0
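The hunks rendered below cover four of the model files listed above (outputdataset.py, outputdiskspool.py, outputdls3.py, outputdynatracehttp.py); judging by the +/- counts, the remaining input/output models appear to receive the same treatment. The change is uniform: every value-level enum gains `metaclass=utils.OpenEnumMeta`, and every field typed with such an enum is wrapped in `PlainValidator(validate_open_enum(False))`. In other words, 0.1.0b1 converts the SDK's closed enums into open enums that tolerate values this SDK version doesn't know about. The implementation of `cribl_control_plane.utils.OpenEnumMeta` is not part of this diff; the snippet below is only a sketch of the general open-enum idea, with a hypothetical fallback in the metaclass's `__call__`:

```python
# Illustrative sketch only: the real cribl_control_plane.utils.OpenEnumMeta is
# not shown in this diff. An "open" enum metaclass typically overrides member
# lookup so that an unknown value is passed through instead of raising
# ValueError.
from enum import Enum, EnumMeta


class OpenEnumMeta(EnumMeta):
    def __call__(cls, value, *args, **kwargs):
        try:
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            return value  # unknown member: keep the raw value


class Compression(str, Enum, metaclass=OpenEnumMeta):
    NONE = "none"
    GZIP = "gzip"


print(Compression("gzip"))  # Compression.GZIP
print(Compression("zstd"))  # "zstd" survives instead of raising ValueError
```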
cribl_control_plane/models/outputdataset.py

```diff
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputDatasetType(str, Enum):
     DATASET = "dataset"
 
 
-class OutputDatasetSeverity(str, Enum):
+class OutputDatasetSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""
 
     FINEST = "finest"
@@ -78,7 +81,7 @@ class OutputDatasetTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class DataSetSite(str, Enum):
+class DataSetSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""DataSet site to which events should be sent"""
 
     US = "us"
@@ -97,7 +100,7 @@ class OutputDatasetExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputDatasetFailedRequestLoggingMode(str, Enum):
+class OutputDatasetFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -105,7 +108,7 @@ class OutputDatasetFailedRequestLoggingMode(str, Enum):
     NONE = "none"
 
 
-class OutputDatasetBackpressureBehavior(str, Enum):
+class OutputDatasetBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -113,28 +116,28 @@ class OutputDatasetBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputDatasetAuthenticationMethod(str, Enum):
+class OutputDatasetAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter API key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputDatasetCompression(str, Enum):
+class OutputDatasetCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputDatasetQueueFullBehavior(str, Enum):
+class OutputDatasetQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputDatasetMode(str, Enum):
+class OutputDatasetMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -272,7 +275,10 @@ class OutputDataset(BaseModel):
     r"""Name of the event field that contains the timestamp. If not specified, defaults to `ts`, `_time`, or `Date.now()`, in that order."""
 
     default_severity: Annotated[
-        Optional[OutputDatasetSeverity], pydantic.Field(alias="defaultSeverity")
+        Annotated[
+            Optional[OutputDatasetSeverity], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="defaultSeverity"),
     ] = OutputDatasetSeverity.INFO
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""
 
@@ -292,7 +298,9 @@ class OutputDataset(BaseModel):
     ] = False
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
-    site: Optional[DataSetSite] = DataSetSite.US
+    site: Annotated[
+        Optional[DataSetSite], PlainValidator(validate_open_enum(False))
+    ] = DataSetSite.US
     r"""DataSet site to which events should be sent"""
 
     concurrency: Optional[float] = 5
@@ -339,7 +347,10 @@ class OutputDataset(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputDatasetFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputDatasetFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDatasetFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -350,13 +361,20 @@ class OutputDataset(BaseModel):
     r"""List of headers that are safe to log in plain text"""
 
     on_backpressure: Annotated[
-        Optional[OutputDatasetBackpressureBehavior],
+        Annotated[
+            Optional[OutputDatasetBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDatasetBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputDatasetAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputDatasetAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputDatasetAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""
 
@@ -383,19 +401,29 @@ class OutputDataset(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputDatasetCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputDatasetCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputDatasetCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputDatasetQueueFullBehavior],
+        Annotated[
+            Optional[OutputDatasetQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDatasetQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputDatasetMode], pydantic.Field(alias="pqMode")
-    ] = OutputDatasetMode.ERROR
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputDatasetMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputDatasetMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[
```
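The recurring field pattern in these hunks is a nested `Annotated`: the inner layer attaches the open-enum validator, the outer layer keeps the camelCase wire alias. Below is a self-contained Pydantic v2 sketch of the same shape; the `Severity` class and the `validate_open_enum` body are stand-ins (the SDK's real helper lives in `cribl_control_plane.utils`, and its implementation is not shown in this diff):

```python
# Stand-in sketch of the nested Annotated pattern (Pydantic v2). The local
# validate_open_enum below only illustrates a plausible shape: coerce to the
# enum when possible, otherwise keep the raw value.
from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Severity(str, Enum):
    INFO = "info"
    WARN = "warn"


def validate_open_enum(is_int: bool):
    def validate(value):
        if value is None:
            return None
        try:
            return Severity(int(value) if is_int else value)
        except ValueError:
            return value  # tolerate values added by newer servers

    return validate


class Example(pydantic.BaseModel):
    default_severity: Annotated[
        Annotated[Optional[Severity], PlainValidator(validate_open_enum(False))],
        pydantic.Field(alias="defaultSeverity"),
    ] = Severity.INFO


print(Example(defaultSeverity="warn").default_severity)      # Severity.WARN
print(Example(defaultSeverity="critical").default_severity)  # 'critical'
```

The nesting is harmless in practice: PEP 593 flattens `Annotated[Annotated[T, v], f]` into `Annotated[T, v, f]`, so the validator and the alias both apply to the same field.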
cribl_control_plane/models/outputdiskspool.py

```diff
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputDiskSpoolType(str, Enum):
     DISK_SPOOL = "disk_spool"
 
 
-class OutputDiskSpoolCompression(str, Enum):
+class OutputDiskSpoolCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data compression format. Default is gzip."""
 
     NONE = "none"
@@ -73,7 +76,9 @@ class OutputDiskSpool(BaseModel):
     max_data_time: Annotated[Optional[str], pydantic.Field(alias="maxDataTime")] = "24h"
     r"""Maximum amount of time to retain data before older buckets are deleted. Examples: 2h, 4d. Default is 24h."""
 
-    compress: Optional[OutputDiskSpoolCompression] = OutputDiskSpoolCompression.GZIP
+    compress: Annotated[
+        Optional[OutputDiskSpoolCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputDiskSpoolCompression.GZIP
     r"""Data compression format. Default is gzip."""
 
     partition_expr: Annotated[Optional[str], pydantic.Field(alias="partitionExpr")] = (
```
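Validation is the only side that changes; serialization is untouched by `PlainValidator`. Because these enums subclass `str`, a known member and a passed-through unknown string serialize the same way. Continuing the stand-in `Example` model from the sketch above:

```python
# Round-trip with the stand-in Example model defined in the previous sketch.
known = Example(defaultSeverity="warn")
unknown = Example(defaultSeverity="critical")

print(known.model_dump_json(by_alias=True))    # {"defaultSeverity":"warn"}
print(unknown.model_dump_json(by_alias=True))  # {"defaultSeverity":"critical"}
```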
cribl_control_plane/models/outputdls3.py

```diff
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputDlS3Type(str, Enum):
     DL_S3 = "dl_s3"
 
 
-class OutputDlS3AuthenticationMethod(str, Enum):
+class OutputDlS3AuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
     AUTO = "auto"
@@ -20,14 +23,14 @@ class OutputDlS3AuthenticationMethod(str, Enum):
     SECRET = "secret"
 
 
-class OutputDlS3SignatureVersion(str, Enum):
+class OutputDlS3SignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing S3 requests"""
 
     V2 = "v2"
     V4 = "v4"
 
 
-class OutputDlS3ObjectACL(str, Enum):
+class OutputDlS3ObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""
 
     PRIVATE = "private"
@@ -39,7 +42,7 @@ class OutputDlS3ObjectACL(str, Enum):
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
 
 
-class OutputDlS3StorageClass(str, Enum):
+class OutputDlS3StorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""
 
     STANDARD = "STANDARD"
@@ -52,12 +55,14 @@ class OutputDlS3StorageClass(str, Enum):
     DEEP_ARCHIVE = "DEEP_ARCHIVE"
 
 
-class OutputDlS3ServerSideEncryptionForUploadedObjects(str, Enum):
+class OutputDlS3ServerSideEncryptionForUploadedObjects(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     AES256 = "AES256"
     AWS_KMS = "aws:kms"
 
 
-class OutputDlS3DataFormat(str, Enum):
+class OutputDlS3DataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""
 
     JSON = "json"
@@ -65,28 +70,28 @@ class OutputDlS3DataFormat(str, Enum):
     PARQUET = "parquet"
 
 
-class OutputDlS3BackpressureBehavior(str, Enum):
+class OutputDlS3BackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputDlS3DiskSpaceProtection(str, Enum):
+class OutputDlS3DiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputDlS3Compression(str, Enum):
+class OutputDlS3Compression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputDlS3CompressionLevel(str, Enum):
+class OutputDlS3CompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""
 
     BEST_SPEED = "best_speed"
@@ -94,7 +99,7 @@ class OutputDlS3CompressionLevel(str, Enum):
     BEST_COMPRESSION = "best_compression"
 
 
-class OutputDlS3ParquetVersion(str, Enum):
+class OutputDlS3ParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""
 
     PARQUET_1_0 = "PARQUET_1_0"
@@ -102,7 +107,7 @@ class OutputDlS3ParquetVersion(str, Enum):
     PARQUET_2_6 = "PARQUET_2_6"
 
 
-class OutputDlS3DataPageVersion(str, Enum):
+class OutputDlS3DataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
 
     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -274,7 +279,10 @@ class OutputDlS3(BaseModel):
     r"""Secret key. This value can be a constant or a JavaScript expression. Example: `${C.env.SOME_SECRET}`)"""
 
     aws_authentication_method: Annotated[
-        Optional[OutputDlS3AuthenticationMethod],
+        Annotated[
+            Optional[OutputDlS3AuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputDlS3AuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -283,7 +291,11 @@ class OutputDlS3(BaseModel):
     r"""S3 service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to S3-compatible endpoint."""
 
     signature_version: Annotated[
-        Optional[OutputDlS3SignatureVersion], pydantic.Field(alias="signatureVersion")
+        Annotated[
+            Optional[OutputDlS3SignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="signatureVersion"),
     ] = OutputDlS3SignatureVersion.V4
     r"""Signature version to use for signing S3 requests"""
 
@@ -331,17 +343,26 @@ class OutputDlS3(BaseModel):
     r"""Prefix to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myKeyPrefix-${C.vars.myVar}`"""
 
     object_acl: Annotated[
-        Optional[OutputDlS3ObjectACL], pydantic.Field(alias="objectACL")
+        Annotated[
+            Optional[OutputDlS3ObjectACL], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="objectACL"),
     ] = OutputDlS3ObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""
 
     storage_class: Annotated[
-        Optional[OutputDlS3StorageClass], pydantic.Field(alias="storageClass")
+        Annotated[
+            Optional[OutputDlS3StorageClass], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="storageClass"),
     ] = None
     r"""Storage class to select for uploaded objects"""
 
     server_side_encryption: Annotated[
-        Optional[OutputDlS3ServerSideEncryptionForUploadedObjects],
+        Annotated[
+            Optional[OutputDlS3ServerSideEncryptionForUploadedObjects],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="serverSideEncryption"),
     ] = None
 
@@ -354,7 +375,10 @@ class OutputDlS3(BaseModel):
     r"""Remove empty staging directories after moving files"""
 
     format_: Annotated[
-        Optional[OutputDlS3DataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputDlS3DataFormat], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputDlS3DataFormat.JSON
     r"""Format of the output data"""
 
@@ -387,7 +411,11 @@ class OutputDlS3(BaseModel):
     r"""Buffer size used to write to a file"""
 
     on_backpressure: Annotated[
-        Optional[OutputDlS3BackpressureBehavior], pydantic.Field(alias="onBackpressure")
+        Annotated[
+            Optional[OutputDlS3BackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
     ] = OutputDlS3BackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
@@ -397,7 +425,10 @@ class OutputDlS3(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
 
     on_disk_full_backpressure: Annotated[
-        Optional[OutputDlS3DiskSpaceProtection],
+        Annotated[
+            Optional[OutputDlS3DiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputDlS3DiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -440,11 +471,17 @@ class OutputDlS3(BaseModel):
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""
 
-    compress: Optional[OutputDlS3Compression] = OutputDlS3Compression.GZIP
+    compress: Annotated[
+        Optional[OutputDlS3Compression], PlainValidator(validate_open_enum(False))
+    ] = OutputDlS3Compression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""
 
     compression_level: Annotated[
-        Optional[OutputDlS3CompressionLevel], pydantic.Field(alias="compressionLevel")
+        Annotated[
+            Optional[OutputDlS3CompressionLevel],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="compressionLevel"),
     ] = OutputDlS3CompressionLevel.BEST_SPEED
     r"""Compression level to apply before moving files to final destination"""
 
@@ -454,12 +491,19 @@ class OutputDlS3(BaseModel):
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
 
     parquet_version: Annotated[
-        Optional[OutputDlS3ParquetVersion], pydantic.Field(alias="parquetVersion")
+        Annotated[
+            Optional[OutputDlS3ParquetVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="parquetVersion"),
     ] = OutputDlS3ParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""
 
     parquet_data_page_version: Annotated[
-        Optional[OutputDlS3DataPageVersion],
+        Annotated[
+            Optional[OutputDlS3DataPageVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputDlS3DataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
```
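One practical consequence for consuming code: a field annotated as `Optional[OutputDlS3StorageClass]` can now hold a plain `str` at runtime when the control plane sends a value this SDK version doesn't list. Since the enums subclass `str`, simple equality against the string value covers both cases; an `isinstance` check distinguishes them when needed. A stand-in sketch (the class below mirrors, but is not, the SDK's `OutputDlS3StorageClass`):

```python
from enum import Enum


class StorageClass(str, Enum):  # stand-in for OutputDlS3StorageClass
    STANDARD = "STANDARD"
    DEEP_ARCHIVE = "DEEP_ARCHIVE"


def describe(sc) -> str:
    # sc may be a StorageClass member, or a raw str passed through by the
    # open-enum validator for values added after this SDK release.
    if isinstance(sc, StorageClass):
        return f"known storage class: {sc.value}"
    return f"unrecognized storage class: {sc}"


print(describe(StorageClass.DEEP_ARCHIVE))  # known storage class: DEEP_ARCHIVE
print(describe("EXPRESS_ONEZONE"))          # unrecognized storage class: EXPRESS_ONEZONE
```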
cribl_control_plane/models/outputdynatracehttp.py

```diff
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputDynatraceHTTPType(str, Enum):
     DYNATRACE_HTTP = "dynatrace_http"
 
 
-class OutputDynatraceHTTPMethod(str, Enum):
+class OutputDynatraceHTTPMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The method to use when sending events"""
 
     POST = "POST"
@@ -31,7 +34,9 @@ class OutputDynatraceHTTPExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputDynatraceHTTPFailedRequestLoggingMode(str, Enum):
+class OutputDynatraceHTTPFailedRequestLoggingMode(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -93,7 +98,7 @@ class OutputDynatraceHTTPTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputDynatraceHTTPBackpressureBehavior(str, Enum):
+class OutputDynatraceHTTPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -101,44 +106,44 @@ class OutputDynatraceHTTPBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputDynatraceHTTPAuthenticationType(str, Enum):
+class OutputDynatraceHTTPAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     TOKEN = "token"
     TEXT_SECRET = "textSecret"
 
 
-class OutputDynatraceHTTPFormat(str, Enum):
+class OutputDynatraceHTTPFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to format events before sending. Defaults to JSON. Plaintext is not currently supported."""
 
     JSON_ARRAY = "json_array"
     PLAINTEXT = "plaintext"
 
 
-class Endpoint(str, Enum):
+class Endpoint(str, Enum, metaclass=utils.OpenEnumMeta):
     CLOUD = "cloud"
     ACTIVE_GATE = "activeGate"
     MANUAL = "manual"
 
 
-class TelemetryType(str, Enum):
+class TelemetryType(str, Enum, metaclass=utils.OpenEnumMeta):
     LOGS = "logs"
     METRICS = "metrics"
 
 
-class OutputDynatraceHTTPCompression(str, Enum):
+class OutputDynatraceHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputDynatraceHTTPQueueFullBehavior(str, Enum):
+class OutputDynatraceHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputDynatraceHTTPMode(str, Enum):
+class OutputDynatraceHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -261,7 +266,9 @@ class OutputDynatraceHTTP(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    method: Optional[OutputDynatraceHTTPMethod] = OutputDynatraceHTTPMethod.POST
+    method: Annotated[
+        Optional[OutputDynatraceHTTPMethod], PlainValidator(validate_open_enum(False))
+    ] = OutputDynatraceHTTPMethod.POST
     r"""The method to use when sending events"""
 
     keep_alive: Annotated[Optional[bool], pydantic.Field(alias="keepAlive")] = True
@@ -311,7 +318,10 @@ class OutputDynatraceHTTP(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputDynatraceHTTPFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputDynatraceHTTPFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDynatraceHTTPFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -338,25 +348,38 @@ class OutputDynatraceHTTP(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputDynatraceHTTPBackpressureBehavior],
+        Annotated[
+            Optional[OutputDynatraceHTTPBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDynatraceHTTPBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputDynatraceHTTPAuthenticationType],
+        Annotated[
+            Optional[OutputDynatraceHTTPAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="authType"),
     ] = OutputDynatraceHTTPAuthenticationType.TOKEN
 
     format_: Annotated[
-        Optional[OutputDynatraceHTTPFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputDynatraceHTTPFormat],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputDynatraceHTTPFormat.JSON_ARRAY
     r"""How to format events before sending. Defaults to JSON. Plaintext is not currently supported."""
 
-    endpoint: Optional[Endpoint] = Endpoint.CLOUD
+    endpoint: Annotated[
+        Optional[Endpoint], PlainValidator(validate_open_enum(False))
+    ] = Endpoint.CLOUD
 
     telemetry_type: Annotated[
-        Optional[TelemetryType], pydantic.Field(alias="telemetryType")
+        Annotated[Optional[TelemetryType], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="telemetryType"),
     ] = TelemetryType.LOGS
 
     total_memory_limit_kb: Annotated[
@@ -380,18 +403,28 @@ class OutputDynatraceHTTP(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputDynatraceHTTPCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputDynatraceHTTPCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputDynatraceHTTPCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputDynatraceHTTPQueueFullBehavior],
+        Annotated[
+            Optional[OutputDynatraceHTTPQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDynatraceHTTPQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputDynatraceHTTPMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputDynatraceHTTPMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputDynatraceHTTPMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
```
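Taken together, the changes visible in this diff are mechanical and uniform: Speakeasy-regenerated models whose enums become open, one new Destination model (outputdatabricks.py, +439 lines), and the removal of the appmode.py and routecloneconf.py models. Also note that 0.1.0b1 is a PEP 440 pre-release, so pip will not select it for an unpinned `cribl-control-plane` requirement; installing it requires an explicit pin (`pip install cribl-control-plane==0.1.0b1`) or the `--pre` flag.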