cribl-control-plane 0.0.50__py3-none-any.whl → 0.0.50rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +3 -5
- cribl_control_plane/errors/healthstatus_error.py +8 -2
- cribl_control_plane/groups_sdk.py +4 -4
- cribl_control_plane/health.py +6 -2
- cribl_control_plane/models/__init__.py +56 -31
- cribl_control_plane/models/cacheconnection.py +10 -2
- cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
- cribl_control_plane/models/cloudprovider.py +2 -1
- cribl_control_plane/models/configgroup.py +24 -4
- cribl_control_plane/models/configgroupcloud.py +6 -2
- cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
- cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
- cribl_control_plane/models/createversionpushop.py +5 -5
- cribl_control_plane/models/cribllakedataset.py +8 -2
- cribl_control_plane/models/datasetmetadata.py +8 -2
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +7 -2
- cribl_control_plane/models/getversionshowop.py +6 -5
- cribl_control_plane/models/gitinfo.py +14 -3
- cribl_control_plane/models/gitshowresult.py +19 -0
- cribl_control_plane/models/hbcriblinfo.py +24 -3
- cribl_control_plane/models/healthstatus.py +7 -4
- cribl_control_plane/models/heartbeatmetadata.py +3 -0
- cribl_control_plane/models/input.py +65 -63
- cribl_control_plane/models/inputappscope.py +34 -14
- cribl_control_plane/models/inputazureblob.py +17 -6
- cribl_control_plane/models/inputcollection.py +11 -4
- cribl_control_plane/models/inputconfluentcloud.py +41 -32
- cribl_control_plane/models/inputcribl.py +11 -4
- cribl_control_plane/models/inputcriblhttp.py +23 -8
- cribl_control_plane/models/inputcribllakehttp.py +22 -10
- cribl_control_plane/models/inputcriblmetrics.py +12 -4
- cribl_control_plane/models/inputcribltcp.py +23 -8
- cribl_control_plane/models/inputcrowdstrike.py +26 -10
- cribl_control_plane/models/inputdatadogagent.py +24 -8
- cribl_control_plane/models/inputdatagen.py +11 -4
- cribl_control_plane/models/inputedgeprometheus.py +58 -24
- cribl_control_plane/models/inputelastic.py +40 -14
- cribl_control_plane/models/inputeventhub.py +15 -6
- cribl_control_plane/models/inputexec.py +14 -6
- cribl_control_plane/models/inputfile.py +15 -6
- cribl_control_plane/models/inputfirehose.py +23 -8
- cribl_control_plane/models/inputgooglepubsub.py +19 -6
- cribl_control_plane/models/inputgrafana.py +67 -24
- cribl_control_plane/models/inputhttp.py +23 -8
- cribl_control_plane/models/inputhttpraw.py +23 -8
- cribl_control_plane/models/inputjournalfiles.py +12 -4
- cribl_control_plane/models/inputkafka.py +41 -28
- cribl_control_plane/models/inputkinesis.py +38 -14
- cribl_control_plane/models/inputkubeevents.py +11 -4
- cribl_control_plane/models/inputkubelogs.py +16 -8
- cribl_control_plane/models/inputkubemetrics.py +16 -8
- cribl_control_plane/models/inputloki.py +29 -10
- cribl_control_plane/models/inputmetrics.py +23 -8
- cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
- cribl_control_plane/models/inputmsk.py +48 -30
- cribl_control_plane/models/inputnetflow.py +11 -4
- cribl_control_plane/models/inputoffice365mgmt.py +33 -14
- cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
- cribl_control_plane/models/inputoffice365service.py +35 -16
- cribl_control_plane/models/inputopentelemetry.py +38 -16
- cribl_control_plane/models/inputprometheus.py +50 -18
- cribl_control_plane/models/inputprometheusrw.py +30 -10
- cribl_control_plane/models/inputrawudp.py +11 -4
- cribl_control_plane/models/inputs3.py +21 -8
- cribl_control_plane/models/inputs3inventory.py +26 -10
- cribl_control_plane/models/inputsecuritylake.py +27 -10
- cribl_control_plane/models/inputsnmp.py +16 -6
- cribl_control_plane/models/inputsplunk.py +33 -12
- cribl_control_plane/models/inputsplunkhec.py +29 -10
- cribl_control_plane/models/inputsplunksearch.py +33 -14
- cribl_control_plane/models/inputsqs.py +27 -10
- cribl_control_plane/models/inputsyslog.py +43 -16
- cribl_control_plane/models/inputsystemmetrics.py +48 -24
- cribl_control_plane/models/inputsystemstate.py +16 -8
- cribl_control_plane/models/inputtcp.py +29 -10
- cribl_control_plane/models/inputtcpjson.py +29 -10
- cribl_control_plane/models/inputwef.py +37 -14
- cribl_control_plane/models/inputwindowsmetrics.py +44 -24
- cribl_control_plane/models/inputwineventlogs.py +20 -10
- cribl_control_plane/models/inputwiz.py +21 -8
- cribl_control_plane/models/inputwizwebhook.py +23 -8
- cribl_control_plane/models/inputzscalerhec.py +29 -10
- cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
- cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
- cribl_control_plane/models/masterworkerentry.py +7 -2
- cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
- cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeprovidedinfo.py +3 -0
- cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
- cribl_control_plane/models/nodeupgradestate.py +2 -1
- cribl_control_plane/models/nodeupgradestatus.py +13 -5
- cribl_control_plane/models/output.py +84 -79
- cribl_control_plane/models/outputazureblob.py +48 -18
- cribl_control_plane/models/outputazuredataexplorer.py +73 -28
- cribl_control_plane/models/outputazureeventhub.py +40 -18
- cribl_control_plane/models/outputazurelogs.py +35 -12
- cribl_control_plane/models/outputclickhouse.py +55 -20
- cribl_control_plane/models/outputcloudwatch.py +29 -10
- cribl_control_plane/models/outputconfluentcloud.py +71 -44
- cribl_control_plane/models/outputcriblhttp.py +44 -16
- cribl_control_plane/models/outputcribllake.py +46 -16
- cribl_control_plane/models/outputcribltcp.py +45 -18
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
- cribl_control_plane/models/outputdatabricks.py +282 -0
- cribl_control_plane/models/outputdatadog.py +48 -20
- cribl_control_plane/models/outputdataset.py +46 -18
- cribl_control_plane/models/outputdiskspool.py +7 -2
- cribl_control_plane/models/outputdls3.py +68 -24
- cribl_control_plane/models/outputdynatracehttp.py +53 -20
- cribl_control_plane/models/outputdynatraceotlp.py +55 -22
- cribl_control_plane/models/outputelastic.py +43 -18
- cribl_control_plane/models/outputelasticcloud.py +36 -12
- cribl_control_plane/models/outputexabeam.py +29 -10
- cribl_control_plane/models/outputfilesystem.py +39 -14
- cribl_control_plane/models/outputgooglechronicle.py +50 -16
- cribl_control_plane/models/outputgooglecloudlogging.py +50 -18
- cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
- cribl_control_plane/models/outputgooglepubsub.py +31 -10
- cribl_control_plane/models/outputgrafanacloud.py +97 -32
- cribl_control_plane/models/outputgraphite.py +31 -14
- cribl_control_plane/models/outputhoneycomb.py +35 -12
- cribl_control_plane/models/outputhumiohec.py +43 -16
- cribl_control_plane/models/outputinfluxdb.py +42 -16
- cribl_control_plane/models/outputkafka.py +69 -40
- cribl_control_plane/models/outputkinesis.py +40 -16
- cribl_control_plane/models/outputloki.py +41 -16
- cribl_control_plane/models/outputminio.py +65 -24
- cribl_control_plane/models/outputmsk.py +77 -42
- cribl_control_plane/models/outputnewrelic.py +43 -18
- cribl_control_plane/models/outputnewrelicevents.py +41 -14
- cribl_control_plane/models/outputopentelemetry.py +67 -26
- cribl_control_plane/models/outputprometheus.py +35 -12
- cribl_control_plane/models/outputring.py +19 -8
- cribl_control_plane/models/outputs3.py +68 -26
- cribl_control_plane/models/outputsecuritylake.py +52 -18
- cribl_control_plane/models/outputsentinel.py +45 -18
- cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
- cribl_control_plane/models/outputservicenow.py +60 -24
- cribl_control_plane/models/outputsignalfx.py +37 -14
- cribl_control_plane/models/outputsns.py +36 -14
- cribl_control_plane/models/outputsplunk.py +60 -24
- cribl_control_plane/models/outputsplunkhec.py +35 -12
- cribl_control_plane/models/outputsplunklb.py +77 -30
- cribl_control_plane/models/outputsqs.py +41 -16
- cribl_control_plane/models/outputstatsd.py +30 -14
- cribl_control_plane/models/outputstatsdext.py +29 -12
- cribl_control_plane/models/outputsumologic.py +35 -12
- cribl_control_plane/models/outputsyslog.py +58 -24
- cribl_control_plane/models/outputtcpjson.py +52 -20
- cribl_control_plane/models/outputwavefront.py +35 -12
- cribl_control_plane/models/outputwebhook.py +58 -22
- cribl_control_plane/models/outputxsiam.py +35 -14
- cribl_control_plane/models/productscore.py +2 -1
- cribl_control_plane/models/rbacresource.py +2 -1
- cribl_control_plane/models/resourcepolicy.py +4 -2
- cribl_control_plane/models/routeconf.py +3 -4
- cribl_control_plane/models/runnablejobcollection.py +30 -13
- cribl_control_plane/models/runnablejobexecutor.py +13 -4
- cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
- cribl_control_plane/models/workertypes.py +2 -1
- {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/METADATA +1 -1
- cribl_control_plane-0.0.50rc2.dist-info/RECORD +327 -0
- cribl_control_plane/models/appmode.py +0 -13
- cribl_control_plane/models/routecloneconf.py +0 -13
- cribl_control_plane-0.0.50.dist-info/RECORD +0 -325
- {cribl_control_plane-0.0.50.dist-info → cribl_control_plane-0.0.50rc2.dist-info}/WHEEL +0 -0

cribl_control_plane/models/outputnewrelic.py (+43 -18)

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputNewrelicType(str, Enum):
     NEWRELIC = "newrelic"
 
 
-class OutputNewrelicRegion(str, Enum):
+class OutputNewrelicRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Which New Relic region endpoint to use."""
 
     US = "US"
@@ -20,7 +23,7 @@ class OutputNewrelicRegion(str, Enum):
     CUSTOM = "Custom"
 
 
-class FieldName(str, Enum):
+class FieldName(str, Enum, metaclass=utils.OpenEnumMeta):
     SERVICE = "service"
     HOSTNAME = "hostname"
     TIMESTAMP = "timestamp"
@@ -34,7 +37,7 @@ class OutputNewrelicMetadatumTypedDict(TypedDict):
 
 
 class OutputNewrelicMetadatum(BaseModel):
-    name: FieldName
+    name: Annotated[FieldName, PlainValidator(validate_open_enum(False))]
 
     value: str
     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
@@ -51,7 +54,7 @@ class OutputNewrelicExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputNewrelicFailedRequestLoggingMode(str, Enum):
+class OutputNewrelicFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -113,7 +116,7 @@ class OutputNewrelicTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputNewrelicBackpressureBehavior(str, Enum):
+class OutputNewrelicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -121,28 +124,28 @@ class OutputNewrelicBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputNewrelicAuthenticationMethod(str, Enum):
+class OutputNewrelicAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter API key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputNewrelicCompression(str, Enum):
+class OutputNewrelicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputNewrelicQueueFullBehavior(str, Enum):
+class OutputNewrelicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputNewrelicMode(str, Enum):
+class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -257,7 +260,9 @@ class OutputNewrelic(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    region: Optional[OutputNewrelicRegion] = OutputNewrelicRegion.US
+    region: Annotated[
+        Optional[OutputNewrelicRegion], PlainValidator(validate_open_enum(False))
+    ] = OutputNewrelicRegion.US
     r"""Which New Relic region endpoint to use."""
 
     log_type: Annotated[Optional[str], pydantic.Field(alias="logType")] = ""
@@ -313,7 +318,10 @@ class OutputNewrelic(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputNewrelicFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputNewrelicFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputNewrelicFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -340,13 +348,20 @@ class OutputNewrelic(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputNewrelicBackpressureBehavior],
+        Annotated[
+            Optional[OutputNewrelicBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputNewrelicBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputNewrelicAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputNewrelicAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputNewrelicAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""
 
@@ -373,19 +388,29 @@ class OutputNewrelic(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputNewrelicCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputNewrelicCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputNewrelicCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputNewrelicQueueFullBehavior],
+        Annotated[
+            Optional[OutputNewrelicQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputNewrelicQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputNewrelicMode], pydantic.Field(alias="pqMode")
-    ] = OutputNewrelicMode.ERROR
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputNewrelicMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputNewrelicMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     pq_controls: Annotated[

cribl_control_plane/models/outputnewrelicevents.py (+41 -14)

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,7 +15,7 @@ class OutputNewrelicEventsType(str, Enum):
     NEWRELIC_EVENTS = "newrelic_events"
 
 
-class OutputNewrelicEventsRegion(str, Enum):
+class OutputNewrelicEventsRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Which New Relic region endpoint to use."""
 
     US = "US"
@@ -31,7 +34,9 @@ class OutputNewrelicEventsExtraHTTPHeader(BaseModel):
     name: Optional[str] = None
 
 
-class OutputNewrelicEventsFailedRequestLoggingMode(str, Enum):
+class OutputNewrelicEventsFailedRequestLoggingMode(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -93,7 +98,7 @@ class OutputNewrelicEventsTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputNewrelicEventsBackpressureBehavior(str, Enum):
+class OutputNewrelicEventsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -101,28 +106,28 @@ class OutputNewrelicEventsBackpressureBehavior(str, Enum):
     QUEUE = "queue"
 
 
-class OutputNewrelicEventsAuthenticationMethod(str, Enum):
+class OutputNewrelicEventsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter API key directly, or select a stored secret"""
 
     MANUAL = "manual"
     SECRET = "secret"
 
 
-class OutputNewrelicEventsCompression(str, Enum):
+class OutputNewrelicEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputNewrelicEventsQueueFullBehavior(str, Enum):
+class OutputNewrelicEventsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputNewrelicEventsMode(str, Enum):
+class OutputNewrelicEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -243,7 +248,9 @@ class OutputNewrelicEvents(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    region: Optional[OutputNewrelicEventsRegion] = OutputNewrelicEventsRegion.US
+    region: Annotated[
+        Optional[OutputNewrelicEventsRegion], PlainValidator(validate_open_enum(False))
+    ] = OutputNewrelicEventsRegion.US
     r"""Which New Relic region endpoint to use."""
 
     concurrency: Optional[float] = 5
@@ -290,7 +297,10 @@ class OutputNewrelicEvents(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputNewrelicEventsFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputNewrelicEventsFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputNewrelicEventsFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -317,13 +327,19 @@ class OutputNewrelicEvents(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
     on_backpressure: Annotated[
-        Optional[OutputNewrelicEventsBackpressureBehavior],
+        Annotated[
+            Optional[OutputNewrelicEventsBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputNewrelicEventsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
 
     auth_type: Annotated[
-        Optional[OutputNewrelicEventsAuthenticationMethod],
+        Annotated[
+            Optional[OutputNewrelicEventsAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="authType"),
     ] = OutputNewrelicEventsAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""
@@ -346,18 +362,29 @@ class OutputNewrelicEvents(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputNewrelicEventsCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputNewrelicEventsCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
    ] = OutputNewrelicEventsCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputNewrelicEventsQueueFullBehavior],
+        Annotated[
+            Optional[OutputNewrelicEventsQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputNewrelicEventsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputNewrelicEventsMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputNewrelicEventsMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputNewrelicEventsMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 

cribl_control_plane/models/outputopentelemetry.py (+67 -26)

@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
@@ -12,21 +15,21 @@ class OutputOpenTelemetryType(str, Enum):
     OPEN_TELEMETRY = "open_telemetry"
 
 
-class OutputOpenTelemetryProtocol(str, Enum):
+class OutputOpenTelemetryProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Select a transport option for OpenTelemetry"""
 
     GRPC = "grpc"
     HTTP = "http"
 
 
-class OutputOpenTelemetryOTLPVersion(str, Enum):
+class OutputOpenTelemetryOTLPVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""
 
     ZERO_DOT_10_DOT_0 = "0.10.0"
     ONE_DOT_3_DOT_1 = "1.3.1"
 
 
-class OutputOpenTelemetryCompressCompression(str, Enum):
+class OutputOpenTelemetryCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
 
     NONE = "none"
@@ -34,14 +37,16 @@ class OutputOpenTelemetryCompressCompression(str, Enum):
     GZIP = "gzip"
 
 
-class OutputOpenTelemetryHTTPCompressCompression(str, Enum):
+class OutputOpenTelemetryHTTPCompressCompression(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputOpenTelemetryAuthenticationType(str, Enum):
+class OutputOpenTelemetryAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""OpenTelemetry authentication type"""
 
     NONE = "none"
@@ -63,7 +68,9 @@ class OutputOpenTelemetryMetadatum(BaseModel):
     key: Optional[str] = ""
 
 
-class OutputOpenTelemetryFailedRequestLoggingMode(str, Enum):
+class OutputOpenTelemetryFailedRequestLoggingMode(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
     PAYLOAD = "payload"
@@ -71,7 +78,7 @@ class OutputOpenTelemetryFailedRequestLoggingMode(str, Enum):
     NONE = "none"
 
 
-class OutputOpenTelemetryBackpressureBehavior(str, Enum):
+class OutputOpenTelemetryBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
     BLOCK = "block"
@@ -174,14 +181,14 @@ class OutputOpenTelemetryTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
 
 
-class OutputOpenTelemetryMinimumTLSVersion(str, Enum):
+class OutputOpenTelemetryMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"
 
 
-class OutputOpenTelemetryMaximumTLSVersion(str, Enum):
+class OutputOpenTelemetryMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -236,31 +243,37 @@ class OutputOpenTelemetryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""
 
     min_version: Annotated[
-        Optional[OutputOpenTelemetryMinimumTLSVersion],
+        Annotated[
+            Optional[OutputOpenTelemetryMinimumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="minVersion"),
     ] = None
 
     max_version: Annotated[
-        Optional[OutputOpenTelemetryMaximumTLSVersion],
+        Annotated[
+            Optional[OutputOpenTelemetryMaximumTLSVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="maxVersion"),
     ] = None
 
 
-class OutputOpenTelemetryPqCompressCompression(str, Enum):
+class OutputOpenTelemetryPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
     NONE = "none"
     GZIP = "gzip"
 
 
-class OutputOpenTelemetryQueueFullBehavior(str, Enum):
+class OutputOpenTelemetryQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     BLOCK = "block"
     DROP = "drop"
 
 
-class OutputOpenTelemetryMode(str, Enum):
+class OutputOpenTelemetryMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
     ERROR = "error"
@@ -412,27 +425,40 @@ class OutputOpenTelemetry(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""
 
-    protocol: Optional[OutputOpenTelemetryProtocol] = OutputOpenTelemetryProtocol.GRPC
+    protocol: Annotated[
+        Optional[OutputOpenTelemetryProtocol], PlainValidator(validate_open_enum(False))
+    ] = OutputOpenTelemetryProtocol.GRPC
     r"""Select a transport option for OpenTelemetry"""
 
     otlp_version: Annotated[
-        Optional[OutputOpenTelemetryOTLPVersion], pydantic.Field(alias="otlpVersion")
+        Annotated[
+            Optional[OutputOpenTelemetryOTLPVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="otlpVersion"),
     ] = OutputOpenTelemetryOTLPVersion.ZERO_DOT_10_DOT_0
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""
 
-    compress: Optional[OutputOpenTelemetryCompressCompression] = (
-        OutputOpenTelemetryCompressCompression.GZIP
-    )
+    compress: Annotated[
+        Optional[OutputOpenTelemetryCompressCompression],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputOpenTelemetryCompressCompression.GZIP
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
 
     http_compress: Annotated[
-        Optional[OutputOpenTelemetryHTTPCompressCompression],
+        Annotated[
+            Optional[OutputOpenTelemetryHTTPCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="httpCompress"),
     ] = OutputOpenTelemetryHTTPCompressCompression.GZIP
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""
 
     auth_type: Annotated[
-        Optional[OutputOpenTelemetryAuthenticationType],
+        Annotated[
+            Optional[OutputOpenTelemetryAuthenticationType],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="authType"),
     ] = OutputOpenTelemetryAuthenticationType.NONE
     r"""OpenTelemetry authentication type"""
@@ -472,7 +498,10 @@ class OutputOpenTelemetry(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
 
     failed_request_logging_mode: Annotated[
-        Optional[OutputOpenTelemetryFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputOpenTelemetryFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputOpenTelemetryFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -491,7 +520,10 @@ class OutputOpenTelemetry(BaseModel):
     r"""Disable to close the connection immediately after sending the outgoing request"""
 
     on_backpressure: Annotated[
-        Optional[OutputOpenTelemetryBackpressureBehavior],
+        Annotated[
+            Optional[OutputOpenTelemetryBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputOpenTelemetryBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -607,19 +639,28 @@ class OutputOpenTelemetry(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputOpenTelemetryPqCompressCompression],
+        Annotated[
+            Optional[OutputOpenTelemetryPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqCompress"),
     ] = OutputOpenTelemetryPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputOpenTelemetryQueueFullBehavior],
+        Annotated[
+            Optional[OutputOpenTelemetryQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputOpenTelemetryQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
     pq_mode: Annotated[
-        Optional[OutputOpenTelemetryMode], pydantic.Field(alias="pqMode")
+        Annotated[
+            Optional[OutputOpenTelemetryMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
     ] = OutputOpenTelemetryMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
 
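
The recurring change in these diffs converts the Speakeasy-generated enums into "open" enums: each Enum gains metaclass=utils.OpenEnumMeta, and each field that uses one is wrapped in Annotated[..., PlainValidator(validate_open_enum(False))], so values outside the generated members are passed through instead of failing validation. The sketch below illustrates that behavior with plain pydantic v2; lenient_open_enum is a hypothetical stand-in for the SDK's validate_open_enum helper, not the package's actual implementation.

# Minimal sketch of the open-enum pattern (assumes pydantic v2).
from enum import Enum
from typing import Optional

from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Region(str, Enum):
    US = "US"
    EU = "EU"


def lenient_open_enum(value):
    # Hypothetical stand-in for validate_open_enum: return the known member
    # when the value matches, otherwise keep the raw value instead of raising.
    try:
        return Region(value)
    except ValueError:
        return value


class Output(BaseModel):
    region: Annotated[Optional[Region], PlainValidator(lenient_open_enum)] = Region.US


print(Output(region="US").region)       # Region.US
print(Output(region="Staging").region)  # "Staging" is preserved, not rejected

In practice this makes the generated models forward-compatible: an API response that introduces a new enum value (for example, a region the SDK does not know about yet) still deserializes instead of raising a ValidationError.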