cribl-control-plane 0.2.1rc7__py3-none-any.whl → 0.3.0a1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/errors/__init__.py +5 -8
- cribl_control_plane/errors/{healthserverstatus_error.py → healthstatus_error.py} +9 -10
- cribl_control_plane/groups_sdk.py +28 -52
- cribl_control_plane/health.py +16 -22
- cribl_control_plane/models/__init__.py +54 -217
- cribl_control_plane/models/appmode.py +14 -0
- cribl_control_plane/models/authtoken.py +1 -5
- cribl_control_plane/models/cacheconnection.py +0 -20
- cribl_control_plane/models/configgroup.py +7 -55
- cribl_control_plane/models/configgroupcloud.py +1 -11
- cribl_control_plane/models/createconfiggroupbyproductop.py +5 -17
- cribl_control_plane/models/createroutesappendbyidop.py +2 -2
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedataset.py +1 -11
- cribl_control_plane/models/cribllakedatasetupdate.py +1 -11
- cribl_control_plane/models/datasetmetadata.py +1 -11
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
- cribl_control_plane/models/distributedsummary.py +0 -6
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +0 -11
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +0 -11
- cribl_control_plane/models/hbcriblinfo.py +3 -24
- cribl_control_plane/models/{healthserverstatus.py → healthstatus.py} +8 -27
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/input.py +78 -80
- cribl_control_plane/models/inputappscope.py +17 -80
- cribl_control_plane/models/inputazureblob.py +1 -33
- cribl_control_plane/models/inputcollection.py +1 -24
- cribl_control_plane/models/inputconfluentcloud.py +18 -195
- cribl_control_plane/models/inputcribl.py +1 -24
- cribl_control_plane/models/inputcriblhttp.py +17 -62
- cribl_control_plane/models/inputcribllakehttp.py +17 -62
- cribl_control_plane/models/inputcriblmetrics.py +1 -24
- cribl_control_plane/models/inputcribltcp.py +17 -62
- cribl_control_plane/models/inputcrowdstrike.py +1 -54
- cribl_control_plane/models/inputdatadogagent.py +17 -62
- cribl_control_plane/models/inputdatagen.py +1 -24
- cribl_control_plane/models/inputedgeprometheus.py +34 -147
- cribl_control_plane/models/inputelastic.py +27 -119
- cribl_control_plane/models/inputeventhub.py +1 -182
- cribl_control_plane/models/inputexec.py +1 -33
- cribl_control_plane/models/inputfile.py +3 -42
- cribl_control_plane/models/inputfirehose.py +17 -62
- cribl_control_plane/models/inputgooglepubsub.py +1 -36
- cribl_control_plane/models/inputgrafana.py +32 -157
- cribl_control_plane/models/inputhttp.py +17 -62
- cribl_control_plane/models/inputhttpraw.py +17 -62
- cribl_control_plane/models/inputjournalfiles.py +1 -24
- cribl_control_plane/models/inputkafka.py +17 -189
- cribl_control_plane/models/inputkinesis.py +1 -80
- cribl_control_plane/models/inputkubeevents.py +1 -24
- cribl_control_plane/models/inputkubelogs.py +1 -33
- cribl_control_plane/models/inputkubemetrics.py +1 -33
- cribl_control_plane/models/inputloki.py +17 -71
- cribl_control_plane/models/inputmetrics.py +17 -62
- cribl_control_plane/models/inputmodeldriventelemetry.py +17 -62
- cribl_control_plane/models/inputmsk.py +18 -81
- cribl_control_plane/models/inputnetflow.py +1 -24
- cribl_control_plane/models/inputoffice365mgmt.py +1 -67
- cribl_control_plane/models/inputoffice365msgtrace.py +1 -67
- cribl_control_plane/models/inputoffice365service.py +1 -67
- cribl_control_plane/models/inputopentelemetry.py +16 -92
- cribl_control_plane/models/inputprometheus.py +34 -138
- cribl_control_plane/models/inputprometheusrw.py +17 -71
- cribl_control_plane/models/inputrawudp.py +1 -24
- cribl_control_plane/models/inputs3.py +1 -45
- cribl_control_plane/models/inputs3inventory.py +1 -54
- cribl_control_plane/models/inputsecuritylake.py +1 -54
- cribl_control_plane/models/inputsnmp.py +1 -40
- cribl_control_plane/models/inputsplunk.py +17 -85
- cribl_control_plane/models/inputsplunkhec.py +16 -70
- cribl_control_plane/models/inputsplunksearch.py +1 -63
- cribl_control_plane/models/inputsqs.py +1 -56
- cribl_control_plane/models/inputsyslog.py +32 -121
- cribl_control_plane/models/inputsystemmetrics.py +9 -142
- cribl_control_plane/models/inputsystemstate.py +1 -33
- cribl_control_plane/models/inputtcp.py +17 -81
- cribl_control_plane/models/inputtcpjson.py +17 -71
- cribl_control_plane/models/inputwef.py +1 -71
- cribl_control_plane/models/inputwindowsmetrics.py +9 -129
- cribl_control_plane/models/inputwineventlogs.py +1 -60
- cribl_control_plane/models/inputwiz.py +1 -45
- cribl_control_plane/models/inputwizwebhook.py +17 -62
- cribl_control_plane/models/inputzscalerhec.py +16 -70
- cribl_control_plane/models/jobinfo.py +1 -4
- cribl_control_plane/models/jobstatus.py +3 -34
- cribl_control_plane/models/listconfiggroupbyproductop.py +0 -11
- cribl_control_plane/models/logininfo.py +3 -3
- cribl_control_plane/models/masterworkerentry.py +1 -11
- cribl_control_plane/models/nodeprovidedinfo.py +1 -11
- cribl_control_plane/models/nodeupgradestatus.py +0 -38
- cribl_control_plane/models/output.py +88 -93
- cribl_control_plane/models/outputazureblob.py +1 -110
- cribl_control_plane/models/outputazuredataexplorer.py +87 -452
- cribl_control_plane/models/outputazureeventhub.py +19 -281
- cribl_control_plane/models/outputazurelogs.py +19 -115
- cribl_control_plane/models/outputchronicle.py +19 -115
- cribl_control_plane/models/outputclickhouse.py +19 -155
- cribl_control_plane/models/outputcloudwatch.py +19 -106
- cribl_control_plane/models/outputconfluentcloud.py +38 -311
- cribl_control_plane/models/outputcriblhttp.py +19 -135
- cribl_control_plane/models/outputcribllake.py +1 -97
- cribl_control_plane/models/outputcribltcp.py +19 -132
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +20 -129
- cribl_control_plane/models/outputdatadog.py +19 -159
- cribl_control_plane/models/outputdataset.py +19 -143
- cribl_control_plane/models/outputdiskspool.py +1 -11
- cribl_control_plane/models/outputdls3.py +1 -152
- cribl_control_plane/models/outputdynatracehttp.py +19 -160
- cribl_control_plane/models/outputdynatraceotlp.py +19 -160
- cribl_control_plane/models/outputelastic.py +19 -163
- cribl_control_plane/models/outputelasticcloud.py +19 -140
- cribl_control_plane/models/outputexabeam.py +1 -61
- cribl_control_plane/models/outputfilesystem.py +1 -87
- cribl_control_plane/models/outputgooglechronicle.py +20 -166
- cribl_control_plane/models/outputgooglecloudlogging.py +20 -131
- cribl_control_plane/models/outputgooglecloudstorage.py +1 -136
- cribl_control_plane/models/outputgooglepubsub.py +19 -106
- cribl_control_plane/models/outputgrafanacloud.py +37 -288
- cribl_control_plane/models/outputgraphite.py +19 -105
- cribl_control_plane/models/outputhoneycomb.py +19 -115
- cribl_control_plane/models/outputhumiohec.py +19 -126
- cribl_control_plane/models/outputinfluxdb.py +19 -130
- cribl_control_plane/models/outputkafka.py +34 -302
- cribl_control_plane/models/outputkinesis.py +19 -133
- cribl_control_plane/models/outputloki.py +17 -129
- cribl_control_plane/models/outputminio.py +1 -145
- cribl_control_plane/models/outputmsk.py +34 -193
- cribl_control_plane/models/outputnewrelic.py +19 -136
- cribl_control_plane/models/outputnewrelicevents.py +20 -128
- cribl_control_plane/models/outputopentelemetry.py +19 -178
- cribl_control_plane/models/outputprometheus.py +19 -115
- cribl_control_plane/models/outputring.py +1 -31
- cribl_control_plane/models/outputs3.py +1 -152
- cribl_control_plane/models/outputsecuritylake.py +1 -114
- cribl_control_plane/models/outputsentinel.py +19 -135
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -134
- cribl_control_plane/models/outputservicenow.py +19 -168
- cribl_control_plane/models/outputsignalfx.py +19 -115
- cribl_control_plane/models/outputsns.py +17 -113
- cribl_control_plane/models/outputsplunk.py +19 -153
- cribl_control_plane/models/outputsplunkhec.py +19 -208
- cribl_control_plane/models/outputsplunklb.py +19 -182
- cribl_control_plane/models/outputsqs.py +17 -124
- cribl_control_plane/models/outputstatsd.py +19 -105
- cribl_control_plane/models/outputstatsdext.py +19 -105
- cribl_control_plane/models/outputsumologic.py +19 -117
- cribl_control_plane/models/outputsyslog.py +96 -259
- cribl_control_plane/models/outputtcpjson.py +19 -141
- cribl_control_plane/models/outputwavefront.py +19 -115
- cribl_control_plane/models/outputwebhook.py +19 -161
- cribl_control_plane/models/outputxsiam.py +17 -113
- cribl_control_plane/models/packinfo.py +5 -8
- cribl_control_plane/models/packinstallinfo.py +5 -8
- cribl_control_plane/models/resourcepolicy.py +0 -11
- cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/runnablejobcollection.py +9 -72
- cribl_control_plane/models/runnablejobexecutor.py +9 -32
- cribl_control_plane/models/runnablejobscheduledsearch.py +9 -23
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +0 -11
- cribl_control_plane/packs.py +7 -202
- cribl_control_plane/routes_sdk.py +6 -6
- cribl_control_plane/tokens.py +15 -23
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/METADATA +9 -50
- cribl_control_plane-0.3.0a1.dist-info/RECORD +330 -0
- cribl_control_plane/models/groupcreaterequest.py +0 -171
- cribl_control_plane/models/outpostnodeinfo.py +0 -16
- cribl_control_plane/models/outputdatabricks.py +0 -482
- cribl_control_plane/models/updatepacksop.py +0 -25
- cribl_control_plane-0.2.1rc7.dist-info/RECORD +0 -331
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputnewrelic.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,11 +18,8 @@ class OutputNewrelicType(str, Enum):
 class OutputNewrelicRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Which New Relic region endpoint to use."""
 
-    # US
     US = "US"
-    # Europe
     EU = "EU"
-    # Custom
     CUSTOM = "Custom"
 
 
@@ -46,15 +42,6 @@ class OutputNewrelicMetadatum(BaseModel):
     value: str
     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
 
-    @field_serializer("name")
-    def serialize_name(self, value):
-        if isinstance(value, str):
-            try:
-                return models.FieldName(value)
-            except ValueError:
-                return value
-        return value
-
 
 class OutputNewrelicExtraHTTPHeaderTypedDict(TypedDict):
     value: str
@@ -70,11 +57,8 @@ class OutputNewrelicExtraHTTPHeader(BaseModel):
 class OutputNewrelicFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
-    # Payload
     PAYLOAD = "payload"
-    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
-    # None
     NONE = "none"
 
 
@@ -135,11 +119,8 @@ class OutputNewrelicTimeoutRetrySettings(BaseModel):
 class OutputNewrelicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -150,35 +131,28 @@ class OutputNewrelicAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta
     SECRET = "secret"
 
 
-class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputNewrelicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
 class OutputNewrelicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"
 
 
+class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputNewrelicPqControlsTypedDict(TypedDict):
     pass
 
@@ -247,16 +221,6 @@ class OutputNewrelicTypedDict(TypedDict):
     r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
     description: NotRequired[str]
     custom_url: NotRequired[str]
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputNewrelicMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -267,6 +231,8 @@ class OutputNewrelicTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputNewrelicQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputNewrelicMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputNewrelicPqControlsTypedDict]
     api_key: NotRequired[str]
     r"""New Relic API key. Can be overridden using __newRelic_apiKey field."""
@@ -408,34 +374,6 @@ class OutputNewrelic(BaseModel):
 
     custom_url: Annotated[Optional[str], pydantic.Field(alias="customUrl")] = None
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputNewrelicMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -467,6 +405,14 @@ class OutputNewrelic(BaseModel):
     ] = OutputNewrelicQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputNewrelicMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputNewrelicMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputNewrelicPqControls], pydantic.Field(alias="pqControls")
     ] = None
@@ -476,66 +422,3 @@ class OutputNewrelic(BaseModel):
 
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""
-
-    @field_serializer("region")
-    def serialize_region(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicRegion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("failed_request_logging_mode")
-    def serialize_failed_request_logging_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicFailedRequestLoggingMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("auth_type")
-    def serialize_auth_type(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value
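The hunks above show the pattern repeated across most models in this release: the per-field `field_serializer` helpers are removed, and open enums keep only their `metaclass=utils.OpenEnumMeta` declaration and `PlainValidator(validate_open_enum(False))` annotation. The sketch below is a minimal, hypothetical illustration of that open-enum idea in plain pydantic v2; `Mode`, `open_mode`, and `PqConfig` are stand-ins I introduce for illustration, not the SDK's generated classes or its `validate_open_enum` utility. Known strings become enum members, while unrecognized strings pass through instead of failing validation.

```python
# Minimal open-enum sketch in plain pydantic v2; all names here are hypothetical
# stand-ins for the generated OutputNewrelic* classes.
from enum import Enum
from typing import Annotated, Union

from pydantic import BaseModel, Field, PlainValidator


class Mode(str, Enum):  # stand-in for OutputNewrelicMode
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


def open_mode(value: object) -> object:
    # Map known strings to the enum; let unrecognized strings pass through unchanged.
    try:
        return Mode(value)
    except ValueError:
        return value


class PqConfig(BaseModel):  # stand-in for the pq_* portion of OutputNewrelic
    pq_mode: Annotated[
        Union[Mode, str], PlainValidator(open_mode), Field(alias="pqMode")
    ] = Mode.ERROR


print(PqConfig(pqMode="backpressure").pq_mode)  # Mode.BACKPRESSURE
print(PqConfig(pqMode="future-mode").pq_mode)   # "future-mode" is kept, not rejected
```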
cribl_control_plane/models/outputnewrelicevents.py

@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,11 +18,8 @@ class OutputNewrelicEventsType(str, Enum):
 class OutputNewrelicEventsRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Which New Relic region endpoint to use."""
 
-    # US
     US = "US"
-    # Europe
     EU = "EU"
-    # Custom
     CUSTOM = "Custom"
 
 
@@ -43,11 +39,8 @@ class OutputNewrelicEventsFailedRequestLoggingMode(
 ):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
 
-    # Payload
     PAYLOAD = "payload"
-    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
-    # None
     NONE = "none"
 
 
@@ -108,11 +101,8 @@ class OutputNewrelicEventsTimeoutRetrySettings(BaseModel):
 class OutputNewrelicEventsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"
 
 
@@ -123,35 +113,28 @@ class OutputNewrelicEventsAuthenticationMethod(str, Enum, metaclass=utils.OpenEn
     SECRET = "secret"
 
 
-class OutputNewrelicEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputNewrelicEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
 class OutputNewrelicEventsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"
 
 
+class OutputNewrelicEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputNewrelicEventsPqControlsTypedDict(TypedDict):
     pass
 
@@ -220,16 +203,6 @@ class OutputNewrelicEventsTypedDict(TypedDict):
     r"""Enter API key directly, or select a stored secret"""
     description: NotRequired[str]
     custom_url: NotRequired[str]
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputNewrelicEventsMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -240,6 +213,8 @@ class OutputNewrelicEventsTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputNewrelicEventsQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputNewrelicEventsMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputNewrelicEventsPqControlsTypedDict]
     api_key: NotRequired[str]
     r"""New Relic API key. Can be overridden using __newRelic_apiKey field."""
@@ -373,35 +348,6 @@ class OutputNewrelicEvents(BaseModel):
 
     custom_url: Annotated[Optional[str], pydantic.Field(alias="customUrl")] = None
 
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputNewrelicEventsMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -433,6 +379,15 @@ class OutputNewrelicEvents(BaseModel):
     ] = OutputNewrelicEventsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputNewrelicEventsMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputNewrelicEventsMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputNewrelicEventsPqControls], pydantic.Field(alias="pqControls")
     ] = None
@@ -442,66 +397,3 @@ class OutputNewrelicEvents(BaseModel):
 
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""
-
-    @field_serializer("region")
-    def serialize_region(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicEventsRegion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("failed_request_logging_mode")
-    def serialize_failed_request_logging_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicEventsFailedRequestLoggingMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicEventsBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("auth_type")
-    def serialize_auth_type(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicEventsAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicEventsMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicEventsCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputNewrelicEventsQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value