cribl-control-plane 0.0.13 (cribl_control_plane-0.0.13-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/__init__.py +17 -0
- cribl_control_plane/_hooks/__init__.py +5 -0
- cribl_control_plane/_hooks/clientcredentials.py +211 -0
- cribl_control_plane/_hooks/registration.py +13 -0
- cribl_control_plane/_hooks/sdkhooks.py +81 -0
- cribl_control_plane/_hooks/types.py +112 -0
- cribl_control_plane/_version.py +15 -0
- cribl_control_plane/auth_sdk.py +184 -0
- cribl_control_plane/basesdk.py +358 -0
- cribl_control_plane/errors/__init__.py +60 -0
- cribl_control_plane/errors/apierror.py +38 -0
- cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
- cribl_control_plane/errors/error.py +24 -0
- cribl_control_plane/errors/healthstatus_error.py +38 -0
- cribl_control_plane/errors/no_response_error.py +13 -0
- cribl_control_plane/errors/responsevalidationerror.py +25 -0
- cribl_control_plane/health.py +166 -0
- cribl_control_plane/httpclient.py +126 -0
- cribl_control_plane/models/__init__.py +7305 -0
- cribl_control_plane/models/addhectokenrequest.py +34 -0
- cribl_control_plane/models/authtoken.py +13 -0
- cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
- cribl_control_plane/models/createinputop.py +24 -0
- cribl_control_plane/models/createoutputop.py +24 -0
- cribl_control_plane/models/createoutputtestbyidop.py +46 -0
- cribl_control_plane/models/criblevent.py +14 -0
- cribl_control_plane/models/deleteinputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputbyidop.py +37 -0
- cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getinputbyidop.py +37 -0
- cribl_control_plane/models/getoutputbyidop.py +37 -0
- cribl_control_plane/models/getoutputpqbyidop.py +36 -0
- cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
- cribl_control_plane/models/healthstatus.py +36 -0
- cribl_control_plane/models/input.py +199 -0
- cribl_control_plane/models/inputappscope.py +448 -0
- cribl_control_plane/models/inputazureblob.py +308 -0
- cribl_control_plane/models/inputcollection.py +208 -0
- cribl_control_plane/models/inputconfluentcloud.py +585 -0
- cribl_control_plane/models/inputcribl.py +165 -0
- cribl_control_plane/models/inputcriblhttp.py +341 -0
- cribl_control_plane/models/inputcribllakehttp.py +342 -0
- cribl_control_plane/models/inputcriblmetrics.py +175 -0
- cribl_control_plane/models/inputcribltcp.py +299 -0
- cribl_control_plane/models/inputcrowdstrike.py +410 -0
- cribl_control_plane/models/inputdatadogagent.py +364 -0
- cribl_control_plane/models/inputdatagen.py +180 -0
- cribl_control_plane/models/inputedgeprometheus.py +551 -0
- cribl_control_plane/models/inputelastic.py +494 -0
- cribl_control_plane/models/inputeventhub.py +360 -0
- cribl_control_plane/models/inputexec.py +213 -0
- cribl_control_plane/models/inputfile.py +259 -0
- cribl_control_plane/models/inputfirehose.py +341 -0
- cribl_control_plane/models/inputgooglepubsub.py +247 -0
- cribl_control_plane/models/inputgrafana_union.py +1247 -0
- cribl_control_plane/models/inputhttp.py +403 -0
- cribl_control_plane/models/inputhttpraw.py +407 -0
- cribl_control_plane/models/inputjournalfiles.py +208 -0
- cribl_control_plane/models/inputkafka.py +581 -0
- cribl_control_plane/models/inputkinesis.py +363 -0
- cribl_control_plane/models/inputkubeevents.py +182 -0
- cribl_control_plane/models/inputkubelogs.py +256 -0
- cribl_control_plane/models/inputkubemetrics.py +233 -0
- cribl_control_plane/models/inputloki.py +468 -0
- cribl_control_plane/models/inputmetrics.py +290 -0
- cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
- cribl_control_plane/models/inputmsk.py +654 -0
- cribl_control_plane/models/inputnetflow.py +224 -0
- cribl_control_plane/models/inputoffice365mgmt.py +384 -0
- cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
- cribl_control_plane/models/inputoffice365service.py +377 -0
- cribl_control_plane/models/inputopentelemetry.py +516 -0
- cribl_control_plane/models/inputprometheus.py +464 -0
- cribl_control_plane/models/inputprometheusrw.py +470 -0
- cribl_control_plane/models/inputrawudp.py +207 -0
- cribl_control_plane/models/inputs3.py +416 -0
- cribl_control_plane/models/inputs3inventory.py +440 -0
- cribl_control_plane/models/inputsecuritylake.py +425 -0
- cribl_control_plane/models/inputsnmp.py +274 -0
- cribl_control_plane/models/inputsplunk.py +387 -0
- cribl_control_plane/models/inputsplunkhec.py +478 -0
- cribl_control_plane/models/inputsplunksearch.py +537 -0
- cribl_control_plane/models/inputsqs.py +320 -0
- cribl_control_plane/models/inputsyslog_union.py +759 -0
- cribl_control_plane/models/inputsystemmetrics.py +533 -0
- cribl_control_plane/models/inputsystemstate.py +417 -0
- cribl_control_plane/models/inputtcp.py +359 -0
- cribl_control_plane/models/inputtcpjson.py +334 -0
- cribl_control_plane/models/inputwef.py +498 -0
- cribl_control_plane/models/inputwindowsmetrics.py +457 -0
- cribl_control_plane/models/inputwineventlogs.py +222 -0
- cribl_control_plane/models/inputwiz.py +334 -0
- cribl_control_plane/models/inputzscalerhec.py +439 -0
- cribl_control_plane/models/listinputop.py +24 -0
- cribl_control_plane/models/listoutputop.py +24 -0
- cribl_control_plane/models/logininfo.py +16 -0
- cribl_control_plane/models/output.py +229 -0
- cribl_control_plane/models/outputazureblob.py +471 -0
- cribl_control_plane/models/outputazuredataexplorer.py +660 -0
- cribl_control_plane/models/outputazureeventhub.py +321 -0
- cribl_control_plane/models/outputazurelogs.py +386 -0
- cribl_control_plane/models/outputclickhouse.py +650 -0
- cribl_control_plane/models/outputcloudwatch.py +273 -0
- cribl_control_plane/models/outputconfluentcloud.py +591 -0
- cribl_control_plane/models/outputcriblhttp.py +494 -0
- cribl_control_plane/models/outputcribllake.py +396 -0
- cribl_control_plane/models/outputcribltcp.py +387 -0
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
- cribl_control_plane/models/outputdatadog.py +472 -0
- cribl_control_plane/models/outputdataset.py +437 -0
- cribl_control_plane/models/outputdefault.py +55 -0
- cribl_control_plane/models/outputdevnull.py +50 -0
- cribl_control_plane/models/outputdiskspool.py +89 -0
- cribl_control_plane/models/outputdls3.py +560 -0
- cribl_control_plane/models/outputdynatracehttp.py +454 -0
- cribl_control_plane/models/outputdynatraceotlp.py +486 -0
- cribl_control_plane/models/outputelastic.py +494 -0
- cribl_control_plane/models/outputelasticcloud.py +407 -0
- cribl_control_plane/models/outputexabeam.py +297 -0
- cribl_control_plane/models/outputfilesystem.py +357 -0
- cribl_control_plane/models/outputgooglechronicle.py +486 -0
- cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
- cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
- cribl_control_plane/models/outputgooglepubsub.py +274 -0
- cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
- cribl_control_plane/models/outputgraphite.py +225 -0
- cribl_control_plane/models/outputhoneycomb.py +369 -0
- cribl_control_plane/models/outputhumiohec.py +389 -0
- cribl_control_plane/models/outputinfluxdb.py +523 -0
- cribl_control_plane/models/outputkafka.py +581 -0
- cribl_control_plane/models/outputkinesis.py +312 -0
- cribl_control_plane/models/outputloki.py +425 -0
- cribl_control_plane/models/outputminio.py +512 -0
- cribl_control_plane/models/outputmsk.py +654 -0
- cribl_control_plane/models/outputnetflow.py +80 -0
- cribl_control_plane/models/outputnewrelic.py +424 -0
- cribl_control_plane/models/outputnewrelicevents.py +401 -0
- cribl_control_plane/models/outputopentelemetry.py +669 -0
- cribl_control_plane/models/outputprometheus.py +485 -0
- cribl_control_plane/models/outputring.py +121 -0
- cribl_control_plane/models/outputrouter.py +83 -0
- cribl_control_plane/models/outputs3.py +556 -0
- cribl_control_plane/models/outputsamplesresponse.py +14 -0
- cribl_control_plane/models/outputsecuritylake.py +505 -0
- cribl_control_plane/models/outputsentinel.py +488 -0
- cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
- cribl_control_plane/models/outputservicenow.py +543 -0
- cribl_control_plane/models/outputsignalfx.py +369 -0
- cribl_control_plane/models/outputsnmp.py +80 -0
- cribl_control_plane/models/outputsns.py +274 -0
- cribl_control_plane/models/outputsplunk.py +383 -0
- cribl_control_plane/models/outputsplunkhec.py +434 -0
- cribl_control_plane/models/outputsplunklb.py +558 -0
- cribl_control_plane/models/outputsqs.py +328 -0
- cribl_control_plane/models/outputstatsd.py +224 -0
- cribl_control_plane/models/outputstatsdext.py +225 -0
- cribl_control_plane/models/outputsumologic.py +378 -0
- cribl_control_plane/models/outputsyslog.py +415 -0
- cribl_control_plane/models/outputtcpjson.py +413 -0
- cribl_control_plane/models/outputtestrequest.py +15 -0
- cribl_control_plane/models/outputtestresponse.py +29 -0
- cribl_control_plane/models/outputwavefront.py +369 -0
- cribl_control_plane/models/outputwebhook.py +689 -0
- cribl_control_plane/models/outputxsiam.py +415 -0
- cribl_control_plane/models/schemeclientoauth.py +24 -0
- cribl_control_plane/models/security.py +36 -0
- cribl_control_plane/models/updatehectokenrequest.py +31 -0
- cribl_control_plane/models/updateinputbyidop.py +44 -0
- cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
- cribl_control_plane/models/updateoutputbyidop.py +44 -0
- cribl_control_plane/outputs.py +1615 -0
- cribl_control_plane/py.typed +1 -0
- cribl_control_plane/sdk.py +164 -0
- cribl_control_plane/sdkconfiguration.py +36 -0
- cribl_control_plane/sources.py +1355 -0
- cribl_control_plane/types/__init__.py +21 -0
- cribl_control_plane/types/basemodel.py +39 -0
- cribl_control_plane/utils/__init__.py +187 -0
- cribl_control_plane/utils/annotations.py +55 -0
- cribl_control_plane/utils/datetimes.py +23 -0
- cribl_control_plane/utils/enums.py +74 -0
- cribl_control_plane/utils/eventstreaming.py +238 -0
- cribl_control_plane/utils/forms.py +223 -0
- cribl_control_plane/utils/headers.py +136 -0
- cribl_control_plane/utils/logger.py +27 -0
- cribl_control_plane/utils/metadata.py +118 -0
- cribl_control_plane/utils/queryparams.py +205 -0
- cribl_control_plane/utils/requestbodies.py +66 -0
- cribl_control_plane/utils/retries.py +217 -0
- cribl_control_plane/utils/security.py +207 -0
- cribl_control_plane/utils/serializers.py +249 -0
- cribl_control_plane/utils/unmarshal_json_response.py +24 -0
- cribl_control_plane/utils/url.py +155 -0
- cribl_control_plane/utils/values.py +137 -0
- cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
- cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
- cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
cribl_control_plane/models/outputazureeventhub.py
@@ -0,0 +1,321 @@
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from cribl_control_plane import utils
from cribl_control_plane.types import BaseModel
from cribl_control_plane.utils import validate_open_enum
from enum import Enum
import pydantic
from pydantic.functional_validators import PlainValidator
from typing import List, Optional
from typing_extensions import Annotated, NotRequired, TypedDict


class OutputAzureEventhubType(str, Enum, metaclass=utils.OpenEnumMeta):
    AZURE_EVENTHUB = "azure_eventhub"


class OutputAzureEventhubAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
    r"""Control the number of required acknowledgments"""

    ONE = 1
    ZERO = 0
    MINUS_1 = -1


class OutputAzureEventhubRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""

    JSON = "json"
    RAW = "raw"


class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
    PLAIN = "plain"
    OAUTHBEARER = "oauthbearer"


class OutputAzureEventhubAuthenticationTypedDict(TypedDict):
    r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

    disabled: NotRequired[bool]
    mechanism: NotRequired[OutputAzureEventhubSASLMechanism]


class OutputAzureEventhubAuthentication(BaseModel):
    r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

    disabled: Optional[bool] = False

    mechanism: Annotated[
        Optional[OutputAzureEventhubSASLMechanism],
        PlainValidator(validate_open_enum(False)),
    ] = OutputAzureEventhubSASLMechanism.PLAIN


class OutputAzureEventhubTLSSettingsClientSideTypedDict(TypedDict):
    disabled: NotRequired[bool]
    reject_unauthorized: NotRequired[bool]
    r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""


class OutputAzureEventhubTLSSettingsClientSide(BaseModel):
    disabled: Optional[bool] = False

    reject_unauthorized: Annotated[
        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
    ] = True
    r"""Reject certificates that are not authorized by a CA in the CA certificate path, or by another trusted CA (such as the system's)"""


class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when all receivers are exerting backpressure"""

    BLOCK = "block"
    DROP = "drop"
    QUEUE = "queue"


class OutputAzureEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Codec to use to compress the persisted data"""

    NONE = "none"
    GZIP = "gzip"


class OutputAzureEventhubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    BLOCK = "block"
    DROP = "drop"


class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class OutputAzureEventhubPqControlsTypedDict(TypedDict):
    pass


class OutputAzureEventhubPqControls(BaseModel):
    pass


class OutputAzureEventhubTypedDict(TypedDict):
    brokers: List[str]
    r"""List of Event Hubs Kafka brokers to connect to, eg. yourdomain.servicebus.windows.net:9093. The hostname can be found in the host portion of the primary or secondary connection string in Shared Access Policies."""
    topic: str
    r"""The name of the Event Hub (Kafka Topic) to publish events. Can be overwritten using field __topicOut."""
    id: NotRequired[str]
    r"""Unique ID for this output"""
    type: NotRequired[OutputAzureEventhubType]
    pipeline: NotRequired[str]
    r"""Pipeline to process data before sending out to this output"""
    system_fields: NotRequired[List[str]]
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
    environment: NotRequired[str]
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
    streamtags: NotRequired[List[str]]
    r"""Tags for filtering and grouping in @{product}"""
    ack: NotRequired[OutputAzureEventhubAcknowledgments]
    r"""Control the number of required acknowledgments"""
    format_: NotRequired[OutputAzureEventhubRecordDataFormat]
    r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""
    max_record_size_kb: NotRequired[float]
    r"""Maximum size of each record batch before compression. Setting should be < message.max.bytes settings in Event Hubs brokers."""
    flush_event_count: NotRequired[float]
    r"""Maximum number of events in a batch before forcing a flush"""
    flush_period_sec: NotRequired[float]
    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""
    connection_timeout: NotRequired[float]
    r"""Maximum time to wait for a connection to complete successfully"""
    request_timeout: NotRequired[float]
    r"""Maximum time to wait for Kafka to respond to a request"""
    max_retries: NotRequired[float]
    r"""If messages are failing, you can set the maximum number of retries as high as 100 to prevent loss of data"""
    max_back_off: NotRequired[float]
    r"""The maximum wait time for a retry, in milliseconds. Default (and minimum) is 30,000 ms (30 seconds); maximum is 180,000 ms (180 seconds)."""
    initial_backoff: NotRequired[float]
    r"""Initial value used to calculate the retry, in milliseconds. Maximum is 600,000 ms (10 minutes)."""
    backoff_rate: NotRequired[float]
    r"""Set the backoff multiplier (2-20) to control the retry frequency for failed messages. For faster retries, use a lower multiplier. For slower retries with more delay between attempts, use a higher multiplier. The multiplier is used in an exponential backoff formula; see the Kafka [documentation](https://kafka.js.org/docs/retry-detailed) for details."""
    authentication_timeout: NotRequired[float]
    r"""Maximum time to wait for Kafka to respond to an authentication request"""
    reauthentication_threshold: NotRequired[float]
    r"""Specifies a time window during which @{product} can reauthenticate if needed. Creates the window measuring backward from the moment when credentials are set to expire."""
    sasl: NotRequired[OutputAzureEventhubAuthenticationTypedDict]
    r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""
    tls: NotRequired[OutputAzureEventhubTLSSettingsClientSideTypedDict]
    on_backpressure: NotRequired[OutputAzureEventhubBackpressureBehavior]
    r"""How to handle events when all receivers are exerting backpressure"""
    description: NotRequired[str]
    pq_max_file_size: NotRequired[str]
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
    pq_max_size: NotRequired[str]
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
    pq_path: NotRequired[str]
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
    pq_compress: NotRequired[OutputAzureEventhubCompression]
    r"""Codec to use to compress the persisted data"""
    pq_on_backpressure: NotRequired[OutputAzureEventhubQueueFullBehavior]
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
    pq_mode: NotRequired[OutputAzureEventhubMode]
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
    pq_controls: NotRequired[OutputAzureEventhubPqControlsTypedDict]


class OutputAzureEventhub(BaseModel):
    brokers: List[str]
    r"""List of Event Hubs Kafka brokers to connect to, eg. yourdomain.servicebus.windows.net:9093. The hostname can be found in the host portion of the primary or secondary connection string in Shared Access Policies."""

    topic: str
    r"""The name of the Event Hub (Kafka Topic) to publish events. Can be overwritten using field __topicOut."""

    id: Optional[str] = None
    r"""Unique ID for this output"""

    type: Annotated[
        Optional[OutputAzureEventhubType], PlainValidator(validate_open_enum(False))
    ] = None

    pipeline: Optional[str] = None
    r"""Pipeline to process data before sending out to this output"""

    system_fields: Annotated[
        Optional[List[str]], pydantic.Field(alias="systemFields")
    ] = None
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""

    environment: Optional[str] = None
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""

    streamtags: Optional[List[str]] = None
    r"""Tags for filtering and grouping in @{product}"""

    ack: Annotated[
        Optional[OutputAzureEventhubAcknowledgments],
        PlainValidator(validate_open_enum(True)),
    ] = OutputAzureEventhubAcknowledgments.ONE
    r"""Control the number of required acknowledgments"""

    format_: Annotated[
        Annotated[
            Optional[OutputAzureEventhubRecordDataFormat],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="format"),
    ] = OutputAzureEventhubRecordDataFormat.JSON
    r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""

    max_record_size_kb: Annotated[
        Optional[float], pydantic.Field(alias="maxRecordSizeKB")
    ] = 768
    r"""Maximum size of each record batch before compression. Setting should be < message.max.bytes settings in Event Hubs brokers."""

    flush_event_count: Annotated[
        Optional[float], pydantic.Field(alias="flushEventCount")
    ] = 1000
    r"""Maximum number of events in a batch before forcing a flush"""

    flush_period_sec: Annotated[
        Optional[float], pydantic.Field(alias="flushPeriodSec")
    ] = 1
    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""

    connection_timeout: Annotated[
        Optional[float], pydantic.Field(alias="connectionTimeout")
    ] = 10000
    r"""Maximum time to wait for a connection to complete successfully"""

    request_timeout: Annotated[
        Optional[float], pydantic.Field(alias="requestTimeout")
    ] = 60000
    r"""Maximum time to wait for Kafka to respond to a request"""

    max_retries: Annotated[Optional[float], pydantic.Field(alias="maxRetries")] = 5
    r"""If messages are failing, you can set the maximum number of retries as high as 100 to prevent loss of data"""

    max_back_off: Annotated[Optional[float], pydantic.Field(alias="maxBackOff")] = 30000
    r"""The maximum wait time for a retry, in milliseconds. Default (and minimum) is 30,000 ms (30 seconds); maximum is 180,000 ms (180 seconds)."""

    initial_backoff: Annotated[
        Optional[float], pydantic.Field(alias="initialBackoff")
    ] = 300
    r"""Initial value used to calculate the retry, in milliseconds. Maximum is 600,000 ms (10 minutes)."""

    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
    r"""Set the backoff multiplier (2-20) to control the retry frequency for failed messages. For faster retries, use a lower multiplier. For slower retries with more delay between attempts, use a higher multiplier. The multiplier is used in an exponential backoff formula; see the Kafka [documentation](https://kafka.js.org/docs/retry-detailed) for details."""

    authentication_timeout: Annotated[
        Optional[float], pydantic.Field(alias="authenticationTimeout")
    ] = 10000
    r"""Maximum time to wait for Kafka to respond to an authentication request"""

    reauthentication_threshold: Annotated[
        Optional[float], pydantic.Field(alias="reauthenticationThreshold")
    ] = 10000
    r"""Specifies a time window during which @{product} can reauthenticate if needed. Creates the window measuring backward from the moment when credentials are set to expire."""

    sasl: Optional[OutputAzureEventhubAuthentication] = None
    r"""Authentication parameters to use when connecting to brokers. Using TLS is highly recommended."""

    tls: Optional[OutputAzureEventhubTLSSettingsClientSide] = None

    on_backpressure: Annotated[
        Annotated[
            Optional[OutputAzureEventhubBackpressureBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="onBackpressure"),
    ] = OutputAzureEventhubBackpressureBehavior.BLOCK
    r"""How to handle events when all receivers are exerting backpressure"""

    description: Optional[str] = None

    pq_max_file_size: Annotated[
        Optional[str], pydantic.Field(alias="pqMaxFileSize")
    ] = "1 MB"
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""

    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""

    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
        "$CRIBL_HOME/state/queues"
    )
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

    pq_compress: Annotated[
        Annotated[
            Optional[OutputAzureEventhubCompression],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqCompress"),
    ] = OutputAzureEventhubCompression.NONE
    r"""Codec to use to compress the persisted data"""

    pq_on_backpressure: Annotated[
        Annotated[
            Optional[OutputAzureEventhubQueueFullBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqOnBackpressure"),
    ] = OutputAzureEventhubQueueFullBehavior.BLOCK
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    pq_mode: Annotated[
        Annotated[
            Optional[OutputAzureEventhubMode], PlainValidator(validate_open_enum(False))
        ],
        pydantic.Field(alias="pqMode"),
    ] = OutputAzureEventhubMode.ERROR
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    pq_controls: Annotated[
        Optional[OutputAzureEventhubPqControls], pydantic.Field(alias="pqControls")
    ] = None