cribl-control-plane 0.1.0b2__py3-none-any.whl → 0.1.1rc2__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registries; it is provided for informational purposes only.
This version of cribl-control-plane has been flagged as potentially problematic.
- cribl_control_plane/_hooks/clientcredentials.py +91 -41
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/errors/apierror.py +1 -1
- cribl_control_plane/errors/criblcontrolplaneerror.py +1 -1
- cribl_control_plane/errors/error.py +1 -1
- cribl_control_plane/errors/healthstatus_error.py +1 -1
- cribl_control_plane/errors/no_response_error.py +1 -1
- cribl_control_plane/errors/responsevalidationerror.py +1 -1
- cribl_control_plane/httpclient.py +0 -1
- cribl_control_plane/lakedatasets.py +12 -12
- cribl_control_plane/models/__init__.py +89 -7
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedatasetupdate.py +81 -0
- cribl_control_plane/models/distributedsummary.py +6 -0
- cribl_control_plane/models/gitinfo.py +14 -3
- cribl_control_plane/models/input.py +65 -63
- cribl_control_plane/models/inputappscope.py +4 -0
- cribl_control_plane/models/inputazureblob.py +4 -0
- cribl_control_plane/models/inputcollection.py +4 -0
- cribl_control_plane/models/inputconfluentcloud.py +8 -0
- cribl_control_plane/models/inputcribl.py +4 -0
- cribl_control_plane/models/inputcriblhttp.py +4 -0
- cribl_control_plane/models/inputcribllakehttp.py +4 -0
- cribl_control_plane/models/inputcriblmetrics.py +4 -0
- cribl_control_plane/models/inputcribltcp.py +4 -0
- cribl_control_plane/models/inputcrowdstrike.py +7 -0
- cribl_control_plane/models/inputdatadogagent.py +4 -0
- cribl_control_plane/models/inputdatagen.py +4 -0
- cribl_control_plane/models/inputedgeprometheus.py +12 -0
- cribl_control_plane/models/inputelastic.py +11 -0
- cribl_control_plane/models/inputeventhub.py +6 -0
- cribl_control_plane/models/inputexec.py +4 -0
- cribl_control_plane/models/inputfile.py +6 -0
- cribl_control_plane/models/inputfirehose.py +4 -0
- cribl_control_plane/models/inputgooglepubsub.py +7 -0
- cribl_control_plane/models/inputgrafana.py +8 -0
- cribl_control_plane/models/inputhttp.py +4 -0
- cribl_control_plane/models/inputhttpraw.py +4 -0
- cribl_control_plane/models/inputjournalfiles.py +4 -0
- cribl_control_plane/models/inputkafka.py +8 -0
- cribl_control_plane/models/inputkinesis.py +15 -0
- cribl_control_plane/models/inputkubeevents.py +4 -0
- cribl_control_plane/models/inputkubelogs.py +4 -0
- cribl_control_plane/models/inputkubemetrics.py +4 -0
- cribl_control_plane/models/inputloki.py +4 -0
- cribl_control_plane/models/inputmetrics.py +4 -0
- cribl_control_plane/models/inputmodeldriventelemetry.py +4 -0
- cribl_control_plane/models/inputmsk.py +7 -0
- cribl_control_plane/models/inputnetflow.py +4 -0
- cribl_control_plane/models/inputoffice365mgmt.py +11 -0
- cribl_control_plane/models/inputoffice365msgtrace.py +11 -0
- cribl_control_plane/models/inputoffice365service.py +11 -0
- cribl_control_plane/models/inputopentelemetry.py +8 -0
- cribl_control_plane/models/inputprometheus.py +10 -0
- cribl_control_plane/models/inputprometheusrw.py +4 -0
- cribl_control_plane/models/inputrawudp.py +4 -0
- cribl_control_plane/models/inputs3.py +7 -0
- cribl_control_plane/models/inputs3inventory.py +7 -0
- cribl_control_plane/models/inputsecuritylake.py +7 -0
- cribl_control_plane/models/inputsnmp.py +11 -0
- cribl_control_plane/models/inputsplunk.py +9 -0
- cribl_control_plane/models/inputsplunkhec.py +4 -0
- cribl_control_plane/models/inputsplunksearch.py +7 -0
- cribl_control_plane/models/inputsqs.py +17 -10
- cribl_control_plane/models/inputsyslog.py +8 -0
- cribl_control_plane/models/inputsystemmetrics.py +32 -0
- cribl_control_plane/models/inputsystemstate.py +4 -0
- cribl_control_plane/models/inputtcp.py +4 -0
- cribl_control_plane/models/inputtcpjson.py +4 -0
- cribl_control_plane/models/inputwef.py +6 -0
- cribl_control_plane/models/inputwindowsmetrics.py +28 -0
- cribl_control_plane/models/inputwineventlogs.py +8 -0
- cribl_control_plane/models/inputwiz.py +7 -0
- cribl_control_plane/models/inputwizwebhook.py +4 -0
- cribl_control_plane/models/inputzscalerhec.py +4 -0
- cribl_control_plane/models/jobinfo.py +4 -1
- cribl_control_plane/models/nodeprovidedinfo.py +4 -1
- cribl_control_plane/models/output.py +74 -69
- cribl_control_plane/models/outputazureblob.py +20 -0
- cribl_control_plane/models/outputazuredataexplorer.py +28 -0
- cribl_control_plane/models/outputazureeventhub.py +17 -0
- cribl_control_plane/models/outputazurelogs.py +13 -0
- cribl_control_plane/models/outputchronicle.py +444 -0
- cribl_control_plane/models/outputclickhouse.py +17 -0
- cribl_control_plane/models/outputcloudwatch.py +13 -0
- cribl_control_plane/models/outputconfluentcloud.py +24 -0
- cribl_control_plane/models/outputcriblhttp.py +15 -0
- cribl_control_plane/models/outputcribllake.py +21 -0
- cribl_control_plane/models/outputcribltcp.py +12 -0
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +15 -0
- cribl_control_plane/models/outputdatabricks.py +9 -0
- cribl_control_plane/models/outputdatadog.py +30 -0
- cribl_control_plane/models/outputdataset.py +23 -0
- cribl_control_plane/models/outputdls3.py +35 -0
- cribl_control_plane/models/outputdynatracehttp.py +22 -0
- cribl_control_plane/models/outputdynatraceotlp.py +22 -0
- cribl_control_plane/models/outputelastic.py +18 -0
- cribl_control_plane/models/outputelasticcloud.py +13 -0
- cribl_control_plane/models/outputexabeam.py +14 -0
- cribl_control_plane/models/outputfilesystem.py +15 -0
- cribl_control_plane/models/outputgooglechronicle.py +26 -4
- cribl_control_plane/models/outputgooglecloudlogging.py +28 -4
- cribl_control_plane/models/outputgooglecloudstorage.py +28 -0
- cribl_control_plane/models/outputgooglepubsub.py +13 -0
- cribl_control_plane/models/outputgrafanacloud.py +50 -0
- cribl_control_plane/models/outputgraphite.py +12 -0
- cribl_control_plane/models/outputhoneycomb.py +13 -0
- cribl_control_plane/models/outputhumiohec.py +15 -0
- cribl_control_plane/models/outputinfluxdb.py +19 -0
- cribl_control_plane/models/outputkafka.py +24 -0
- cribl_control_plane/models/outputkinesis.py +15 -0
- cribl_control_plane/models/outputloki.py +20 -0
- cribl_control_plane/models/outputminio.py +28 -0
- cribl_control_plane/models/outputmsk.py +23 -0
- cribl_control_plane/models/outputnewrelic.py +16 -0
- cribl_control_plane/models/outputnewrelicevents.py +16 -0
- cribl_control_plane/models/outputopentelemetry.py +22 -0
- cribl_control_plane/models/outputprometheus.py +13 -0
- cribl_control_plane/models/outputring.py +2 -0
- cribl_control_plane/models/outputs3.py +35 -0
- cribl_control_plane/models/outputsecuritylake.py +29 -0
- cribl_control_plane/models/outputsentinel.py +15 -0
- cribl_control_plane/models/outputsentineloneaisiem.py +13 -0
- cribl_control_plane/models/outputservicenow.py +21 -0
- cribl_control_plane/models/outputsignalfx.py +13 -0
- cribl_control_plane/models/outputsns.py +13 -0
- cribl_control_plane/models/outputsplunk.py +15 -0
- cribl_control_plane/models/outputsplunkhec.py +13 -0
- cribl_control_plane/models/outputsplunklb.py +15 -0
- cribl_control_plane/models/outputsqs.py +23 -10
- cribl_control_plane/models/outputstatsd.py +12 -0
- cribl_control_plane/models/outputstatsdext.py +12 -0
- cribl_control_plane/models/outputsumologic.py +15 -0
- cribl_control_plane/models/outputsyslog.py +24 -0
- cribl_control_plane/models/outputtcpjson.py +12 -0
- cribl_control_plane/models/outputwavefront.py +13 -0
- cribl_control_plane/models/outputwebhook.py +23 -0
- cribl_control_plane/models/outputxsiam.py +13 -0
- cribl_control_plane/models/packinfo.py +6 -3
- cribl_control_plane/models/packinstallinfo.py +6 -3
- cribl_control_plane/models/runnablejobcollection.py +4 -0
- cribl_control_plane/models/updatecribllakedatasetbylakeidandidop.py +9 -5
- cribl_control_plane/models/updatepacksop.py +27 -0
- cribl_control_plane/models/uploadpackresponse.py +13 -0
- cribl_control_plane/packs.py +196 -1
- {cribl_control_plane-0.1.0b2.dist-info → cribl_control_plane-0.1.1rc2.dist-info}/METADATA +47 -13
- {cribl_control_plane-0.1.0b2.dist-info → cribl_control_plane-0.1.1rc2.dist-info}/RECORD +148 -144
- {cribl_control_plane-0.1.0b2.dist-info → cribl_control_plane-0.1.1rc2.dist-info}/WHEEL +0 -0
--- a/cribl_control_plane/models/outputazureeventhub.py
+++ b/cribl_control_plane/models/outputazureeventhub.py
@@ -18,20 +18,27 @@ class OutputAzureEventhubType(str, Enum):
 class OutputAzureEventhubAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments"""

+    # Leader
     ONE = 1
+    # None
     ZERO = 0
+    # All
     MINUS_1 = -1


 class OutputAzureEventhubRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to the Event Hubs Kafka brokers"""

+    # JSON
     JSON = "json"
+    # Field _raw
     RAW = "raw"


 class OutputAzureEventhubSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
+    # PLAIN
     PLAIN = "plain"
+    # OAUTHBEARER
     OAUTHBEARER = "oauthbearer"


@@ -71,30 +78,40 @@ class OutputAzureEventhubTLSSettingsClientSide(BaseModel):
 class OutputAzureEventhubBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"


 class OutputAzureEventhubCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"


 class OutputAzureEventhubQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"


 class OutputAzureEventhubMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"


--- a/cribl_control_plane/models/outputazurelogs.py
+++ b/cribl_control_plane/models/outputazurelogs.py
@@ -29,8 +29,11 @@ class OutputAzureLogsExtraHTTPHeader(BaseModel):
 class OutputAzureLogsFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"


@@ -91,8 +94,11 @@ class OutputAzureLogsTimeoutRetrySettings(BaseModel):
 class OutputAzureLogsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"


@@ -106,22 +112,29 @@ class OutputAzureLogsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMet
 class OutputAzureLogsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"


 class OutputAzureLogsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"


 class OutputAzureLogsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"


--- /dev/null
+++ b/cribl_control_plane/models/outputchronicle.py
@@ -0,0 +1,444 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class OutputChronicleType(str, Enum):
+    CHRONICLE = "chronicle"
+
+
+class OutputChronicleAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    SERVICE_ACCOUNT = "serviceAccount"
+    SERVICE_ACCOUNT_SECRET = "serviceAccountSecret"
+
+
+class OutputChronicleResponseRetrySettingTypedDict(TypedDict):
+    http_status: float
+    r"""The HTTP response status code that will trigger retries"""
+    initial_backoff: NotRequired[float]
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+    backoff_rate: NotRequired[float]
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+    max_backoff: NotRequired[float]
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleResponseRetrySetting(BaseModel):
+    http_status: Annotated[float, pydantic.Field(alias="httpStatus")]
+    r"""The HTTP response status code that will trigger retries"""
+
+    initial_backoff: Annotated[
+        Optional[float], pydantic.Field(alias="initialBackoff")
+    ] = 1000
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+
+    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+
+    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleTimeoutRetrySettingsTypedDict(TypedDict):
+    timeout_retry: NotRequired[bool]
+    initial_backoff: NotRequired[float]
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+    backoff_rate: NotRequired[float]
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+    max_backoff: NotRequired[float]
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleTimeoutRetrySettings(BaseModel):
+    timeout_retry: Annotated[Optional[bool], pydantic.Field(alias="timeoutRetry")] = (
+        False
+    )
+
+    initial_backoff: Annotated[
+        Optional[float], pydantic.Field(alias="initialBackoff")
+    ] = 1000
+    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
+
+    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
+    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
+
+    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
+    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
+
+
+class OutputChronicleExtraHTTPHeaderTypedDict(TypedDict):
+    value: str
+    name: NotRequired[str]
+
+
+class OutputChronicleExtraHTTPHeader(BaseModel):
+    value: str
+
+    name: Optional[str] = None
+
+
+class OutputChronicleFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+
+    # Payload
+    PAYLOAD = "payload"
+    # Payload + Headers
+    PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
+    NONE = "none"
+
+
+class OutputChronicleBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    # Block
+    BLOCK = "block"
+    # Drop
+    DROP = "drop"
+    # Persistent Queue
+    QUEUE = "queue"
+
+
+class OutputChronicleCustomLabelTypedDict(TypedDict):
+    key: str
+    value: str
+
+
+class OutputChronicleCustomLabel(BaseModel):
+    key: str
+
+    value: str
+
+
+class OutputChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    # None
+    NONE = "none"
+    # Gzip
+    GZIP = "gzip"
+
+
+class OutputChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    # Block
+    BLOCK = "block"
+    # Drop new data
+    DROP = "drop"
+
+
+class OutputChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    BACKPRESSURE = "backpressure"
+    # Always On
+    ALWAYS = "always"
+
+
+class OutputChroniclePqControlsTypedDict(TypedDict):
+    pass
+
+
+class OutputChroniclePqControls(BaseModel):
+    pass
+
+
+class OutputChronicleTypedDict(TypedDict):
+    type: OutputChronicleType
+    region: str
+    r"""Regional endpoint to send events to"""
+    log_type: str
+    r"""Default log type value to send to SecOps. Can be overwritten by event field __logType."""
+    gcp_project_id: str
+    r"""The Google Cloud Platform (GCP) project ID to send events to"""
+    gcp_instance: str
+    r"""The Google Cloud Platform (GCP) instance to send events to. This is the Chronicle customer uuid."""
+    id: NotRequired[str]
+    r"""Unique ID for this output"""
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data before sending out to this output"""
+    system_fields: NotRequired[List[str]]
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    api_version: NotRequired[str]
+    authentication_method: NotRequired[OutputChronicleAuthenticationMethod]
+    response_retry_settings: NotRequired[
+        List[OutputChronicleResponseRetrySettingTypedDict]
+    ]
+    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
+    timeout_retry_settings: NotRequired[OutputChronicleTimeoutRetrySettingsTypedDict]
+    response_honor_retry_after_header: NotRequired[bool]
+    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+    concurrency: NotRequired[float]
+    r"""Maximum number of ongoing requests before blocking"""
+    max_payload_size_kb: NotRequired[float]
+    r"""Maximum size, in KB, of the request body"""
+    max_payload_events: NotRequired[float]
+    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
+    compress: NotRequired[bool]
+    r"""Compress the payload body before sending"""
+    reject_unauthorized: NotRequired[bool]
+    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
+    Enabled by default. When this setting is also present in TLS Settings (Client Side),
+    that value will take precedence.
+    """
+    timeout_sec: NotRequired[float]
+    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
+    flush_period_sec: NotRequired[float]
+    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
+    extra_http_headers: NotRequired[List[OutputChronicleExtraHTTPHeaderTypedDict]]
+    r"""Headers to add to all events"""
+    failed_request_logging_mode: NotRequired[OutputChronicleFailedRequestLoggingMode]
+    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+    safe_headers: NotRequired[List[str]]
+    r"""List of headers that are safe to log in plain text"""
+    use_round_robin_dns: NotRequired[bool]
+    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""
+    on_backpressure: NotRequired[OutputChronicleBackpressureBehavior]
+    r"""How to handle events when all receivers are exerting backpressure"""
+    total_memory_limit_kb: NotRequired[float]
+    r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
+    ingestion_method: NotRequired[str]
+    namespace: NotRequired[str]
+    r"""User-configured environment namespace to identify the data domain the logs originated from. This namespace is used as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
+    log_text_field: NotRequired[str]
+    r"""Name of the event field that contains the log text to send. If not specified, Stream sends a JSON representation of the whole event."""
+    custom_labels: NotRequired[List[OutputChronicleCustomLabelTypedDict]]
+    r"""Custom labels to be added to every event"""
+    description: NotRequired[str]
+    service_account_credentials: NotRequired[str]
+    r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
+    service_account_credentials_secret: NotRequired[str]
+    r"""Select or create a stored text secret"""
+    pq_max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+    pq_max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    pq_path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+    pq_compress: NotRequired[OutputChronicleCompression]
+    r"""Codec to use to compress the persisted data"""
+    pq_on_backpressure: NotRequired[OutputChronicleQueueFullBehavior]
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputChronicleMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_controls: NotRequired[OutputChroniclePqControlsTypedDict]
+
+
+class OutputChronicle(BaseModel):
+    type: OutputChronicleType
+
+    region: str
+    r"""Regional endpoint to send events to"""
+
+    log_type: Annotated[str, pydantic.Field(alias="logType")]
+    r"""Default log type value to send to SecOps. Can be overwritten by event field __logType."""
+
+    gcp_project_id: Annotated[str, pydantic.Field(alias="gcpProjectId")]
+    r"""The Google Cloud Platform (GCP) project ID to send events to"""
+
+    gcp_instance: Annotated[str, pydantic.Field(alias="gcpInstance")]
+    r"""The Google Cloud Platform (GCP) instance to send events to. This is the Chronicle customer uuid."""
+
+    id: Optional[str] = None
+    r"""Unique ID for this output"""
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data before sending out to this output"""
+
+    system_fields: Annotated[
+        Optional[List[str]], pydantic.Field(alias="systemFields")
+    ] = None
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    api_version: Annotated[Optional[str], pydantic.Field(alias="apiVersion")] = (
+        "v1alpha"
+    )
+
+    authentication_method: Annotated[
+        Annotated[
+            Optional[OutputChronicleAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authenticationMethod"),
+    ] = OutputChronicleAuthenticationMethod.SERVICE_ACCOUNT
+
+    response_retry_settings: Annotated[
+        Optional[List[OutputChronicleResponseRetrySetting]],
+        pydantic.Field(alias="responseRetrySettings"),
+    ] = None
+    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
+
+    timeout_retry_settings: Annotated[
+        Optional[OutputChronicleTimeoutRetrySettings],
+        pydantic.Field(alias="timeoutRetrySettings"),
+    ] = None
+
+    response_honor_retry_after_header: Annotated[
+        Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
+    ] = True
+    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
+
+    concurrency: Optional[float] = 5
+    r"""Maximum number of ongoing requests before blocking"""
+
+    max_payload_size_kb: Annotated[
+        Optional[float], pydantic.Field(alias="maxPayloadSizeKB")
+    ] = 1024
+    r"""Maximum size, in KB, of the request body"""
+
+    max_payload_events: Annotated[
+        Optional[float], pydantic.Field(alias="maxPayloadEvents")
+    ] = 0
+    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
+
+    compress: Optional[bool] = True
+    r"""Compress the payload body before sending"""
+
+    reject_unauthorized: Annotated[
+        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
+    ] = True
+    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
+    Enabled by default. When this setting is also present in TLS Settings (Client Side),
+    that value will take precedence.
+    """
+
+    timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 90
+    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
+
+    flush_period_sec: Annotated[
+        Optional[float], pydantic.Field(alias="flushPeriodSec")
+    ] = 1
+    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
+
+    extra_http_headers: Annotated[
+        Optional[List[OutputChronicleExtraHTTPHeader]],
+        pydantic.Field(alias="extraHttpHeaders"),
+    ] = None
+    r"""Headers to add to all events"""
+
+    failed_request_logging_mode: Annotated[
+        Annotated[
+            Optional[OutputChronicleFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="failedRequestLoggingMode"),
+    ] = OutputChronicleFailedRequestLoggingMode.NONE
+    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
+
+    safe_headers: Annotated[
+        Optional[List[str]], pydantic.Field(alias="safeHeaders")
+    ] = None
+    r"""List of headers that are safe to log in plain text"""
+
+    use_round_robin_dns: Annotated[
+        Optional[bool], pydantic.Field(alias="useRoundRobinDns")
+    ] = False
+    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""
+
+    on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputChronicleBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputChronicleBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    total_memory_limit_kb: Annotated[
+        Optional[float], pydantic.Field(alias="totalMemoryLimitKB")
+    ] = None
+    r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
+
+    ingestion_method: Annotated[
+        Optional[str], pydantic.Field(alias="ingestionMethod")
+    ] = "ImportLogs"
+
+    namespace: Optional[str] = None
+    r"""User-configured environment namespace to identify the data domain the logs originated from. This namespace is used as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
+
+    log_text_field: Annotated[Optional[str], pydantic.Field(alias="logTextField")] = (
+        None
+    )
+    r"""Name of the event field that contains the log text to send. If not specified, Stream sends a JSON representation of the whole event."""
+
+    custom_labels: Annotated[
+        Optional[List[OutputChronicleCustomLabel]], pydantic.Field(alias="customLabels")
+    ] = None
+    r"""Custom labels to be added to every event"""
+
+    description: Optional[str] = None
+
+    service_account_credentials: Annotated[
+        Optional[str], pydantic.Field(alias="serviceAccountCredentials")
+    ] = None
+    r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
+
+    service_account_credentials_secret: Annotated[
+        Optional[str], pydantic.Field(alias="serviceAccountCredentialsSecret")
+    ] = None
+    r"""Select or create a stored text secret"""
+
+    pq_max_file_size: Annotated[
+        Optional[str], pydantic.Field(alias="pqMaxFileSize")
+    ] = "1 MB"
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+
+    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
+        "$CRIBL_HOME/state/queues"
+    )
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+
+    pq_compress: Annotated[
+        Annotated[
+            Optional[OutputChronicleCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
+    ] = OutputChronicleCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+    pq_on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputChronicleQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
+    ] = OutputChronicleQueueFullBehavior.BLOCK
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputChronicleMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputChronicleMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_controls: Annotated[
+        Optional[OutputChroniclePqControls], pydantic.Field(alias="pqControls")
+    ] = None
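The retry settings added in `outputchronicle.py` above encode an exponential backoff policy: with the generated defaults (`initialBackoff` = 1000 ms, `backoffRate` = 2, `maxBackoff` = 10000 ms), the docstrings describe retries after 2 seconds, then 4, then 8, capped at the maximum. A minimal sketch of that schedule under those defaults (illustrative only; `backoff_schedule` is not part of the SDK):

```python
def backoff_schedule(
    initial_backoff_ms: float = 1000,  # initialBackoff default above
    backoff_rate: float = 2,           # backoffRate default above
    max_backoff_ms: float = 10000,     # maxBackoff default above
    attempts: int = 5,
) -> list[float]:
    """Illustrative reading of the retry fields: attempt n waits
    initial_backoff * rate**n milliseconds, capped at max_backoff."""
    return [
        min(initial_backoff_ms * backoff_rate**n, max_backoff_ms)
        for n in range(1, attempts + 1)
    ]

print(backoff_schedule())  # [2000, 4000, 8000, 10000, 10000]
```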
--- a/cribl_control_plane/models/outputclickhouse.py
+++ b/cribl_control_plane/models/outputclickhouse.py
@@ -28,14 +28,18 @@ class OutputClickHouseAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta
 class OutputClickHouseFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data format to use when sending data to ClickHouse. Defaults to JSON Compact."""

+    # JSONCompactEachRowWithNames
     JSON_COMPACT_EACH_ROW_WITH_NAMES = "json-compact-each-row-with-names"
+    # JSONEachRow
     JSON_EACH_ROW = "json-each-row"


 class MappingType(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How event fields are mapped to ClickHouse columns."""

+    # Automatic
     AUTOMATIC = "automatic"
+    # Custom
     CUSTOM = "custom"


@@ -125,8 +129,11 @@ class OutputClickHouseExtraHTTPHeader(BaseModel):
 class OutputClickHouseFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

+    # Payload
     PAYLOAD = "payload"
+    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
+    # None
     NONE = "none"


@@ -187,8 +194,11 @@ class OutputClickHouseTimeoutRetrySettings(BaseModel):
 class OutputClickHouseBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

+    # Block
     BLOCK = "block"
+    # Drop
     DROP = "drop"
+    # Persistent Queue
     QUEUE = "queue"


@@ -247,22 +257,29 @@ class ColumnMapping(BaseModel):
 class OutputClickHouseCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

+    # None
     NONE = "none"
+    # Gzip
     GZIP = "gzip"


 class OutputClickHouseQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+    # Block
     BLOCK = "block"
+    # Drop new data
     DROP = "drop"


 class OutputClickHouseMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

+    # Error
     ERROR = "error"
+    # Backpressure
     BACKPRESSURE = "backpressure"
+    # Always On
     ALWAYS = "always"


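The headline change in this release is the new Chronicle Destination model. A minimal construction sketch, assuming (as the expanded `models/__init__.py` suggests) that `OutputChronicle` is re-exported from `cribl_control_plane.models`, and that the generated `BaseModel` accepts the camelCase wire aliases via pydantic's `model_validate`; all field values below are hypothetical:

```python
from cribl_control_plane.models import OutputChronicle

# Required fields per the model above: type, region, logType,
# gcpProjectId, and gcpInstance. Values here are placeholders.
out = OutputChronicle.model_validate({
    "type": "chronicle",
    "region": "us",
    "logType": "CRIBL_LOGS",
    "gcpProjectId": "example-project",
    "gcpInstance": "00000000-0000-0000-0000-000000000000",
})

print(out.api_version)  # "v1alpha" (generated default)
print(out.pq_mode)      # OutputChronicleMode.ERROR (generated default)
```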