cribl-control-plane 0.1.0b1__py3-none-any.whl → 0.2.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cribl-control-plane was flagged as potentially problematic.
- cribl_control_plane/_hooks/clientcredentials.py +91 -41
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/errors/apierror.py +1 -1
- cribl_control_plane/errors/criblcontrolplaneerror.py +1 -1
- cribl_control_plane/errors/error.py +1 -1
- cribl_control_plane/errors/healthstatus_error.py +1 -1
- cribl_control_plane/errors/no_response_error.py +1 -1
- cribl_control_plane/errors/responsevalidationerror.py +1 -1
- cribl_control_plane/httpclient.py +0 -1
- cribl_control_plane/lakedatasets.py +12 -12
- cribl_control_plane/models/__init__.py +106 -57
- cribl_control_plane/models/appmode.py +14 -0
- cribl_control_plane/models/cribllakedatasetupdate.py +81 -0
- cribl_control_plane/models/gitinfo.py +14 -3
- cribl_control_plane/models/hbcriblinfo.py +3 -14
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/inputconfluentcloud.py +18 -0
- cribl_control_plane/models/inputkafka.py +17 -0
- cribl_control_plane/models/inputmsk.py +17 -0
- cribl_control_plane/models/inputsqs.py +8 -10
- cribl_control_plane/models/nodeprovidedinfo.py +0 -3
- cribl_control_plane/models/output.py +3 -3
- cribl_control_plane/models/outputchronicle.py +431 -0
- cribl_control_plane/models/outputconfluentcloud.py +18 -0
- cribl_control_plane/models/outputgooglechronicle.py +5 -4
- cribl_control_plane/models/outputgooglecloudlogging.py +9 -4
- cribl_control_plane/models/outputkafka.py +17 -0
- cribl_control_plane/models/outputmsk.py +17 -0
- cribl_control_plane/models/outputsqs.py +8 -10
- cribl_control_plane/models/routecloneconf.py +13 -0
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/updatecribllakedatasetbylakeidandidop.py +9 -5
- {cribl_control_plane-0.1.0b1.dist-info → cribl_control_plane-0.2.0a1.dist-info}/METADATA +1 -8
- {cribl_control_plane-0.1.0b1.dist-info → cribl_control_plane-0.2.0a1.dist-info}/RECORD +35 -32
- cribl_control_plane/models/outputdatabricks.py +0 -439
- {cribl_control_plane-0.1.0b1.dist-info → cribl_control_plane-0.2.0a1.dist-info}/WHEEL +0 -0
cribl_control_plane/models/inputconfluentcloud.py
@@ -187,6 +187,13 @@ class InputConfluentCloudTLSSettingsClientSide(BaseModel):
     ] = None
 
 
+class InputConfluentCloudSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class InputConfluentCloudAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -297,6 +304,8 @@ class InputConfluentCloudKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[InputConfluentCloudSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -318,6 +327,15 @@ class InputConfluentCloudKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
+    schema_type: Annotated[
+        Annotated[
+            Optional[InputConfluentCloudSchemaType],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="schemaType"),
+    ] = InputConfluentCloudSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
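The net effect for SDK users is that the Kafka-family schema registry settings gain an optional schema_type that defaults to Avro. A minimal sketch of setting the new field, assuming these generated models are re-exported from cribl_control_plane.models like the other models in this package:

from cribl_control_plane.models import (  # assumed re-export path
    InputConfluentCloudKafkaSchemaRegistryAuthentication,
    InputConfluentCloudSchemaType,
)

# Schema registry settings that decode JSON-encoded payloads instead of the
# default Avro; unset fields keep the generated defaults shown in the diff.
registry_auth = InputConfluentCloudKafkaSchemaRegistryAuthentication(
    schema_registry_url="http://localhost:8081",
    schema_type=InputConfluentCloudSchemaType.JSON,
)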
cribl_control_plane/models/inputkafka.py
@@ -103,6 +103,13 @@ class InputKafkaPq(BaseModel):
     ] = None
 
 
+class InputKafkaSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class InputKafkaAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -213,6 +220,8 @@ class InputKafkaKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[InputKafkaSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -232,6 +241,14 @@ class InputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
+    schema_type: Annotated[
+        Annotated[
+            Optional[InputKafkaSchemaType], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="schemaType"),
+    ] = InputKafkaSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
cribl_control_plane/models/inputmsk.py
@@ -116,6 +116,13 @@ class InputMskMetadatum(BaseModel):
     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
 
 
+class InputMskSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class InputMskAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -226,6 +233,8 @@ class InputMskKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[InputMskSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -245,6 +254,14 @@ class InputMskKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
+    schema_type: Annotated[
+        Annotated[
+            Optional[InputMskSchemaType], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="schemaType"),
+    ] = InputMskSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
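The same schemaType wire name is used across the Confluent Cloud, Kafka, and MSK variants, so a round trip through pydantic shows how the snake_case attribute maps back to the camelCase alias. A small check, assuming the generated BaseModel is a standard pydantic v2 model (model_dump is pydantic's own API, not something this SDK adds) and the models are re-exported from cribl_control_plane.models:

from cribl_control_plane.models import (  # assumed re-export path
    InputMskKafkaSchemaRegistryAuthentication,
    InputMskSchemaType,
)

auth = InputMskKafkaSchemaRegistryAuthentication(schema_type=InputMskSchemaType.JSON)
# mode="json" serializes enum members to their string values; by_alias=True emits
# the camelCase wire names, so the dump should include {"schemaType": "json", ...}.
print(auth.model_dump(mode="json", by_alias=True))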
cribl_control_plane/models/inputsqs.py
@@ -142,6 +142,8 @@ class InputSqsTypedDict(TypedDict):
     type: InputSqsType
     queue_name: str
     r"""The name, URL, or ARN of the SQS queue to read events from. When a non-AWS URL is specified, format must be: '{url}/myQueueName'. Example: 'https://host:port/myQueueName'. Value must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can only be evaluated at init time. Example referencing a Global Variable: `https://host:port/myQueue-${C.vars.myVar}`."""
+    queue_type: InputSqsQueueType
+    r"""The queue type used (or created)"""
     id: NotRequired[str]
     r"""Unique ID for this input"""
     disabled: NotRequired[bool]
@@ -158,8 +160,6 @@ class InputSqsTypedDict(TypedDict):
     connections: NotRequired[List[InputSqsConnectionTypedDict]]
     r"""Direct connections to Destinations, and optionally via a Pipeline or a Pack"""
     pq: NotRequired[InputSqsPqTypedDict]
-    queue_type: NotRequired[InputSqsQueueType]
-    r"""The queue type used (or created)"""
     aws_account_id: NotRequired[str]
     r"""SQS queue owner's AWS account ID. Leave empty if SQS queue is in same AWS account."""
     create_queue: NotRequired[bool]
@@ -207,6 +207,12 @@ class InputSqs(BaseModel):
     queue_name: Annotated[str, pydantic.Field(alias="queueName")]
     r"""The name, URL, or ARN of the SQS queue to read events from. When a non-AWS URL is specified, format must be: '{url}/myQueueName'. Example: 'https://host:port/myQueueName'. Value must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can only be evaluated at init time. Example referencing a Global Variable: `https://host:port/myQueue-${C.vars.myVar}`."""
 
+    queue_type: Annotated[
+        Annotated[InputSqsQueueType, PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="queueType"),
+    ]
+    r"""The queue type used (or created)"""
+
     id: Optional[str] = None
     r"""Unique ID for this input"""
 
@@ -234,14 +240,6 @@ class InputSqs(BaseModel):
 
     pq: Optional[InputSqsPq] = None
 
-    queue_type: Annotated[
-        Annotated[
-            Optional[InputSqsQueueType], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="queueType"),
-    ] = InputSqsQueueType.STANDARD
-    r"""The queue type used (or created)"""
-
     aws_account_id: Annotated[Optional[str], pydantic.Field(alias="awsAccountId")] = (
         None
     )
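queue_type moves from an optional field (previously defaulting to STANDARD) to a required one, so constructing InputSqs against 0.2.0a1 must pass it explicitly. A hedged migration sketch, assuming the models are re-exported from cribl_control_plane.models; the enum member names used below are illustrative and not confirmed by this diff:

from cribl_control_plane.models import InputSqs, InputSqsQueueType, InputSqsType

source = InputSqs(
    type=InputSqsType.SQS,  # assumed member name for the SQS type tag
    queue_name="`myQueue`",  # a JavaScript expression string, per the field docs
    queue_type=InputSqsQueueType.STANDARD,  # now required; was optional with this default
)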
cribl_control_plane/models/nodeprovidedinfo.py
@@ -19,7 +19,6 @@ class NodeProvidedInfoTags(BaseModel):
 
 class NodeProvidedInfoAwsTypedDict(TypedDict):
     enabled: bool
-    instance_id: str
     region: str
     type: str
     zone: str
@@ -29,8 +28,6 @@ class NodeProvidedInfoAwsTypedDict(TypedDict):
 class NodeProvidedInfoAws(BaseModel):
     enabled: bool
 
-    instance_id: Annotated[str, pydantic.Field(alias="instanceId")]
-
     region: str
 
     type: str
cribl_control_plane/models/output.py
@@ -8,6 +8,7 @@ from .outputazuredataexplorer import (
 )
 from .outputazureeventhub import OutputAzureEventhub, OutputAzureEventhubTypedDict
 from .outputazurelogs import OutputAzureLogs, OutputAzureLogsTypedDict
+from .outputchronicle import OutputChronicle, OutputChronicleTypedDict
 from .outputclickhouse import OutputClickHouse, OutputClickHouseTypedDict
 from .outputcloudwatch import OutputCloudwatch, OutputCloudwatchTypedDict
 from .outputconfluentcloud import OutputConfluentCloud, OutputConfluentCloudTypedDict
@@ -18,7 +19,6 @@ from .outputcrowdstrikenextgensiem import (
     OutputCrowdstrikeNextGenSiem,
     OutputCrowdstrikeNextGenSiemTypedDict,
 )
-from .outputdatabricks import OutputDatabricks, OutputDatabricksTypedDict
 from .outputdatadog import OutputDatadog, OutputDatadogTypedDict
 from .outputdataset import OutputDataset, OutputDatasetTypedDict
 from .outputdefault import OutputDefault, OutputDefaultTypedDict
@@ -130,6 +130,7 @@ OutputTypedDict = TypeAliasType(
         OutputSplunkHecTypedDict,
         OutputDynatraceHTTPTypedDict,
         OutputServiceNowTypedDict,
+        OutputChronicleTypedDict,
         OutputDynatraceOtlpTypedDict,
         OutputElasticTypedDict,
         OutputGoogleChronicleTypedDict,
@@ -138,7 +139,6 @@ OutputTypedDict = TypeAliasType(
         OutputPrometheusTypedDict,
         OutputMskTypedDict,
         OutputSentinelOneAiSiemTypedDict,
-        OutputDatabricksTypedDict,
         OutputSentinelTypedDict,
         OutputInfluxdbTypedDict,
         OutputGoogleCloudStorageTypedDict,
@@ -202,6 +202,7 @@ Output = TypeAliasType(
         OutputSplunkHec,
         OutputDynatraceHTTP,
         OutputServiceNow,
+        OutputChronicle,
         OutputDynatraceOtlp,
         OutputElastic,
         OutputGoogleChronicle,
@@ -210,7 +211,6 @@ Output = TypeAliasType(
         OutputPrometheus,
         OutputMsk,
         OutputSentinelOneAiSiem,
-        OutputDatabricks,
         OutputSentinel,
         OutputInfluxdb,
         OutputGoogleCloudStorage,
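For code that pattern-matches on the Output union, the practical impact is that OutputDatabricks leaves the union and OutputChronicle joins it. A minimal illustration of the 0.2.0a1 shape; the describe helper is purely illustrative, and only the model names come from this diff:

from cribl_control_plane.models import Output, OutputChronicle

def describe(dest: Output) -> str:
    # OutputDatabricks is no longer part of the union in 0.2.0a1, so any
    # branch that referenced it must be removed or remapped.
    if isinstance(dest, OutputChronicle):
        return f"Chronicle destination for GCP project {dest.gcp_project_id}"
    return type(dest).__name__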
cribl_control_plane/models/outputchronicle.py (new file: all 431 lines added)

"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from cribl_control_plane import utils
from cribl_control_plane.types import BaseModel
from cribl_control_plane.utils import validate_open_enum
from enum import Enum
import pydantic
from pydantic.functional_validators import PlainValidator
from typing import List, Optional
from typing_extensions import Annotated, NotRequired, TypedDict


class OutputChronicleType(str, Enum):
    CHRONICLE = "chronicle"


class OutputChronicleAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
    SERVICE_ACCOUNT = "serviceAccount"
    SERVICE_ACCOUNT_SECRET = "serviceAccountSecret"


class OutputChronicleResponseRetrySettingTypedDict(TypedDict):
    http_status: float
    r"""The HTTP response status code that will trigger retries"""
    initial_backoff: NotRequired[float]
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
    backoff_rate: NotRequired[float]
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
    max_backoff: NotRequired[float]
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputChronicleResponseRetrySetting(BaseModel):
    http_status: Annotated[float, pydantic.Field(alias="httpStatus")]
    r"""The HTTP response status code that will trigger retries"""

    initial_backoff: Annotated[
        Optional[float], pydantic.Field(alias="initialBackoff")
    ] = 1000
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""

    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""

    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputChronicleTimeoutRetrySettingsTypedDict(TypedDict):
    timeout_retry: NotRequired[bool]
    initial_backoff: NotRequired[float]
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""
    backoff_rate: NotRequired[float]
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""
    max_backoff: NotRequired[float]
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputChronicleTimeoutRetrySettings(BaseModel):
    timeout_retry: Annotated[Optional[bool], pydantic.Field(alias="timeoutRetry")] = (
        False
    )

    initial_backoff: Annotated[
        Optional[float], pydantic.Field(alias="initialBackoff")
    ] = 1000
    r"""How long, in milliseconds, Cribl Stream should wait before initiating backoff. Maximum interval is 600,000 ms (10 minutes)."""

    backoff_rate: Annotated[Optional[float], pydantic.Field(alias="backoffRate")] = 2
    r"""Base for exponential backoff. A value of 2 (default) means Cribl Stream will retry after 2 seconds, then 4 seconds, then 8 seconds, etc."""

    max_backoff: Annotated[Optional[float], pydantic.Field(alias="maxBackoff")] = 10000
    r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


class OutputChronicleExtraHTTPHeaderTypedDict(TypedDict):
    value: str
    name: NotRequired[str]


class OutputChronicleExtraHTTPHeader(BaseModel):
    value: str

    name: Optional[str] = None


class OutputChronicleFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

    PAYLOAD = "payload"
    PAYLOAD_AND_HEADERS = "payloadAndHeaders"
    NONE = "none"


class OutputChronicleBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when all receivers are exerting backpressure"""

    BLOCK = "block"
    DROP = "drop"
    QUEUE = "queue"


class OutputChronicleCustomLabelTypedDict(TypedDict):
    key: str
    value: str


class OutputChronicleCustomLabel(BaseModel):
    key: str

    value: str


class OutputChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""Codec to use to compress the persisted data"""

    NONE = "none"
    GZIP = "gzip"


class OutputChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    BLOCK = "block"
    DROP = "drop"


class OutputChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class OutputChroniclePqControlsTypedDict(TypedDict):
    pass


class OutputChroniclePqControls(BaseModel):
    pass


class OutputChronicleTypedDict(TypedDict):
    type: OutputChronicleType
    region: str
    r"""Regional endpoint to send events to"""
    log_type: str
    r"""Default log type value to send to SecOps. Can be overwritten by event field __logType."""
    gcp_project_id: str
    r"""The Google Cloud Platform (GCP) project ID to send events to"""
    gcp_instance: str
    r"""The Google Cloud Platform (GCP) instance to send events to. This is the Chronicle customer uuid."""
    id: NotRequired[str]
    r"""Unique ID for this output"""
    pipeline: NotRequired[str]
    r"""Pipeline to process data before sending out to this output"""
    system_fields: NotRequired[List[str]]
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
    environment: NotRequired[str]
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
    streamtags: NotRequired[List[str]]
    r"""Tags for filtering and grouping in @{product}"""
    api_version: NotRequired[str]
    authentication_method: NotRequired[OutputChronicleAuthenticationMethod]
    response_retry_settings: NotRequired[
        List[OutputChronicleResponseRetrySettingTypedDict]
    ]
    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""
    timeout_retry_settings: NotRequired[OutputChronicleTimeoutRetrySettingsTypedDict]
    response_honor_retry_after_header: NotRequired[bool]
    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
    concurrency: NotRequired[float]
    r"""Maximum number of ongoing requests before blocking"""
    max_payload_size_kb: NotRequired[float]
    r"""Maximum size, in KB, of the request body"""
    max_payload_events: NotRequired[float]
    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""
    compress: NotRequired[bool]
    r"""Compress the payload body before sending"""
    reject_unauthorized: NotRequired[bool]
    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
    Enabled by default. When this setting is also present in TLS Settings (Client Side),
    that value will take precedence.
    """
    timeout_sec: NotRequired[float]
    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
    flush_period_sec: NotRequired[float]
    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""
    extra_http_headers: NotRequired[List[OutputChronicleExtraHTTPHeaderTypedDict]]
    r"""Headers to add to all events"""
    failed_request_logging_mode: NotRequired[OutputChronicleFailedRequestLoggingMode]
    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
    safe_headers: NotRequired[List[str]]
    r"""List of headers that are safe to log in plain text"""
    use_round_robin_dns: NotRequired[bool]
    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""
    on_backpressure: NotRequired[OutputChronicleBackpressureBehavior]
    r"""How to handle events when all receivers are exerting backpressure"""
    total_memory_limit_kb: NotRequired[float]
    r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
    ingestion_method: NotRequired[str]
    namespace: NotRequired[str]
    r"""User-configured environment namespace to identify the data domain the logs originated from. This namespace is used as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""
    log_text_field: NotRequired[str]
    r"""Name of the event field that contains the log text to send. If not specified, Stream sends a JSON representation of the whole event."""
    custom_labels: NotRequired[List[OutputChronicleCustomLabelTypedDict]]
    r"""Custom labels to be added to every event"""
    description: NotRequired[str]
    service_account_credentials: NotRequired[str]
    r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""
    service_account_credentials_secret: NotRequired[str]
    r"""Select or create a stored text secret"""
    pq_max_file_size: NotRequired[str]
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
    pq_max_size: NotRequired[str]
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
    pq_path: NotRequired[str]
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
    pq_compress: NotRequired[OutputChronicleCompression]
    r"""Codec to use to compress the persisted data"""
    pq_on_backpressure: NotRequired[OutputChronicleQueueFullBehavior]
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
    pq_mode: NotRequired[OutputChronicleMode]
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
    pq_controls: NotRequired[OutputChroniclePqControlsTypedDict]


class OutputChronicle(BaseModel):
    type: OutputChronicleType

    region: str
    r"""Regional endpoint to send events to"""

    log_type: Annotated[str, pydantic.Field(alias="logType")]
    r"""Default log type value to send to SecOps. Can be overwritten by event field __logType."""

    gcp_project_id: Annotated[str, pydantic.Field(alias="gcpProjectId")]
    r"""The Google Cloud Platform (GCP) project ID to send events to"""

    gcp_instance: Annotated[str, pydantic.Field(alias="gcpInstance")]
    r"""The Google Cloud Platform (GCP) instance to send events to. This is the Chronicle customer uuid."""

    id: Optional[str] = None
    r"""Unique ID for this output"""

    pipeline: Optional[str] = None
    r"""Pipeline to process data before sending out to this output"""

    system_fields: Annotated[
        Optional[List[str]], pydantic.Field(alias="systemFields")
    ] = None
    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""

    environment: Optional[str] = None
    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""

    streamtags: Optional[List[str]] = None
    r"""Tags for filtering and grouping in @{product}"""

    api_version: Annotated[Optional[str], pydantic.Field(alias="apiVersion")] = (
        "v1alpha"
    )

    authentication_method: Annotated[
        Annotated[
            Optional[OutputChronicleAuthenticationMethod],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="authenticationMethod"),
    ] = OutputChronicleAuthenticationMethod.SERVICE_ACCOUNT

    response_retry_settings: Annotated[
        Optional[List[OutputChronicleResponseRetrySetting]],
        pydantic.Field(alias="responseRetrySettings"),
    ] = None
    r"""Automatically retry after unsuccessful response status codes, such as 429 (Too Many Requests) or 503 (Service Unavailable)"""

    timeout_retry_settings: Annotated[
        Optional[OutputChronicleTimeoutRetrySettings],
        pydantic.Field(alias="timeoutRetrySettings"),
    ] = None

    response_honor_retry_after_header: Annotated[
        Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
    ] = True
    r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

    concurrency: Optional[float] = 5
    r"""Maximum number of ongoing requests before blocking"""

    max_payload_size_kb: Annotated[
        Optional[float], pydantic.Field(alias="maxPayloadSizeKB")
    ] = 1024
    r"""Maximum size, in KB, of the request body"""

    max_payload_events: Annotated[
        Optional[float], pydantic.Field(alias="maxPayloadEvents")
    ] = 0
    r"""Maximum number of events to include in the request body. Default is 0 (unlimited)."""

    compress: Optional[bool] = True
    r"""Compress the payload body before sending"""

    reject_unauthorized: Annotated[
        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
    ] = True
    r"""Reject certificates not authorized by a CA in the CA certificate path or by another trusted CA (such as the system's).
    Enabled by default. When this setting is also present in TLS Settings (Client Side),
    that value will take precedence.
    """

    timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 90
    r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""

    flush_period_sec: Annotated[
        Optional[float], pydantic.Field(alias="flushPeriodSec")
    ] = 1
    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""

    extra_http_headers: Annotated[
        Optional[List[OutputChronicleExtraHTTPHeader]],
        pydantic.Field(alias="extraHttpHeaders"),
    ] = None
    r"""Headers to add to all events"""

    failed_request_logging_mode: Annotated[
        Annotated[
            Optional[OutputChronicleFailedRequestLoggingMode],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="failedRequestLoggingMode"),
    ] = OutputChronicleFailedRequestLoggingMode.NONE
    r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

    safe_headers: Annotated[
        Optional[List[str]], pydantic.Field(alias="safeHeaders")
    ] = None
    r"""List of headers that are safe to log in plain text"""

    use_round_robin_dns: Annotated[
        Optional[bool], pydantic.Field(alias="useRoundRobinDns")
    ] = False
    r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""

    on_backpressure: Annotated[
        Annotated[
            Optional[OutputChronicleBackpressureBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="onBackpressure"),
    ] = OutputChronicleBackpressureBehavior.BLOCK
    r"""How to handle events when all receivers are exerting backpressure"""

    total_memory_limit_kb: Annotated[
        Optional[float], pydantic.Field(alias="totalMemoryLimitKB")
    ] = None
    r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""

    ingestion_method: Annotated[
        Optional[str], pydantic.Field(alias="ingestionMethod")
    ] = "ImportLogs"

    namespace: Optional[str] = None
    r"""User-configured environment namespace to identify the data domain the logs originated from. This namespace is used as a tag to identify the appropriate data domain for indexing and enrichment functionality. Can be overwritten by event field __namespace."""

    log_text_field: Annotated[Optional[str], pydantic.Field(alias="logTextField")] = (
        None
    )
    r"""Name of the event field that contains the log text to send. If not specified, Stream sends a JSON representation of the whole event."""

    custom_labels: Annotated[
        Optional[List[OutputChronicleCustomLabel]], pydantic.Field(alias="customLabels")
    ] = None
    r"""Custom labels to be added to every event"""

    description: Optional[str] = None

    service_account_credentials: Annotated[
        Optional[str], pydantic.Field(alias="serviceAccountCredentials")
    ] = None
    r"""Contents of service account credentials (JSON keys) file downloaded from Google Cloud. To upload a file, click the upload button at this field's upper right."""

    service_account_credentials_secret: Annotated[
        Optional[str], pydantic.Field(alias="serviceAccountCredentialsSecret")
    ] = None
    r"""Select or create a stored text secret"""

    pq_max_file_size: Annotated[
        Optional[str], pydantic.Field(alias="pqMaxFileSize")
    ] = "1 MB"
    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""

    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""

    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
        "$CRIBL_HOME/state/queues"
    )
    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

    pq_compress: Annotated[
        Annotated[
            Optional[OutputChronicleCompression],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqCompress"),
    ] = OutputChronicleCompression.NONE
    r"""Codec to use to compress the persisted data"""

    pq_on_backpressure: Annotated[
        Annotated[
            Optional[OutputChronicleQueueFullBehavior],
            PlainValidator(validate_open_enum(False)),
        ],
        pydantic.Field(alias="pqOnBackpressure"),
    ] = OutputChronicleQueueFullBehavior.BLOCK
    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

    pq_mode: Annotated[
        Annotated[
            Optional[OutputChronicleMode], PlainValidator(validate_open_enum(False))
        ],
        pydantic.Field(alias="pqMode"),
    ] = OutputChronicleMode.ERROR
    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

    pq_controls: Annotated[
        Optional[OutputChroniclePqControls], pydantic.Field(alias="pqControls")
    ] = None