cribl-control-plane 0.2.1rc7__py3-none-any.whl → 0.3.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Potentially problematic release: this version of cribl-control-plane might be problematic.
- cribl_control_plane/_version.py +4 -4
- cribl_control_plane/errors/__init__.py +5 -8
- cribl_control_plane/errors/{healthserverstatus_error.py → healthstatus_error.py} +9 -10
- cribl_control_plane/groups_sdk.py +28 -52
- cribl_control_plane/health.py +16 -22
- cribl_control_plane/models/__init__.py +54 -217
- cribl_control_plane/models/appmode.py +14 -0
- cribl_control_plane/models/authtoken.py +1 -5
- cribl_control_plane/models/cacheconnection.py +0 -20
- cribl_control_plane/models/configgroup.py +7 -55
- cribl_control_plane/models/configgroupcloud.py +1 -11
- cribl_control_plane/models/createconfiggroupbyproductop.py +5 -17
- cribl_control_plane/models/createroutesappendbyidop.py +2 -2
- cribl_control_plane/models/createversionundoop.py +3 -3
- cribl_control_plane/models/cribllakedataset.py +1 -11
- cribl_control_plane/models/cribllakedatasetupdate.py +1 -11
- cribl_control_plane/models/datasetmetadata.py +1 -11
- cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
- cribl_control_plane/models/distributedsummary.py +0 -6
- cribl_control_plane/models/error.py +16 -0
- cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +0 -20
- cribl_control_plane/models/getconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +0 -11
- cribl_control_plane/models/gethealthinfoop.py +17 -0
- cribl_control_plane/models/getsummaryop.py +0 -11
- cribl_control_plane/models/hbcriblinfo.py +3 -24
- cribl_control_plane/models/{healthserverstatus.py → healthstatus.py} +8 -27
- cribl_control_plane/models/heartbeatmetadata.py +0 -3
- cribl_control_plane/models/input.py +78 -80
- cribl_control_plane/models/inputappscope.py +17 -80
- cribl_control_plane/models/inputazureblob.py +1 -33
- cribl_control_plane/models/inputcollection.py +1 -24
- cribl_control_plane/models/inputconfluentcloud.py +18 -195
- cribl_control_plane/models/inputcribl.py +1 -24
- cribl_control_plane/models/inputcriblhttp.py +17 -62
- cribl_control_plane/models/inputcribllakehttp.py +17 -62
- cribl_control_plane/models/inputcriblmetrics.py +1 -24
- cribl_control_plane/models/inputcribltcp.py +17 -62
- cribl_control_plane/models/inputcrowdstrike.py +1 -54
- cribl_control_plane/models/inputdatadogagent.py +17 -62
- cribl_control_plane/models/inputdatagen.py +1 -24
- cribl_control_plane/models/inputedgeprometheus.py +34 -147
- cribl_control_plane/models/inputelastic.py +27 -119
- cribl_control_plane/models/inputeventhub.py +1 -182
- cribl_control_plane/models/inputexec.py +1 -33
- cribl_control_plane/models/inputfile.py +3 -42
- cribl_control_plane/models/inputfirehose.py +17 -62
- cribl_control_plane/models/inputgooglepubsub.py +1 -36
- cribl_control_plane/models/inputgrafana.py +32 -157
- cribl_control_plane/models/inputhttp.py +17 -62
- cribl_control_plane/models/inputhttpraw.py +17 -62
- cribl_control_plane/models/inputjournalfiles.py +1 -24
- cribl_control_plane/models/inputkafka.py +17 -189
- cribl_control_plane/models/inputkinesis.py +1 -80
- cribl_control_plane/models/inputkubeevents.py +1 -24
- cribl_control_plane/models/inputkubelogs.py +1 -33
- cribl_control_plane/models/inputkubemetrics.py +1 -33
- cribl_control_plane/models/inputloki.py +17 -71
- cribl_control_plane/models/inputmetrics.py +17 -62
- cribl_control_plane/models/inputmodeldriventelemetry.py +17 -62
- cribl_control_plane/models/inputmsk.py +18 -81
- cribl_control_plane/models/inputnetflow.py +1 -24
- cribl_control_plane/models/inputoffice365mgmt.py +1 -67
- cribl_control_plane/models/inputoffice365msgtrace.py +1 -67
- cribl_control_plane/models/inputoffice365service.py +1 -67
- cribl_control_plane/models/inputopentelemetry.py +16 -92
- cribl_control_plane/models/inputprometheus.py +34 -138
- cribl_control_plane/models/inputprometheusrw.py +17 -71
- cribl_control_plane/models/inputrawudp.py +1 -24
- cribl_control_plane/models/inputs3.py +1 -45
- cribl_control_plane/models/inputs3inventory.py +1 -54
- cribl_control_plane/models/inputsecuritylake.py +1 -54
- cribl_control_plane/models/inputsnmp.py +1 -40
- cribl_control_plane/models/inputsplunk.py +17 -85
- cribl_control_plane/models/inputsplunkhec.py +16 -70
- cribl_control_plane/models/inputsplunksearch.py +1 -63
- cribl_control_plane/models/inputsqs.py +1 -56
- cribl_control_plane/models/inputsyslog.py +32 -121
- cribl_control_plane/models/inputsystemmetrics.py +9 -142
- cribl_control_plane/models/inputsystemstate.py +1 -33
- cribl_control_plane/models/inputtcp.py +17 -81
- cribl_control_plane/models/inputtcpjson.py +17 -71
- cribl_control_plane/models/inputwef.py +1 -71
- cribl_control_plane/models/inputwindowsmetrics.py +9 -129
- cribl_control_plane/models/inputwineventlogs.py +1 -60
- cribl_control_plane/models/inputwiz.py +1 -45
- cribl_control_plane/models/inputwizwebhook.py +17 -62
- cribl_control_plane/models/inputzscalerhec.py +16 -70
- cribl_control_plane/models/jobinfo.py +1 -4
- cribl_control_plane/models/jobstatus.py +3 -34
- cribl_control_plane/models/listconfiggroupbyproductop.py +0 -11
- cribl_control_plane/models/logininfo.py +3 -3
- cribl_control_plane/models/masterworkerentry.py +1 -11
- cribl_control_plane/models/nodeprovidedinfo.py +1 -11
- cribl_control_plane/models/nodeupgradestatus.py +0 -38
- cribl_control_plane/models/output.py +88 -93
- cribl_control_plane/models/outputazureblob.py +1 -110
- cribl_control_plane/models/outputazuredataexplorer.py +87 -452
- cribl_control_plane/models/outputazureeventhub.py +19 -281
- cribl_control_plane/models/outputazurelogs.py +19 -115
- cribl_control_plane/models/outputchronicle.py +19 -115
- cribl_control_plane/models/outputclickhouse.py +19 -155
- cribl_control_plane/models/outputcloudwatch.py +19 -106
- cribl_control_plane/models/outputconfluentcloud.py +38 -311
- cribl_control_plane/models/outputcriblhttp.py +19 -135
- cribl_control_plane/models/outputcribllake.py +1 -97
- cribl_control_plane/models/outputcribltcp.py +19 -132
- cribl_control_plane/models/outputcrowdstrikenextgensiem.py +20 -129
- cribl_control_plane/models/outputdatadog.py +19 -159
- cribl_control_plane/models/outputdataset.py +19 -143
- cribl_control_plane/models/outputdiskspool.py +1 -11
- cribl_control_plane/models/outputdls3.py +1 -152
- cribl_control_plane/models/outputdynatracehttp.py +19 -160
- cribl_control_plane/models/outputdynatraceotlp.py +19 -160
- cribl_control_plane/models/outputelastic.py +19 -163
- cribl_control_plane/models/outputelasticcloud.py +19 -140
- cribl_control_plane/models/outputexabeam.py +1 -61
- cribl_control_plane/models/outputfilesystem.py +1 -87
- cribl_control_plane/models/outputgooglechronicle.py +20 -166
- cribl_control_plane/models/outputgooglecloudlogging.py +20 -131
- cribl_control_plane/models/outputgooglecloudstorage.py +1 -136
- cribl_control_plane/models/outputgooglepubsub.py +19 -106
- cribl_control_plane/models/outputgrafanacloud.py +37 -288
- cribl_control_plane/models/outputgraphite.py +19 -105
- cribl_control_plane/models/outputhoneycomb.py +19 -115
- cribl_control_plane/models/outputhumiohec.py +19 -126
- cribl_control_plane/models/outputinfluxdb.py +19 -130
- cribl_control_plane/models/outputkafka.py +34 -302
- cribl_control_plane/models/outputkinesis.py +19 -133
- cribl_control_plane/models/outputloki.py +17 -129
- cribl_control_plane/models/outputminio.py +1 -145
- cribl_control_plane/models/outputmsk.py +34 -193
- cribl_control_plane/models/outputnewrelic.py +19 -136
- cribl_control_plane/models/outputnewrelicevents.py +20 -128
- cribl_control_plane/models/outputopentelemetry.py +19 -178
- cribl_control_plane/models/outputprometheus.py +19 -115
- cribl_control_plane/models/outputring.py +1 -31
- cribl_control_plane/models/outputs3.py +1 -152
- cribl_control_plane/models/outputsecuritylake.py +1 -114
- cribl_control_plane/models/outputsentinel.py +19 -135
- cribl_control_plane/models/outputsentineloneaisiem.py +20 -134
- cribl_control_plane/models/outputservicenow.py +19 -168
- cribl_control_plane/models/outputsignalfx.py +19 -115
- cribl_control_plane/models/outputsns.py +17 -113
- cribl_control_plane/models/outputsplunk.py +19 -153
- cribl_control_plane/models/outputsplunkhec.py +19 -208
- cribl_control_plane/models/outputsplunklb.py +19 -182
- cribl_control_plane/models/outputsqs.py +17 -124
- cribl_control_plane/models/outputstatsd.py +19 -105
- cribl_control_plane/models/outputstatsdext.py +19 -105
- cribl_control_plane/models/outputsumologic.py +19 -117
- cribl_control_plane/models/outputsyslog.py +96 -259
- cribl_control_plane/models/outputtcpjson.py +19 -141
- cribl_control_plane/models/outputwavefront.py +19 -115
- cribl_control_plane/models/outputwebhook.py +19 -161
- cribl_control_plane/models/outputxsiam.py +17 -113
- cribl_control_plane/models/packinfo.py +5 -8
- cribl_control_plane/models/packinstallinfo.py +5 -8
- cribl_control_plane/models/resourcepolicy.py +0 -11
- cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
- cribl_control_plane/models/routeconf.py +4 -3
- cribl_control_plane/models/runnablejobcollection.py +9 -72
- cribl_control_plane/models/runnablejobexecutor.py +9 -32
- cribl_control_plane/models/runnablejobscheduledsearch.py +9 -23
- cribl_control_plane/models/updateconfiggroupbyproductandidop.py +0 -11
- cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +0 -11
- cribl_control_plane/packs.py +7 -202
- cribl_control_plane/routes_sdk.py +6 -6
- cribl_control_plane/tokens.py +15 -23
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/METADATA +9 -50
- cribl_control_plane-0.3.0a1.dist-info/RECORD +330 -0
- cribl_control_plane/models/groupcreaterequest.py +0 -171
- cribl_control_plane/models/outpostnodeinfo.py +0 -16
- cribl_control_plane/models/outputdatabricks.py +0 -482
- cribl_control_plane/models/updatepacksop.py +0 -25
- cribl_control_plane-0.2.1rc7.dist-info/RECORD +0 -331
- {cribl_control_plane-0.2.1rc7.dist-info → cribl_control_plane-0.3.0a1.dist-info}/WHEEL +0 -0
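Four modules are deleted outright in 0.3.0a1 (groupcreaterequest.py, outpostnodeinfo.py, outputdatabricks.py, updatepacksop.py), so code that imports them will break on upgrade. A minimal probe like the following sketch can gate on their presence; the module path comes from the listing above, everything else is illustrative.

# Illustrative sketch, not part of the SDK: check whether the Databricks
# output model module still exists before referencing it. The module path
# is taken from the file listing; the flag name is hypothetical.
import importlib.util

HAS_DATABRICKS_OUTPUT = (
    importlib.util.find_spec("cribl_control_plane.models.outputdatabricks")
    is not None
)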
--- a/cribl_control_plane/models/outputminio.py
+++ b/cribl_control_plane/models/outputminio.py
@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,11 +18,8 @@ class OutputMinioType(str, Enum):
 class OutputMinioAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
-    # Auto
     AUTO = "auto"
-    # Manual
     MANUAL = "manual"
-    # Secret Key pair
     SECRET = "secret"
 
 
@@ -37,64 +33,47 @@ class OutputMinioSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputMinioObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""
 
-    # Private
     PRIVATE = "private"
-    # Public Read Only
     PUBLIC_READ = "public-read"
-    # Public Read/Write
     PUBLIC_READ_WRITE = "public-read-write"
-    # Authenticated Read Only
     AUTHENTICATED_READ = "authenticated-read"
-    # AWS EC2 AMI Read Only
     AWS_EXEC_READ = "aws-exec-read"
-    # Bucket Owner Read Only
     BUCKET_OWNER_READ = "bucket-owner-read"
-    # Bucket Owner Full Control
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
 
 
 class OutputMinioStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""
 
-    # Standard
     STANDARD = "STANDARD"
-    # Reduced Redundancy Storage
     REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
 
 
 class ServerSideEncryption(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Server-side encryption for uploaded objects"""
 
-    # Amazon S3 Managed Key
     AES256 = "AES256"
 
 
 class OutputMinioDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""
 
-    # JSON
     JSON = "json"
-    # Raw
     RAW = "raw"
-    # Parquet
     PARQUET = "parquet"
 
 
 class OutputMinioBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
 
 
 class OutputMinioDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
 
 
@@ -108,31 +87,23 @@ class OutputMinioCompression(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputMinioCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""
 
-    # Best Speed
     BEST_SPEED = "best_speed"
-    # Normal
     NORMAL = "normal"
-    # Best Compression
     BEST_COMPRESSION = "best_compression"
 
 
 class OutputMinioParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""
 
-    # 1.0
     PARQUET_1_0 = "PARQUET_1_0"
-    # 2.4
     PARQUET_2_4 = "PARQUET_2_4"
-    # 2.6
     PARQUET_2_6 = "PARQUET_2_6"
 
 
 class OutputMinioDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
 
-    # V1
     DATA_PAGE_V1 = "DATA_PAGE_V1"
-    # V2
     DATA_PAGE_V2 = "DATA_PAGE_V2"
 
 
@@ -230,8 +201,6 @@ class OutputMinioTypedDict(TypedDict):
     r"""Compression level to apply before moving files to final destination"""
     automatic_schema: NotRequired[bool]
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
-    parquet_schema: NotRequired[str]
-    r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
     parquet_version: NotRequired[OutputMinioParquetVersion]
     r"""Determines which data types are supported and how they are represented"""
     parquet_data_page_version: NotRequired[OutputMinioDataPageVersion]
@@ -473,11 +442,6 @@ class OutputMinio(BaseModel):
     ] = False
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
 
-    parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
-        None
-    )
-    r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
-
     parquet_version: Annotated[
         Annotated[
             Optional[OutputMinioParquetVersion],
@@ -544,111 +508,3 @@ class OutputMinio(BaseModel):
 
     max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
     r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
-
-    @field_serializer("aws_authentication_method")
-    def serialize_aws_authentication_method(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("signature_version")
-    def serialize_signature_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioSignatureVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("object_acl")
-    def serialize_object_acl(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioObjectACL(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("storage_class")
-    def serialize_storage_class(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioStorageClass(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("server_side_encryption")
-    def serialize_server_side_encryption(self, value):
-        if isinstance(value, str):
-            try:
-                return models.ServerSideEncryption(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("format_")
-    def serialize_format_(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioDataFormat(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_disk_full_backpressure")
-    def serialize_on_disk_full_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioDiskSpaceProtection(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("compress")
-    def serialize_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("compression_level")
-    def serialize_compression_level(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioCompressionLevel(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("parquet_version")
-    def serialize_parquet_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioParquetVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("parquet_data_page_version")
-    def serialize_parquet_data_page_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMinioDataPageVersion(value)
-            except ValueError:
-                return value
-        return value
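The bulk of this file's change repeats across nearly every model in the release: the generated @field_serializer fallbacks are deleted, leaving PlainValidator(validate_open_enum(False)) as the single place where open-enum values are normalized. Below is a self-contained sketch of that pattern, assuming pydantic v2; DataFormat, to_open_enum, and Output are illustrative stand-ins, not SDK names.

# Illustrative sketch only -- not the SDK's actual helper code. It mimics
# what validate_open_enum appears to do in the generated models above.
from enum import Enum
from typing import Optional, Union

import pydantic
from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class DataFormat(str, Enum):
    JSON = "json"
    RAW = "raw"
    PARQUET = "parquet"


def to_open_enum(value: object) -> Union[DataFormat, str, None]:
    # The same fallback the removed serialize_* methods performed, applied
    # once at validation time: known strings become enum members, unknown
    # strings pass through unchanged (which is what makes the enum "open").
    if value is None:
        return None
    try:
        return DataFormat(value)
    except ValueError:
        return str(value)


class Output(BaseModel):
    format_: Annotated[
        Optional[Union[DataFormat, str]],
        PlainValidator(to_open_enum),
        pydantic.Field(alias="format"),
    ] = DataFormat.JSON


print(Output(format="raw").format_)                  # DataFormat.RAW
print(Output(format="orc").format_)                  # orc (kept as plain str)
print(Output(format="orc").model_dump(mode="json"))  # {'format_': 'orc'}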
--- a/cribl_control_plane/models/outputmsk.py
+++ b/cribl_control_plane/models/outputmsk.py
@@ -1,12 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import models, utils
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -19,38 +18,35 @@ class OutputMskType(str, Enum):
 class OutputMskAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments."""
 
-    # Leader
     ONE = 1
-    # None
     ZERO = 0
-    # All
     MINUS_1 = -1
 
 
 class OutputMskRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""
 
-    # JSON
     JSON = "json"
-    # Field _raw
     RAW = "raw"
-    # Protobuf
     PROTOBUF = "protobuf"
 
 
 class OutputMskCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
-    # Snappy
     SNAPPY = "snappy"
-    # LZ4
     LZ4 = "lz4"
 
 
+class OutputMskSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class OutputMskAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
 
@@ -156,29 +152,13 @@ class OutputMskKafkaSchemaRegistryTLSSettingsClientSide(BaseModel):
         pydantic.Field(alias="maxVersion"),
     ] = None
 
-    @field_serializer("min_version")
-    def serialize_min_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskKafkaSchemaRegistryMinimumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("max_version")
-    def serialize_max_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskKafkaSchemaRegistryMaximumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
 
 class OutputMskKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[OutputMskSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -202,6 +182,14 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
 
+    schema_type: Annotated[
+        Annotated[
+            Optional[OutputMskSchemaType], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="schemaType"),
+    ] = OutputMskSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
@@ -234,11 +222,8 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
 class OutputMskAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""
 
-    # Auto
     AUTO = "auto"
-    # Manual
     MANUAL = "manual"
-    # Secret Key pair
     SECRET = "secret"
 
 
@@ -331,65 +316,37 @@ class OutputMskTLSSettingsClientSide(BaseModel):
         pydantic.Field(alias="maxVersion"),
     ] = None
 
-    @field_serializer("min_version")
-    def serialize_min_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskMinimumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("max_version")
-    def serialize_max_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskMaximumTLSVersion(value)
-            except ValueError:
-                return value
-        return value
-
 
 class OutputMskBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""
 
-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"
 
 
-class OutputMskMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    ALWAYS = "always"
-    # Always On
-    BACKPRESSURE = "backpressure"
-
-
 class OutputMskPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
 
 
 class OutputMskQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"
 
 
+class OutputMskMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
 class OutputMskPqControlsTypedDict(TypedDict):
     pass
 
@@ -475,18 +432,6 @@ class OutputMskTypedDict(TypedDict):
     r"""Select or create a stored secret that references your access key and secret key"""
     protobuf_library_id: NotRequired[str]
     r"""Select a set of Protobuf definitions for the events you want to send"""
-    protobuf_encoding_id: NotRequired[str]
-    r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
-    pq_strict_ordering: NotRequired[bool]
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-    pq_rate_per_sec: NotRequired[float]
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-    pq_mode: NotRequired[OutputMskMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-    pq_max_buffer_size: NotRequired[float]
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-    pq_max_backpressure_sec: NotRequired[float]
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -497,6 +442,8 @@ class OutputMskTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
    pq_on_backpressure: NotRequired[OutputMskQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputMskMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputMskPqControlsTypedDict]
 
 
@@ -680,37 +627,6 @@ class OutputMsk(BaseModel):
     ] = None
     r"""Select a set of Protobuf definitions for the events you want to send"""
 
-    protobuf_encoding_id: Annotated[
-        Optional[str], pydantic.Field(alias="protobufEncodingId")
-    ] = None
-    r"""Select the type of object you want the Protobuf definitions to use for event encoding"""
-
-    pq_strict_ordering: Annotated[
-        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
-    ] = True
-    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
-
-    pq_rate_per_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqRatePerSec")
-    ] = 0
-    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
-
-    pq_mode: Annotated[
-        Annotated[Optional[OutputMskMode], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputMskMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    pq_max_buffer_size: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
-    ] = 42
-    r"""The maximum number of events to hold in memory before writing the events to disk"""
-
-    pq_max_backpressure_sec: Annotated[
-        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
-    ] = 30
-    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
-
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -742,87 +658,12 @@ class OutputMsk(BaseModel):
     ] = OutputMskQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
+    pq_mode: Annotated[
+        Annotated[Optional[OutputMskMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputMskMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
     pq_controls: Annotated[
         Optional[OutputMskPqControls], pydantic.Field(alias="pqControls")
     ] = None
-
-    @field_serializer("ack")
-    def serialize_ack(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskAcknowledgments(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("format_")
-    def serialize_format_(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskRecordDataFormat(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("compression")
-    def serialize_compression(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("aws_authentication_method")
-    def serialize_aws_authentication_method(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskAuthenticationMethod(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("signature_version")
-    def serialize_signature_version(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskSignatureVersion(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("on_backpressure")
-    def serialize_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskBackpressureBehavior(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_mode")
-    def serialize_pq_mode(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskMode(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_compress")
-    def serialize_pq_compress(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskPqCompressCompression(value)
-            except ValueError:
-                return value
-        return value
-
-    @field_serializer("pq_on_backpressure")
-    def serialize_pq_on_backpressure(self, value):
-        if isinstance(value, str):
-            try:
-                return models.OutputMskQueueFullBehavior(value)
-            except ValueError:
-                return value
-        return value