cribl-control-plane 0.2.0b7__py3-none-any.whl → 0.2.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic; see the registry's advisory for details.

Files changed (143)
  1. cribl_control_plane/_version.py +5 -3
  2. cribl_control_plane/groups_sdk.py +12 -12
  3. cribl_control_plane/models/__init__.py +27 -45
  4. cribl_control_plane/models/appmode.py +14 -0
  5. cribl_control_plane/models/authtoken.py +1 -8
  6. cribl_control_plane/models/configgroup.py +3 -29
  7. cribl_control_plane/models/createversionundoop.py +3 -3
  8. cribl_control_plane/models/distributedsummary.py +0 -6
  9. cribl_control_plane/models/hbcriblinfo.py +3 -14
  10. cribl_control_plane/models/heartbeatmetadata.py +0 -3
  11. cribl_control_plane/models/input.py +63 -65
  12. cribl_control_plane/models/inputappscope.py +0 -4
  13. cribl_control_plane/models/inputazureblob.py +0 -4
  14. cribl_control_plane/models/inputcollection.py +0 -4
  15. cribl_control_plane/models/inputconfluentcloud.py +18 -8
  16. cribl_control_plane/models/inputcribl.py +0 -4
  17. cribl_control_plane/models/inputcriblhttp.py +0 -4
  18. cribl_control_plane/models/inputcribllakehttp.py +0 -4
  19. cribl_control_plane/models/inputcriblmetrics.py +0 -4
  20. cribl_control_plane/models/inputcribltcp.py +0 -4
  21. cribl_control_plane/models/inputcrowdstrike.py +0 -7
  22. cribl_control_plane/models/inputdatadogagent.py +0 -4
  23. cribl_control_plane/models/inputdatagen.py +0 -4
  24. cribl_control_plane/models/inputedgeprometheus.py +0 -12
  25. cribl_control_plane/models/inputelastic.py +0 -11
  26. cribl_control_plane/models/inputeventhub.py +0 -6
  27. cribl_control_plane/models/inputexec.py +0 -4
  28. cribl_control_plane/models/inputfile.py +0 -6
  29. cribl_control_plane/models/inputfirehose.py +0 -4
  30. cribl_control_plane/models/inputgooglepubsub.py +0 -7
  31. cribl_control_plane/models/inputgrafana.py +0 -8
  32. cribl_control_plane/models/inputhttp.py +0 -4
  33. cribl_control_plane/models/inputhttpraw.py +0 -4
  34. cribl_control_plane/models/inputjournalfiles.py +0 -4
  35. cribl_control_plane/models/inputkafka.py +17 -8
  36. cribl_control_plane/models/inputkinesis.py +0 -15
  37. cribl_control_plane/models/inputkubeevents.py +0 -4
  38. cribl_control_plane/models/inputkubelogs.py +0 -4
  39. cribl_control_plane/models/inputkubemetrics.py +0 -4
  40. cribl_control_plane/models/inputloki.py +0 -4
  41. cribl_control_plane/models/inputmetrics.py +0 -4
  42. cribl_control_plane/models/inputmodeldriventelemetry.py +0 -4
  43. cribl_control_plane/models/inputmsk.py +17 -7
  44. cribl_control_plane/models/inputnetflow.py +0 -4
  45. cribl_control_plane/models/inputoffice365mgmt.py +0 -11
  46. cribl_control_plane/models/inputoffice365msgtrace.py +0 -11
  47. cribl_control_plane/models/inputoffice365service.py +0 -11
  48. cribl_control_plane/models/inputopentelemetry.py +0 -8
  49. cribl_control_plane/models/inputprometheus.py +0 -10
  50. cribl_control_plane/models/inputprometheusrw.py +0 -4
  51. cribl_control_plane/models/inputrawudp.py +0 -4
  52. cribl_control_plane/models/inputs3.py +0 -7
  53. cribl_control_plane/models/inputs3inventory.py +0 -7
  54. cribl_control_plane/models/inputsecuritylake.py +0 -7
  55. cribl_control_plane/models/inputsnmp.py +0 -11
  56. cribl_control_plane/models/inputsplunk.py +0 -9
  57. cribl_control_plane/models/inputsplunkhec.py +0 -4
  58. cribl_control_plane/models/inputsplunksearch.py +0 -7
  59. cribl_control_plane/models/inputsqs.py +0 -9
  60. cribl_control_plane/models/inputsyslog.py +0 -8
  61. cribl_control_plane/models/inputsystemmetrics.py +0 -32
  62. cribl_control_plane/models/inputsystemstate.py +0 -4
  63. cribl_control_plane/models/inputtcp.py +0 -4
  64. cribl_control_plane/models/inputtcpjson.py +0 -4
  65. cribl_control_plane/models/inputwef.py +0 -6
  66. cribl_control_plane/models/inputwindowsmetrics.py +0 -28
  67. cribl_control_plane/models/inputwineventlogs.py +0 -8
  68. cribl_control_plane/models/inputwiz.py +0 -7
  69. cribl_control_plane/models/inputwizwebhook.py +0 -4
  70. cribl_control_plane/models/inputzscalerhec.py +0 -4
  71. cribl_control_plane/models/jobinfo.py +1 -4
  72. cribl_control_plane/models/nodeprovidedinfo.py +1 -7
  73. cribl_control_plane/models/output.py +80 -85
  74. cribl_control_plane/models/outputazureblob.py +0 -20
  75. cribl_control_plane/models/outputazuredataexplorer.py +0 -28
  76. cribl_control_plane/models/outputazureeventhub.py +0 -17
  77. cribl_control_plane/models/outputazurelogs.py +0 -13
  78. cribl_control_plane/models/outputchronicle.py +0 -13
  79. cribl_control_plane/models/outputclickhouse.py +0 -17
  80. cribl_control_plane/models/outputcloudwatch.py +0 -13
  81. cribl_control_plane/models/outputconfluentcloud.py +18 -24
  82. cribl_control_plane/models/outputcriblhttp.py +0 -15
  83. cribl_control_plane/models/outputcribllake.py +0 -21
  84. cribl_control_plane/models/outputcribltcp.py +0 -12
  85. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +0 -15
  86. cribl_control_plane/models/outputdatadog.py +0 -30
  87. cribl_control_plane/models/outputdataset.py +0 -23
  88. cribl_control_plane/models/outputdls3.py +0 -35
  89. cribl_control_plane/models/outputdynatracehttp.py +0 -22
  90. cribl_control_plane/models/outputdynatraceotlp.py +0 -22
  91. cribl_control_plane/models/outputelastic.py +0 -18
  92. cribl_control_plane/models/outputelasticcloud.py +0 -13
  93. cribl_control_plane/models/outputexabeam.py +0 -14
  94. cribl_control_plane/models/outputfilesystem.py +0 -15
  95. cribl_control_plane/models/outputgooglechronicle.py +0 -21
  96. cribl_control_plane/models/outputgooglecloudlogging.py +0 -19
  97. cribl_control_plane/models/outputgooglecloudstorage.py +0 -28
  98. cribl_control_plane/models/outputgooglepubsub.py +0 -13
  99. cribl_control_plane/models/outputgrafanacloud.py +0 -50
  100. cribl_control_plane/models/outputgraphite.py +0 -12
  101. cribl_control_plane/models/outputhoneycomb.py +0 -13
  102. cribl_control_plane/models/outputhumiohec.py +0 -15
  103. cribl_control_plane/models/outputinfluxdb.py +0 -19
  104. cribl_control_plane/models/outputkafka.py +17 -24
  105. cribl_control_plane/models/outputkinesis.py +0 -15
  106. cribl_control_plane/models/outputloki.py +0 -20
  107. cribl_control_plane/models/outputminio.py +0 -28
  108. cribl_control_plane/models/outputmsk.py +17 -23
  109. cribl_control_plane/models/outputnewrelic.py +0 -16
  110. cribl_control_plane/models/outputnewrelicevents.py +0 -16
  111. cribl_control_plane/models/outputopentelemetry.py +0 -22
  112. cribl_control_plane/models/outputprometheus.py +0 -13
  113. cribl_control_plane/models/outputring.py +0 -2
  114. cribl_control_plane/models/outputs3.py +0 -35
  115. cribl_control_plane/models/outputsecuritylake.py +0 -29
  116. cribl_control_plane/models/outputsentinel.py +0 -15
  117. cribl_control_plane/models/outputsentineloneaisiem.py +0 -13
  118. cribl_control_plane/models/outputservicenow.py +0 -21
  119. cribl_control_plane/models/outputsignalfx.py +0 -13
  120. cribl_control_plane/models/outputsns.py +0 -13
  121. cribl_control_plane/models/outputsplunk.py +0 -15
  122. cribl_control_plane/models/outputsplunkhec.py +0 -13
  123. cribl_control_plane/models/outputsplunklb.py +0 -15
  124. cribl_control_plane/models/outputsqs.py +0 -15
  125. cribl_control_plane/models/outputstatsd.py +0 -12
  126. cribl_control_plane/models/outputstatsdext.py +0 -12
  127. cribl_control_plane/models/outputsumologic.py +0 -15
  128. cribl_control_plane/models/outputsyslog.py +0 -24
  129. cribl_control_plane/models/outputtcpjson.py +0 -12
  130. cribl_control_plane/models/outputwavefront.py +0 -13
  131. cribl_control_plane/models/outputwebhook.py +0 -23
  132. cribl_control_plane/models/outputxsiam.py +0 -13
  133. cribl_control_plane/models/packinfo.py +5 -8
  134. cribl_control_plane/models/packinstallinfo.py +5 -8
  135. cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
  136. cribl_control_plane/models/routeconf.py +4 -3
  137. cribl_control_plane/models/runnablejobcollection.py +0 -4
  138. cribl_control_plane/packs.py +7 -202
  139. {cribl_control_plane-0.2.0b7.dist-info → cribl_control_plane-0.2.0rc1.dist-info}/METADATA +16 -39
  140. {cribl_control_plane-0.2.0b7.dist-info → cribl_control_plane-0.2.0rc1.dist-info}/RECORD +141 -142
  141. cribl_control_plane/models/outputdatabricks.py +0 -291
  142. cribl_control_plane/models/updatepacksop.py +0 -27
  143. {cribl_control_plane-0.2.0b7.dist-info → cribl_control_plane-0.2.0rc1.dist-info}/WHEEL +0 -0
@@ -18,17 +18,11 @@ class OutputInfluxdbType(str, Enum):
18
18
  class TimestampPrecision(str, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""Sets the precision for the supplied Unix time values. Defaults to milliseconds."""
20
20
 
21
- # Nanoseconds
22
21
  NS = "ns"
23
- # Microseconds
24
22
  U = "u"
25
- # Milliseconds
26
23
  MS = "ms"
27
- # Seconds
28
24
  S = "s"
29
- # Minutes
30
25
  M = "m"
31
- # Hours
32
26
  H = "h"
33
27
 
34
28
 
@@ -46,11 +40,8 @@ class OutputInfluxdbExtraHTTPHeader(BaseModel):
46
40
  class OutputInfluxdbFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
47
41
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
48
42
 
49
- # Payload
50
43
  PAYLOAD = "payload"
51
- # Payload + Headers
52
44
  PAYLOAD_AND_HEADERS = "payloadAndHeaders"
53
- # None
54
45
  NONE = "none"
55
46
 
56
47
 
@@ -111,11 +102,8 @@ class OutputInfluxdbTimeoutRetrySettings(BaseModel):
111
102
  class OutputInfluxdbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
112
103
  r"""How to handle events when all receivers are exerting backpressure"""
113
104
 
114
- # Block
115
105
  BLOCK = "block"
116
- # Drop
117
106
  DROP = "drop"
118
- # Persistent Queue
119
107
  QUEUE = "queue"
120
108
 
121
109
 
@@ -133,29 +121,22 @@ class OutputInfluxdbAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
133
121
  class OutputInfluxdbCompression(str, Enum, metaclass=utils.OpenEnumMeta):
134
122
  r"""Codec to use to compress the persisted data"""
135
123
 
136
- # None
137
124
  NONE = "none"
138
- # Gzip
139
125
  GZIP = "gzip"
140
126
 
141
127
 
142
128
  class OutputInfluxdbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
143
129
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
144
130
 
145
- # Block
146
131
  BLOCK = "block"
147
- # Drop new data
148
132
  DROP = "drop"
149
133
 
150
134
 
151
135
  class OutputInfluxdbMode(str, Enum, metaclass=utils.OpenEnumMeta):
152
136
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
153
137
 
154
- # Error
155
138
  ERROR = "error"
156
- # Backpressure
157
139
  BACKPRESSURE = "backpressure"
158
- # Always On
159
140
  ALWAYS = "always"
160
141
 
161
142
 
@@ -18,38 +18,35 @@ class OutputKafkaType(str, Enum):
18
18
  class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""Control the number of required acknowledgments."""
20
20
 
21
- # Leader
22
21
  ONE = 1
23
- # None
24
22
  ZERO = 0
25
- # All
26
23
  MINUS_1 = -1
27
24
 
28
25
 
29
26
  class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
30
27
  r"""Format to use to serialize events before writing to Kafka."""
31
28
 
32
- # JSON
33
29
  JSON = "json"
34
- # Field _raw
35
30
  RAW = "raw"
36
- # Protobuf
37
31
  PROTOBUF = "protobuf"
38
32
 
39
33
 
40
34
  class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
41
35
  r"""Codec to use to compress the data before sending to Kafka"""
42
36
 
43
- # None
44
37
  NONE = "none"
45
- # Gzip
46
38
  GZIP = "gzip"
47
- # Snappy
48
39
  SNAPPY = "snappy"
49
- # LZ4
50
40
  LZ4 = "lz4"
51
41
 
52
42
 
43
+ class OutputKafkaSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
44
+ r"""The schema format used to encode and decode event data"""
45
+
46
+ AVRO = "avro"
47
+ JSON = "json"
48
+
49
+
53
50
  class OutputKafkaAuthTypedDict(TypedDict):
54
51
  r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
55
52
 
@@ -160,6 +157,8 @@ class OutputKafkaKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
160
157
  disabled: NotRequired[bool]
161
158
  schema_registry_url: NotRequired[str]
162
159
  r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
160
+ schema_type: NotRequired[OutputKafkaSchemaType]
161
+ r"""The schema format used to encode and decode event data"""
163
162
  connection_timeout: NotRequired[float]
164
163
  r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
165
164
  request_timeout: NotRequired[float]
@@ -183,6 +182,14 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
183
182
  ] = "http://localhost:8081"
184
183
  r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
185
184
 
185
+ schema_type: Annotated[
186
+ Annotated[
187
+ Optional[OutputKafkaSchemaType], PlainValidator(validate_open_enum(False))
188
+ ],
189
+ pydantic.Field(alias="schemaType"),
190
+ ] = OutputKafkaSchemaType.AVRO
191
+ r"""The schema format used to encode and decode event data"""
192
+
186
193
  connection_timeout: Annotated[
187
194
  Optional[float], pydantic.Field(alias="connectionTimeout")
188
195
  ] = 30000
@@ -213,13 +220,9 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
213
220
 
214
221
 
215
222
  class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
216
- # PLAIN
217
223
  PLAIN = "plain"
218
- # SCRAM-SHA-256
219
224
  SCRAM_SHA_256 = "scram-sha-256"
220
- # SCRAM-SHA-512
221
225
  SCRAM_SHA_512 = "scram-sha-512"
222
- # GSSAPI/Kerberos
223
226
  KERBEROS = "kerberos"
224
227
 
225
228
 
@@ -333,40 +336,30 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
333
336
  class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
334
337
  r"""How to handle events when all receivers are exerting backpressure"""
335
338
 
336
- # Block
337
339
  BLOCK = "block"
338
- # Drop
339
340
  DROP = "drop"
340
- # Persistent Queue
341
341
  QUEUE = "queue"
342
342
 
343
343
 
344
344
  class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
345
345
  r"""Codec to use to compress the persisted data"""
346
346
 
347
- # None
348
347
  NONE = "none"
349
- # Gzip
350
348
  GZIP = "gzip"
351
349
 
352
350
 
353
351
  class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
354
352
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
355
353
 
356
- # Block
357
354
  BLOCK = "block"
358
- # Drop new data
359
355
  DROP = "drop"
360
356
 
361
357
 
362
358
  class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
363
359
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
364
360
 
365
- # Error
366
361
  ERROR = "error"
367
- # Backpressure
368
362
  BACKPRESSURE = "backpressure"
369
- # Always On
370
363
  ALWAYS = "always"
371
364
 
372
365
 
@@ -18,11 +18,8 @@ class OutputKinesisType(str, Enum):
18
18
  class OutputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""AWS authentication method. Choose Auto to use IAM roles."""
20
20
 
21
- # Auto
22
21
  AUTO = "auto"
23
- # Manual
24
22
  MANUAL = "manual"
25
- # Secret Key pair
26
23
  SECRET = "secret"
27
24
 
28
25
 
@@ -36,49 +33,37 @@ class OutputKinesisSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
36
33
  class OutputKinesisCompression(str, Enum, metaclass=utils.OpenEnumMeta):
37
34
  r"""Compression type to use for records"""
38
35
 
39
- # None
40
36
  NONE = "none"
41
- # Gzip
42
37
  GZIP = "gzip"
43
38
 
44
39
 
45
40
  class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
46
41
  r"""How to handle events when all receivers are exerting backpressure"""
47
42
 
48
- # Block
49
43
  BLOCK = "block"
50
- # Drop
51
44
  DROP = "drop"
52
- # Persistent Queue
53
45
  QUEUE = "queue"
54
46
 
55
47
 
56
48
  class OutputKinesisPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
57
49
  r"""Codec to use to compress the persisted data"""
58
50
 
59
- # None
60
51
  NONE = "none"
61
- # Gzip
62
52
  GZIP = "gzip"
63
53
 
64
54
 
65
55
  class OutputKinesisQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
66
56
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
67
57
 
68
- # Block
69
58
  BLOCK = "block"
70
- # Drop new data
71
59
  DROP = "drop"
72
60
 
73
61
 
74
62
  class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
75
63
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
76
64
 
77
- # Error
78
65
  ERROR = "error"
79
- # Backpressure
80
66
  BACKPRESSURE = "backpressure"
81
- # Always On
82
67
  ALWAYS = "always"
83
68
 
84
69
 
@@ -18,9 +18,7 @@ class OutputLokiType(str, Enum):
18
18
  class OutputLokiMessageFormat(str, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""Format to use when sending logs to Loki (Protobuf or JSON)"""
20
20
 
21
- # Protobuf
22
21
  PROTOBUF = "protobuf"
23
- # JSON
24
22
  JSON = "json"
25
23
 
26
24
 
@@ -36,15 +34,10 @@ class OutputLokiLabel(BaseModel):
36
34
 
37
35
 
38
36
  class OutputLokiAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
39
- # None
40
37
  NONE = "none"
41
- # Auth token
42
38
  TOKEN = "token"
43
- # Auth token (text secret)
44
39
  TEXT_SECRET = "textSecret"
45
- # Basic
46
40
  BASIC = "basic"
47
- # Basic (credentials secret)
48
41
  CREDENTIALS_SECRET = "credentialsSecret"
49
42
 
50
43
 
@@ -62,11 +55,8 @@ class OutputLokiExtraHTTPHeader(BaseModel):
62
55
  class OutputLokiFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
63
56
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
64
57
 
65
- # Payload
66
58
  PAYLOAD = "payload"
67
- # Payload + Headers
68
59
  PAYLOAD_AND_HEADERS = "payloadAndHeaders"
69
- # None
70
60
  NONE = "none"
71
61
 
72
62
 
@@ -127,40 +117,30 @@ class OutputLokiTimeoutRetrySettings(BaseModel):
127
117
  class OutputLokiBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
128
118
  r"""How to handle events when all receivers are exerting backpressure"""
129
119
 
130
- # Block
131
120
  BLOCK = "block"
132
- # Drop
133
121
  DROP = "drop"
134
- # Persistent Queue
135
122
  QUEUE = "queue"
136
123
 
137
124
 
138
125
  class OutputLokiCompression(str, Enum, metaclass=utils.OpenEnumMeta):
139
126
  r"""Codec to use to compress the persisted data"""
140
127
 
141
- # None
142
128
  NONE = "none"
143
- # Gzip
144
129
  GZIP = "gzip"
145
130
 
146
131
 
147
132
  class OutputLokiQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
148
133
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
149
134
 
150
- # Block
151
135
  BLOCK = "block"
152
- # Drop new data
153
136
  DROP = "drop"
154
137
 
155
138
 
156
139
  class OutputLokiMode(str, Enum, metaclass=utils.OpenEnumMeta):
157
140
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
158
141
 
159
- # Error
160
142
  ERROR = "error"
161
- # Backpressure
162
143
  BACKPRESSURE = "backpressure"
163
- # Always On
164
144
  ALWAYS = "always"
165
145
 
166
146
 
@@ -18,11 +18,8 @@ class OutputMinioType(str, Enum):
18
18
  class OutputMinioAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""AWS authentication method. Choose Auto to use IAM roles."""
20
20
 
21
- # Auto
22
21
  AUTO = "auto"
23
- # Manual
24
22
  MANUAL = "manual"
25
- # Secret Key pair
26
23
  SECRET = "secret"
27
24
 
28
25
 
@@ -36,64 +33,47 @@ class OutputMinioSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
36
33
  class OutputMinioObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
37
34
  r"""Object ACL to assign to uploaded objects"""
38
35
 
39
- # Private
40
36
  PRIVATE = "private"
41
- # Public Read Only
42
37
  PUBLIC_READ = "public-read"
43
- # Public Read/Write
44
38
  PUBLIC_READ_WRITE = "public-read-write"
45
- # Authenticated Read Only
46
39
  AUTHENTICATED_READ = "authenticated-read"
47
- # AWS EC2 AMI Read Only
48
40
  AWS_EXEC_READ = "aws-exec-read"
49
- # Bucket Owner Read Only
50
41
  BUCKET_OWNER_READ = "bucket-owner-read"
51
- # Bucket Owner Full Control
52
42
  BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
53
43
 
54
44
 
55
45
  class OutputMinioStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
56
46
  r"""Storage class to select for uploaded objects"""
57
47
 
58
- # Standard
59
48
  STANDARD = "STANDARD"
60
- # Reduced Redundancy Storage
61
49
  REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
62
50
 
63
51
 
64
52
  class ServerSideEncryption(str, Enum, metaclass=utils.OpenEnumMeta):
65
53
  r"""Server-side encryption for uploaded objects"""
66
54
 
67
- # Amazon S3 Managed Key
68
55
  AES256 = "AES256"
69
56
 
70
57
 
71
58
  class OutputMinioDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
72
59
  r"""Format of the output data"""
73
60
 
74
- # JSON
75
61
  JSON = "json"
76
- # Raw
77
62
  RAW = "raw"
78
- # Parquet
79
63
  PARQUET = "parquet"
80
64
 
81
65
 
82
66
  class OutputMinioBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
83
67
  r"""How to handle events when all receivers are exerting backpressure"""
84
68
 
85
- # Block
86
69
  BLOCK = "block"
87
- # Drop
88
70
  DROP = "drop"
89
71
 
90
72
 
91
73
  class OutputMinioDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
92
74
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
93
75
 
94
- # Block
95
76
  BLOCK = "block"
96
- # Drop
97
77
  DROP = "drop"
98
78
 
99
79
 
@@ -107,31 +87,23 @@ class OutputMinioCompression(str, Enum, metaclass=utils.OpenEnumMeta):
107
87
  class OutputMinioCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
108
88
  r"""Compression level to apply before moving files to final destination"""
109
89
 
110
- # Best Speed
111
90
  BEST_SPEED = "best_speed"
112
- # Normal
113
91
  NORMAL = "normal"
114
- # Best Compression
115
92
  BEST_COMPRESSION = "best_compression"
116
93
 
117
94
 
118
95
  class OutputMinioParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
119
96
  r"""Determines which data types are supported and how they are represented"""
120
97
 
121
- # 1.0
122
98
  PARQUET_1_0 = "PARQUET_1_0"
123
- # 2.4
124
99
  PARQUET_2_4 = "PARQUET_2_4"
125
- # 2.6
126
100
  PARQUET_2_6 = "PARQUET_2_6"
127
101
 
128
102
 
129
103
  class OutputMinioDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
130
104
  r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
131
105
 
132
- # V1
133
106
  DATA_PAGE_V1 = "DATA_PAGE_V1"
134
- # V2
135
107
  DATA_PAGE_V2 = "DATA_PAGE_V2"
136
108
 
137
109
 
@@ -18,38 +18,35 @@ class OutputMskType(str, Enum):
18
18
  class OutputMskAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""Control the number of required acknowledgments."""
20
20
 
21
- # Leader
22
21
  ONE = 1
23
- # None
24
22
  ZERO = 0
25
- # All
26
23
  MINUS_1 = -1
27
24
 
28
25
 
29
26
  class OutputMskRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
30
27
  r"""Format to use to serialize events before writing to Kafka."""
31
28
 
32
- # JSON
33
29
  JSON = "json"
34
- # Field _raw
35
30
  RAW = "raw"
36
- # Protobuf
37
31
  PROTOBUF = "protobuf"
38
32
 
39
33
 
40
34
  class OutputMskCompression(str, Enum, metaclass=utils.OpenEnumMeta):
41
35
  r"""Codec to use to compress the data before sending to Kafka"""
42
36
 
43
- # None
44
37
  NONE = "none"
45
- # Gzip
46
38
  GZIP = "gzip"
47
- # Snappy
48
39
  SNAPPY = "snappy"
49
- # LZ4
50
40
  LZ4 = "lz4"
51
41
 
52
42
 
43
+ class OutputMskSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
44
+ r"""The schema format used to encode and decode event data"""
45
+
46
+ AVRO = "avro"
47
+ JSON = "json"
48
+
49
+
53
50
  class OutputMskAuthTypedDict(TypedDict):
54
51
  r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
55
52
 
@@ -160,6 +157,8 @@ class OutputMskKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
160
157
  disabled: NotRequired[bool]
161
158
  schema_registry_url: NotRequired[str]
162
159
  r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
160
+ schema_type: NotRequired[OutputMskSchemaType]
161
+ r"""The schema format used to encode and decode event data"""
163
162
  connection_timeout: NotRequired[float]
164
163
  r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
165
164
  request_timeout: NotRequired[float]
@@ -183,6 +182,14 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
183
182
  ] = "http://localhost:8081"
184
183
  r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
185
184
 
185
+ schema_type: Annotated[
186
+ Annotated[
187
+ Optional[OutputMskSchemaType], PlainValidator(validate_open_enum(False))
188
+ ],
189
+ pydantic.Field(alias="schemaType"),
190
+ ] = OutputMskSchemaType.AVRO
191
+ r"""The schema format used to encode and decode event data"""
192
+
186
193
  connection_timeout: Annotated[
187
194
  Optional[float], pydantic.Field(alias="connectionTimeout")
188
195
  ] = 30000
@@ -215,11 +222,8 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
215
222
  class OutputMskAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
216
223
  r"""AWS authentication method. Choose Auto to use IAM roles."""
217
224
 
218
- # Auto
219
225
  AUTO = "auto"
220
- # Manual
221
226
  MANUAL = "manual"
222
- # Secret Key pair
223
227
  SECRET = "secret"
224
228
 
225
229
 
@@ -316,40 +320,30 @@ class OutputMskTLSSettingsClientSide(BaseModel):
316
320
  class OutputMskBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
317
321
  r"""How to handle events when all receivers are exerting backpressure"""
318
322
 
319
- # Block
320
323
  BLOCK = "block"
321
- # Drop
322
324
  DROP = "drop"
323
- # Persistent Queue
324
325
  QUEUE = "queue"
325
326
 
326
327
 
327
328
  class OutputMskPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
328
329
  r"""Codec to use to compress the persisted data"""
329
330
 
330
- # None
331
331
  NONE = "none"
332
- # Gzip
333
332
  GZIP = "gzip"
334
333
 
335
334
 
336
335
  class OutputMskQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
337
336
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
338
337
 
339
- # Block
340
338
  BLOCK = "block"
341
- # Drop new data
342
339
  DROP = "drop"
343
340
 
344
341
 
345
342
  class OutputMskMode(str, Enum, metaclass=utils.OpenEnumMeta):
346
343
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
347
344
 
348
- # Error
349
345
  ERROR = "error"
350
- # Backpressure
351
346
  BACKPRESSURE = "backpressure"
352
- # Always On
353
347
  ALWAYS = "always"
354
348
 
355
349
 
@@ -18,11 +18,8 @@ class OutputNewrelicType(str, Enum):
18
18
  class OutputNewrelicRegion(str, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""Which New Relic region endpoint to use."""
20
20
 
21
- # US
22
21
  US = "US"
23
- # Europe
24
22
  EU = "EU"
25
- # Custom
26
23
  CUSTOM = "Custom"
27
24
 
28
25
 
@@ -60,11 +57,8 @@ class OutputNewrelicExtraHTTPHeader(BaseModel):
60
57
  class OutputNewrelicFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
61
58
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
62
59
 
63
- # Payload
64
60
  PAYLOAD = "payload"
65
- # Payload + Headers
66
61
  PAYLOAD_AND_HEADERS = "payloadAndHeaders"
67
- # None
68
62
  NONE = "none"
69
63
 
70
64
 
@@ -125,11 +119,8 @@ class OutputNewrelicTimeoutRetrySettings(BaseModel):
125
119
  class OutputNewrelicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
126
120
  r"""How to handle events when all receivers are exerting backpressure"""
127
121
 
128
- # Block
129
122
  BLOCK = "block"
130
- # Drop
131
123
  DROP = "drop"
132
- # Persistent Queue
133
124
  QUEUE = "queue"
134
125
 
135
126
 
@@ -143,29 +134,22 @@ class OutputNewrelicAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta
143
134
  class OutputNewrelicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
144
135
  r"""Codec to use to compress the persisted data"""
145
136
 
146
- # None
147
137
  NONE = "none"
148
- # Gzip
149
138
  GZIP = "gzip"
150
139
 
151
140
 
152
141
  class OutputNewrelicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
153
142
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
154
143
 
155
- # Block
156
144
  BLOCK = "block"
157
- # Drop new data
158
145
  DROP = "drop"
159
146
 
160
147
 
161
148
  class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
162
149
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
163
150
 
164
- # Error
165
151
  ERROR = "error"
166
- # Backpressure
167
152
  BACKPRESSURE = "backpressure"
168
- # Always On
169
153
  ALWAYS = "always"
170
154
 
171
155