cribl-control-plane 0.2.1-py3-none-any.whl → 0.2.1rc2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic. Click here for more details.

Files changed (157)
  1. cribl_control_plane/_version.py +3 -5
  2. cribl_control_plane/errors/__init__.py +8 -5
  3. cribl_control_plane/errors/{healthstatus_error.py → healthserverstatus_error.py} +10 -9
  4. cribl_control_plane/groups_sdk.py +52 -28
  5. cribl_control_plane/health.py +22 -16
  6. cribl_control_plane/models/__init__.py +103 -50
  7. cribl_control_plane/models/authtoken.py +5 -1
  8. cribl_control_plane/models/configgroup.py +35 -6
  9. cribl_control_plane/models/createconfiggroupbyproductop.py +6 -5
  10. cribl_control_plane/models/createroutesappendbyidop.py +2 -2
  11. cribl_control_plane/models/createversionundoop.py +3 -3
  12. cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
  13. cribl_control_plane/models/distributedsummary.py +6 -0
  14. cribl_control_plane/models/groupcreaterequest.py +152 -0
  15. cribl_control_plane/models/hbcriblinfo.py +14 -3
  16. cribl_control_plane/models/{healthstatus.py → healthserverstatus.py} +7 -7
  17. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  18. cribl_control_plane/models/input.py +65 -63
  19. cribl_control_plane/models/inputappscope.py +4 -0
  20. cribl_control_plane/models/inputazureblob.py +4 -0
  21. cribl_control_plane/models/inputcollection.py +4 -0
  22. cribl_control_plane/models/inputconfluentcloud.py +8 -18
  23. cribl_control_plane/models/inputcribl.py +4 -0
  24. cribl_control_plane/models/inputcriblhttp.py +4 -0
  25. cribl_control_plane/models/inputcribllakehttp.py +4 -0
  26. cribl_control_plane/models/inputcriblmetrics.py +4 -0
  27. cribl_control_plane/models/inputcribltcp.py +4 -0
  28. cribl_control_plane/models/inputcrowdstrike.py +7 -0
  29. cribl_control_plane/models/inputdatadogagent.py +4 -0
  30. cribl_control_plane/models/inputdatagen.py +4 -0
  31. cribl_control_plane/models/inputedgeprometheus.py +12 -0
  32. cribl_control_plane/models/inputelastic.py +11 -0
  33. cribl_control_plane/models/inputeventhub.py +6 -0
  34. cribl_control_plane/models/inputexec.py +4 -0
  35. cribl_control_plane/models/inputfile.py +6 -0
  36. cribl_control_plane/models/inputfirehose.py +4 -0
  37. cribl_control_plane/models/inputgooglepubsub.py +7 -0
  38. cribl_control_plane/models/inputgrafana.py +8 -0
  39. cribl_control_plane/models/inputhttp.py +4 -0
  40. cribl_control_plane/models/inputhttpraw.py +4 -0
  41. cribl_control_plane/models/inputjournalfiles.py +4 -0
  42. cribl_control_plane/models/inputkafka.py +8 -17
  43. cribl_control_plane/models/inputkinesis.py +15 -0
  44. cribl_control_plane/models/inputkubeevents.py +4 -0
  45. cribl_control_plane/models/inputkubelogs.py +4 -0
  46. cribl_control_plane/models/inputkubemetrics.py +4 -0
  47. cribl_control_plane/models/inputloki.py +4 -0
  48. cribl_control_plane/models/inputmetrics.py +4 -0
  49. cribl_control_plane/models/inputmodeldriventelemetry.py +4 -0
  50. cribl_control_plane/models/inputmsk.py +7 -17
  51. cribl_control_plane/models/inputnetflow.py +4 -0
  52. cribl_control_plane/models/inputoffice365mgmt.py +11 -0
  53. cribl_control_plane/models/inputoffice365msgtrace.py +11 -0
  54. cribl_control_plane/models/inputoffice365service.py +11 -0
  55. cribl_control_plane/models/inputopentelemetry.py +8 -0
  56. cribl_control_plane/models/inputprometheus.py +10 -0
  57. cribl_control_plane/models/inputprometheusrw.py +4 -0
  58. cribl_control_plane/models/inputrawudp.py +4 -0
  59. cribl_control_plane/models/inputs3.py +7 -0
  60. cribl_control_plane/models/inputs3inventory.py +7 -0
  61. cribl_control_plane/models/inputsecuritylake.py +7 -0
  62. cribl_control_plane/models/inputsnmp.py +11 -0
  63. cribl_control_plane/models/inputsplunk.py +9 -0
  64. cribl_control_plane/models/inputsplunkhec.py +4 -0
  65. cribl_control_plane/models/inputsplunksearch.py +7 -0
  66. cribl_control_plane/models/inputsqs.py +9 -0
  67. cribl_control_plane/models/inputsyslog.py +8 -0
  68. cribl_control_plane/models/inputsystemmetrics.py +32 -0
  69. cribl_control_plane/models/inputsystemstate.py +4 -0
  70. cribl_control_plane/models/inputtcp.py +4 -0
  71. cribl_control_plane/models/inputtcpjson.py +4 -0
  72. cribl_control_plane/models/inputwef.py +6 -0
  73. cribl_control_plane/models/inputwindowsmetrics.py +28 -0
  74. cribl_control_plane/models/inputwineventlogs.py +8 -0
  75. cribl_control_plane/models/inputwiz.py +7 -0
  76. cribl_control_plane/models/inputwizwebhook.py +4 -0
  77. cribl_control_plane/models/inputzscalerhec.py +4 -0
  78. cribl_control_plane/models/jobinfo.py +4 -1
  79. cribl_control_plane/models/logininfo.py +3 -3
  80. cribl_control_plane/models/nodeprovidedinfo.py +11 -1
  81. cribl_control_plane/models/outpostnodeinfo.py +16 -0
  82. cribl_control_plane/models/output.py +77 -72
  83. cribl_control_plane/models/outputazureblob.py +20 -0
  84. cribl_control_plane/models/outputazuredataexplorer.py +28 -0
  85. cribl_control_plane/models/outputazureeventhub.py +17 -0
  86. cribl_control_plane/models/outputazurelogs.py +13 -0
  87. cribl_control_plane/models/outputchronicle.py +13 -0
  88. cribl_control_plane/models/outputclickhouse.py +17 -0
  89. cribl_control_plane/models/outputcloudwatch.py +13 -0
  90. cribl_control_plane/models/outputconfluentcloud.py +24 -18
  91. cribl_control_plane/models/outputcriblhttp.py +15 -0
  92. cribl_control_plane/models/outputcribllake.py +21 -0
  93. cribl_control_plane/models/outputcribltcp.py +12 -0
  94. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +15 -0
  95. cribl_control_plane/models/outputdatabricks.py +411 -0
  96. cribl_control_plane/models/outputdatadog.py +30 -0
  97. cribl_control_plane/models/outputdataset.py +23 -0
  98. cribl_control_plane/models/outputdls3.py +35 -0
  99. cribl_control_plane/models/outputdynatracehttp.py +22 -0
  100. cribl_control_plane/models/outputdynatraceotlp.py +22 -0
  101. cribl_control_plane/models/outputelastic.py +18 -0
  102. cribl_control_plane/models/outputelasticcloud.py +13 -0
  103. cribl_control_plane/models/outputexabeam.py +14 -0
  104. cribl_control_plane/models/outputfilesystem.py +15 -0
  105. cribl_control_plane/models/outputgooglechronicle.py +21 -0
  106. cribl_control_plane/models/outputgooglecloudlogging.py +19 -0
  107. cribl_control_plane/models/outputgooglecloudstorage.py +28 -0
  108. cribl_control_plane/models/outputgooglepubsub.py +13 -0
  109. cribl_control_plane/models/outputgrafanacloud.py +50 -0
  110. cribl_control_plane/models/outputgraphite.py +12 -0
  111. cribl_control_plane/models/outputhoneycomb.py +13 -0
  112. cribl_control_plane/models/outputhumiohec.py +15 -0
  113. cribl_control_plane/models/outputinfluxdb.py +19 -0
  114. cribl_control_plane/models/outputkafka.py +24 -17
  115. cribl_control_plane/models/outputkinesis.py +15 -0
  116. cribl_control_plane/models/outputloki.py +20 -0
  117. cribl_control_plane/models/outputminio.py +28 -0
  118. cribl_control_plane/models/outputmsk.py +23 -17
  119. cribl_control_plane/models/outputnewrelic.py +16 -0
  120. cribl_control_plane/models/outputnewrelicevents.py +16 -0
  121. cribl_control_plane/models/outputopentelemetry.py +22 -0
  122. cribl_control_plane/models/outputprometheus.py +13 -0
  123. cribl_control_plane/models/outputring.py +2 -0
  124. cribl_control_plane/models/outputs3.py +35 -0
  125. cribl_control_plane/models/outputsecuritylake.py +29 -0
  126. cribl_control_plane/models/outputsentinel.py +15 -0
  127. cribl_control_plane/models/outputsentineloneaisiem.py +13 -0
  128. cribl_control_plane/models/outputservicenow.py +21 -0
  129. cribl_control_plane/models/outputsignalfx.py +13 -0
  130. cribl_control_plane/models/outputsns.py +13 -0
  131. cribl_control_plane/models/outputsplunk.py +15 -0
  132. cribl_control_plane/models/outputsplunkhec.py +13 -0
  133. cribl_control_plane/models/outputsplunklb.py +15 -0
  134. cribl_control_plane/models/outputsqs.py +15 -0
  135. cribl_control_plane/models/outputstatsd.py +12 -0
  136. cribl_control_plane/models/outputstatsdext.py +12 -0
  137. cribl_control_plane/models/outputsumologic.py +15 -0
  138. cribl_control_plane/models/outputsyslog.py +24 -0
  139. cribl_control_plane/models/outputtcpjson.py +12 -0
  140. cribl_control_plane/models/outputwavefront.py +13 -0
  141. cribl_control_plane/models/outputwebhook.py +23 -0
  142. cribl_control_plane/models/outputxsiam.py +13 -0
  143. cribl_control_plane/models/packinfo.py +8 -5
  144. cribl_control_plane/models/packinstallinfo.py +8 -5
  145. cribl_control_plane/models/routeconf.py +3 -4
  146. cribl_control_plane/models/runnablejobcollection.py +4 -0
  147. cribl_control_plane/models/updatepacksop.py +25 -0
  148. cribl_control_plane/models/{routecloneconf.py → uploadpackresponse.py} +4 -4
  149. cribl_control_plane/packs.py +202 -7
  150. cribl_control_plane/routes_sdk.py +6 -6
  151. cribl_control_plane/tokens.py +23 -15
  152. {cribl_control_plane-0.2.1.dist-info → cribl_control_plane-0.2.1rc2.dist-info}/METADATA +37 -5
  153. {cribl_control_plane-0.2.1.dist-info → cribl_control_plane-0.2.1rc2.dist-info}/RECORD +154 -153
  154. cribl_control_plane/models/appmode.py +0 -14
  155. cribl_control_plane/models/error.py +0 -16
  156. cribl_control_plane/models/gethealthinfoop.py +0 -17
  157. {cribl_control_plane-0.2.1.dist-info → cribl_control_plane-0.2.1rc2.dist-info}/WHEEL +0 -0
@@ -18,35 +18,38 @@ class OutputKafkaType(str, Enum):
18
18
  class OutputKafkaAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""Control the number of required acknowledgments."""
20
20
 
21
+ # Leader
21
22
  ONE = 1
23
+ # None
22
24
  ZERO = 0
25
+ # All
23
26
  MINUS_1 = -1
24
27
 
25
28
 
26
29
  class OutputKafkaRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
27
30
  r"""Format to use to serialize events before writing to Kafka."""
28
31
 
32
+ # JSON
29
33
  JSON = "json"
34
+ # Field _raw
30
35
  RAW = "raw"
36
+ # Protobuf
31
37
  PROTOBUF = "protobuf"
32
38
 
33
39
 
34
40
  class OutputKafkaCompression(str, Enum, metaclass=utils.OpenEnumMeta):
35
41
  r"""Codec to use to compress the data before sending to Kafka"""
36
42
 
43
+ # None
37
44
  NONE = "none"
45
+ # Gzip
38
46
  GZIP = "gzip"
47
+ # Snappy
39
48
  SNAPPY = "snappy"
49
+ # LZ4
40
50
  LZ4 = "lz4"
41
51
 
42
52
 
43
- class OutputKafkaSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
44
- r"""The schema format used to encode and decode event data"""
45
-
46
- AVRO = "avro"
47
- JSON = "json"
48
-
49
-
50
53
  class OutputKafkaAuthTypedDict(TypedDict):
51
54
  r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
52
55
 
@@ -157,8 +160,6 @@ class OutputKafkaKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
157
160
  disabled: NotRequired[bool]
158
161
  schema_registry_url: NotRequired[str]
159
162
  r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
160
- schema_type: NotRequired[OutputKafkaSchemaType]
161
- r"""The schema format used to encode and decode event data"""
162
163
  connection_timeout: NotRequired[float]
163
164
  r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
164
165
  request_timeout: NotRequired[float]
@@ -182,14 +183,6 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
182
183
  ] = "http://localhost:8081"
183
184
  r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
184
185
 
185
- schema_type: Annotated[
186
- Annotated[
187
- Optional[OutputKafkaSchemaType], PlainValidator(validate_open_enum(False))
188
- ],
189
- pydantic.Field(alias="schemaType"),
190
- ] = OutputKafkaSchemaType.AVRO
191
- r"""The schema format used to encode and decode event data"""
192
-
193
186
  connection_timeout: Annotated[
194
187
  Optional[float], pydantic.Field(alias="connectionTimeout")
195
188
  ] = 30000
@@ -220,9 +213,13 @@ class OutputKafkaKafkaSchemaRegistryAuthentication(BaseModel):
220
213
 
221
214
 
222
215
  class OutputKafkaSASLMechanism(str, Enum, metaclass=utils.OpenEnumMeta):
216
+ # PLAIN
223
217
  PLAIN = "plain"
218
+ # SCRAM-SHA-256
224
219
  SCRAM_SHA_256 = "scram-sha-256"
220
+ # SCRAM-SHA-512
225
221
  SCRAM_SHA_512 = "scram-sha-512"
222
+ # GSSAPI/Kerberos
226
223
  KERBEROS = "kerberos"
227
224
 
228
225
 
@@ -336,30 +333,40 @@ class OutputKafkaTLSSettingsClientSide(BaseModel):
336
333
  class OutputKafkaBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
337
334
  r"""How to handle events when all receivers are exerting backpressure"""
338
335
 
336
+ # Block
339
337
  BLOCK = "block"
338
+ # Drop
340
339
  DROP = "drop"
340
+ # Persistent Queue
341
341
  QUEUE = "queue"
342
342
 
343
343
 
344
344
  class OutputKafkaPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
345
345
  r"""Codec to use to compress the persisted data"""
346
346
 
347
+ # None
347
348
  NONE = "none"
349
+ # Gzip
348
350
  GZIP = "gzip"
349
351
 
350
352
 
351
353
  class OutputKafkaQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
352
354
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
353
355
 
356
+ # Block
354
357
  BLOCK = "block"
358
+ # Drop new data
355
359
  DROP = "drop"
356
360
 
357
361
 
358
362
  class OutputKafkaMode(str, Enum, metaclass=utils.OpenEnumMeta):
359
363
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
360
364
 
365
+ # Error
361
366
  ERROR = "error"
367
+ # Backpressure
362
368
  BACKPRESSURE = "backpressure"
369
+ # Always On
363
370
  ALWAYS = "always"
364
371
 
365
372
 
@@ -18,8 +18,11 @@ class OutputKinesisType(str, Enum):
18
18
  class OutputKinesisAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""AWS authentication method. Choose Auto to use IAM roles."""
20
20
 
21
+ # Auto
21
22
  AUTO = "auto"
23
+ # Manual
22
24
  MANUAL = "manual"
25
+ # Secret Key pair
23
26
  SECRET = "secret"
24
27
 
25
28
 
@@ -33,37 +36,49 @@ class OutputKinesisSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
33
36
  class OutputKinesisCompression(str, Enum, metaclass=utils.OpenEnumMeta):
34
37
  r"""Compression type to use for records"""
35
38
 
39
+ # None
36
40
  NONE = "none"
41
+ # Gzip
37
42
  GZIP = "gzip"
38
43
 
39
44
 
40
45
  class OutputKinesisBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
41
46
  r"""How to handle events when all receivers are exerting backpressure"""
42
47
 
48
+ # Block
43
49
  BLOCK = "block"
50
+ # Drop
44
51
  DROP = "drop"
52
+ # Persistent Queue
45
53
  QUEUE = "queue"
46
54
 
47
55
 
48
56
  class OutputKinesisPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
49
57
  r"""Codec to use to compress the persisted data"""
50
58
 
59
+ # None
51
60
  NONE = "none"
61
+ # Gzip
52
62
  GZIP = "gzip"
53
63
 
54
64
 
55
65
  class OutputKinesisQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
56
66
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
57
67
 
68
+ # Block
58
69
  BLOCK = "block"
70
+ # Drop new data
59
71
  DROP = "drop"
60
72
 
61
73
 
62
74
  class OutputKinesisMode(str, Enum, metaclass=utils.OpenEnumMeta):
63
75
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
64
76
 
77
+ # Error
65
78
  ERROR = "error"
79
+ # Backpressure
66
80
  BACKPRESSURE = "backpressure"
81
+ # Always On
67
82
  ALWAYS = "always"
68
83
 
69
84
 
@@ -18,7 +18,9 @@ class OutputLokiType(str, Enum):
18
18
  class OutputLokiMessageFormat(str, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""Format to use when sending logs to Loki (Protobuf or JSON)"""
20
20
 
21
+ # Protobuf
21
22
  PROTOBUF = "protobuf"
23
+ # JSON
22
24
  JSON = "json"
23
25
 
24
26
 
@@ -34,10 +36,15 @@ class OutputLokiLabel(BaseModel):
34
36
 
35
37
 
36
38
  class OutputLokiAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
39
+ # None
37
40
  NONE = "none"
41
+ # Auth token
38
42
  TOKEN = "token"
43
+ # Auth token (text secret)
39
44
  TEXT_SECRET = "textSecret"
45
+ # Basic
40
46
  BASIC = "basic"
47
+ # Basic (credentials secret)
41
48
  CREDENTIALS_SECRET = "credentialsSecret"
42
49
 
43
50
 
@@ -55,8 +62,11 @@ class OutputLokiExtraHTTPHeader(BaseModel):
55
62
  class OutputLokiFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
56
63
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
57
64
 
65
+ # Payload
58
66
  PAYLOAD = "payload"
67
+ # Payload + Headers
59
68
  PAYLOAD_AND_HEADERS = "payloadAndHeaders"
69
+ # None
60
70
  NONE = "none"
61
71
 
62
72
 
@@ -117,30 +127,40 @@ class OutputLokiTimeoutRetrySettings(BaseModel):
117
127
  class OutputLokiBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
118
128
  r"""How to handle events when all receivers are exerting backpressure"""
119
129
 
130
+ # Block
120
131
  BLOCK = "block"
132
+ # Drop
121
133
  DROP = "drop"
134
+ # Persistent Queue
122
135
  QUEUE = "queue"
123
136
 
124
137
 
125
138
  class OutputLokiCompression(str, Enum, metaclass=utils.OpenEnumMeta):
126
139
  r"""Codec to use to compress the persisted data"""
127
140
 
141
+ # None
128
142
  NONE = "none"
143
+ # Gzip
129
144
  GZIP = "gzip"
130
145
 
131
146
 
132
147
  class OutputLokiQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
133
148
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
134
149
 
150
+ # Block
135
151
  BLOCK = "block"
152
+ # Drop new data
136
153
  DROP = "drop"
137
154
 
138
155
 
139
156
  class OutputLokiMode(str, Enum, metaclass=utils.OpenEnumMeta):
140
157
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
141
158
 
159
+ # Error
142
160
  ERROR = "error"
161
+ # Backpressure
143
162
  BACKPRESSURE = "backpressure"
163
+ # Always On
144
164
  ALWAYS = "always"
145
165
 
146
166
 
@@ -18,8 +18,11 @@ class OutputMinioType(str, Enum):
18
18
  class OutputMinioAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""AWS authentication method. Choose Auto to use IAM roles."""
20
20
 
21
+ # Auto
21
22
  AUTO = "auto"
23
+ # Manual
22
24
  MANUAL = "manual"
25
+ # Secret Key pair
23
26
  SECRET = "secret"
24
27
 
25
28
 
@@ -33,47 +36,64 @@ class OutputMinioSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
33
36
  class OutputMinioObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
34
37
  r"""Object ACL to assign to uploaded objects"""
35
38
 
39
+ # Private
36
40
  PRIVATE = "private"
41
+ # Public Read Only
37
42
  PUBLIC_READ = "public-read"
43
+ # Public Read/Write
38
44
  PUBLIC_READ_WRITE = "public-read-write"
45
+ # Authenticated Read Only
39
46
  AUTHENTICATED_READ = "authenticated-read"
47
+ # AWS EC2 AMI Read Only
40
48
  AWS_EXEC_READ = "aws-exec-read"
49
+ # Bucket Owner Read Only
41
50
  BUCKET_OWNER_READ = "bucket-owner-read"
51
+ # Bucket Owner Full Control
42
52
  BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"
43
53
 
44
54
 
45
55
  class OutputMinioStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
46
56
  r"""Storage class to select for uploaded objects"""
47
57
 
58
+ # Standard
48
59
  STANDARD = "STANDARD"
60
+ # Reduced Redundancy Storage
49
61
  REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
50
62
 
51
63
 
52
64
  class ServerSideEncryption(str, Enum, metaclass=utils.OpenEnumMeta):
53
65
  r"""Server-side encryption for uploaded objects"""
54
66
 
67
+ # Amazon S3 Managed Key
55
68
  AES256 = "AES256"
56
69
 
57
70
 
58
71
  class OutputMinioDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
59
72
  r"""Format of the output data"""
60
73
 
74
+ # JSON
61
75
  JSON = "json"
76
+ # Raw
62
77
  RAW = "raw"
78
+ # Parquet
63
79
  PARQUET = "parquet"
64
80
 
65
81
 
66
82
  class OutputMinioBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
67
83
  r"""How to handle events when all receivers are exerting backpressure"""
68
84
 
85
+ # Block
69
86
  BLOCK = "block"
87
+ # Drop
70
88
  DROP = "drop"
71
89
 
72
90
 
73
91
  class OutputMinioDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
74
92
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
75
93
 
94
+ # Block
76
95
  BLOCK = "block"
96
+ # Drop
77
97
  DROP = "drop"
78
98
 
79
99
 
@@ -87,23 +107,31 @@ class OutputMinioCompression(str, Enum, metaclass=utils.OpenEnumMeta):
87
107
  class OutputMinioCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
88
108
  r"""Compression level to apply before moving files to final destination"""
89
109
 
110
+ # Best Speed
90
111
  BEST_SPEED = "best_speed"
112
+ # Normal
91
113
  NORMAL = "normal"
114
+ # Best Compression
92
115
  BEST_COMPRESSION = "best_compression"
93
116
 
94
117
 
95
118
  class OutputMinioParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
96
119
  r"""Determines which data types are supported and how they are represented"""
97
120
 
121
+ # 1.0
98
122
  PARQUET_1_0 = "PARQUET_1_0"
123
+ # 2.4
99
124
  PARQUET_2_4 = "PARQUET_2_4"
125
+ # 2.6
100
126
  PARQUET_2_6 = "PARQUET_2_6"
101
127
 
102
128
 
103
129
  class OutputMinioDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
104
130
  r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
105
131
 
132
+ # V1
106
133
  DATA_PAGE_V1 = "DATA_PAGE_V1"
134
+ # V2
107
135
  DATA_PAGE_V2 = "DATA_PAGE_V2"
108
136
 
109
137
 
@@ -18,35 +18,38 @@ class OutputMskType(str, Enum):
18
18
  class OutputMskAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""Control the number of required acknowledgments."""
20
20
 
21
+ # Leader
21
22
  ONE = 1
23
+ # None
22
24
  ZERO = 0
25
+ # All
23
26
  MINUS_1 = -1
24
27
 
25
28
 
26
29
  class OutputMskRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
27
30
  r"""Format to use to serialize events before writing to Kafka."""
28
31
 
32
+ # JSON
29
33
  JSON = "json"
34
+ # Field _raw
30
35
  RAW = "raw"
36
+ # Protobuf
31
37
  PROTOBUF = "protobuf"
32
38
 
33
39
 
34
40
  class OutputMskCompression(str, Enum, metaclass=utils.OpenEnumMeta):
35
41
  r"""Codec to use to compress the data before sending to Kafka"""
36
42
 
43
+ # None
37
44
  NONE = "none"
45
+ # Gzip
38
46
  GZIP = "gzip"
47
+ # Snappy
39
48
  SNAPPY = "snappy"
49
+ # LZ4
40
50
  LZ4 = "lz4"
41
51
 
42
52
 
43
- class OutputMskSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
44
- r"""The schema format used to encode and decode event data"""
45
-
46
- AVRO = "avro"
47
- JSON = "json"
48
-
49
-
50
53
  class OutputMskAuthTypedDict(TypedDict):
51
54
  r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""
52
55
 
@@ -157,8 +160,6 @@ class OutputMskKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
157
160
  disabled: NotRequired[bool]
158
161
  schema_registry_url: NotRequired[str]
159
162
  r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
160
- schema_type: NotRequired[OutputMskSchemaType]
161
- r"""The schema format used to encode and decode event data"""
162
163
  connection_timeout: NotRequired[float]
163
164
  r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
164
165
  request_timeout: NotRequired[float]
@@ -182,14 +183,6 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
182
183
  ] = "http://localhost:8081"
183
184
  r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
184
185
 
185
- schema_type: Annotated[
186
- Annotated[
187
- Optional[OutputMskSchemaType], PlainValidator(validate_open_enum(False))
188
- ],
189
- pydantic.Field(alias="schemaType"),
190
- ] = OutputMskSchemaType.AVRO
191
- r"""The schema format used to encode and decode event data"""
192
-
193
186
  connection_timeout: Annotated[
194
187
  Optional[float], pydantic.Field(alias="connectionTimeout")
195
188
  ] = 30000
@@ -222,8 +215,11 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
222
215
  class OutputMskAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
223
216
  r"""AWS authentication method. Choose Auto to use IAM roles."""
224
217
 
218
+ # Auto
225
219
  AUTO = "auto"
220
+ # Manual
226
221
  MANUAL = "manual"
222
+ # Secret Key pair
227
223
  SECRET = "secret"
228
224
 
229
225
 
@@ -320,30 +316,40 @@ class OutputMskTLSSettingsClientSide(BaseModel):
320
316
  class OutputMskBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
321
317
  r"""How to handle events when all receivers are exerting backpressure"""
322
318
 
319
+ # Block
323
320
  BLOCK = "block"
321
+ # Drop
324
322
  DROP = "drop"
323
+ # Persistent Queue
325
324
  QUEUE = "queue"
326
325
 
327
326
 
328
327
  class OutputMskPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
329
328
  r"""Codec to use to compress the persisted data"""
330
329
 
330
+ # None
331
331
  NONE = "none"
332
+ # Gzip
332
333
  GZIP = "gzip"
333
334
 
334
335
 
335
336
  class OutputMskQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
336
337
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
337
338
 
339
+ # Block
338
340
  BLOCK = "block"
341
+ # Drop new data
339
342
  DROP = "drop"
340
343
 
341
344
 
342
345
  class OutputMskMode(str, Enum, metaclass=utils.OpenEnumMeta):
343
346
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
344
347
 
348
+ # Error
345
349
  ERROR = "error"
350
+ # Backpressure
346
351
  BACKPRESSURE = "backpressure"
352
+ # Always On
347
353
  ALWAYS = "always"
348
354
 
349
355
 
@@ -18,8 +18,11 @@ class OutputNewrelicType(str, Enum):
18
18
  class OutputNewrelicRegion(str, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""Which New Relic region endpoint to use."""
20
20
 
21
+ # US
21
22
  US = "US"
23
+ # Europe
22
24
  EU = "EU"
25
+ # Custom
23
26
  CUSTOM = "Custom"
24
27
 
25
28
 
@@ -57,8 +60,11 @@ class OutputNewrelicExtraHTTPHeader(BaseModel):
57
60
  class OutputNewrelicFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
58
61
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
59
62
 
63
+ # Payload
60
64
  PAYLOAD = "payload"
65
+ # Payload + Headers
61
66
  PAYLOAD_AND_HEADERS = "payloadAndHeaders"
67
+ # None
62
68
  NONE = "none"
63
69
 
64
70
 
@@ -119,8 +125,11 @@ class OutputNewrelicTimeoutRetrySettings(BaseModel):
119
125
  class OutputNewrelicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
120
126
  r"""How to handle events when all receivers are exerting backpressure"""
121
127
 
128
+ # Block
122
129
  BLOCK = "block"
130
+ # Drop
123
131
  DROP = "drop"
132
+ # Persistent Queue
124
133
  QUEUE = "queue"
125
134
 
126
135
 
@@ -134,22 +143,29 @@ class OutputNewrelicAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta
134
143
  class OutputNewrelicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
135
144
  r"""Codec to use to compress the persisted data"""
136
145
 
146
+ # None
137
147
  NONE = "none"
148
+ # Gzip
138
149
  GZIP = "gzip"
139
150
 
140
151
 
141
152
  class OutputNewrelicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
142
153
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
143
154
 
155
+ # Block
144
156
  BLOCK = "block"
157
+ # Drop new data
145
158
  DROP = "drop"
146
159
 
147
160
 
148
161
  class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
149
162
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
150
163
 
164
+ # Error
151
165
  ERROR = "error"
166
+ # Backpressure
152
167
  BACKPRESSURE = "backpressure"
168
+ # Always On
153
169
  ALWAYS = "always"
154
170
 
155
171
 
@@ -18,8 +18,11 @@ class OutputNewrelicEventsType(str, Enum):
18
18
  class OutputNewrelicEventsRegion(str, Enum, metaclass=utils.OpenEnumMeta):
19
19
  r"""Which New Relic region endpoint to use."""
20
20
 
21
+ # US
21
22
  US = "US"
23
+ # Europe
22
24
  EU = "EU"
25
+ # Custom
23
26
  CUSTOM = "Custom"
24
27
 
25
28
 
@@ -39,8 +42,11 @@ class OutputNewrelicEventsFailedRequestLoggingMode(
39
42
  ):
40
43
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
41
44
 
45
+ # Payload
42
46
  PAYLOAD = "payload"
47
+ # Payload + Headers
43
48
  PAYLOAD_AND_HEADERS = "payloadAndHeaders"
49
+ # None
44
50
  NONE = "none"
45
51
 
46
52
 
@@ -101,8 +107,11 @@ class OutputNewrelicEventsTimeoutRetrySettings(BaseModel):
101
107
  class OutputNewrelicEventsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
102
108
  r"""How to handle events when all receivers are exerting backpressure"""
103
109
 
110
+ # Block
104
111
  BLOCK = "block"
112
+ # Drop
105
113
  DROP = "drop"
114
+ # Persistent Queue
106
115
  QUEUE = "queue"
107
116
 
108
117
 
@@ -116,22 +125,29 @@ class OutputNewrelicEventsAuthenticationMethod(str, Enum, metaclass=utils.OpenEn
116
125
  class OutputNewrelicEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
117
126
  r"""Codec to use to compress the persisted data"""
118
127
 
128
+ # None
119
129
  NONE = "none"
130
+ # Gzip
120
131
  GZIP = "gzip"
121
132
 
122
133
 
123
134
  class OutputNewrelicEventsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
124
135
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
125
136
 
137
+ # Block
126
138
  BLOCK = "block"
139
+ # Drop new data
127
140
  DROP = "drop"
128
141
 
129
142
 
130
143
  class OutputNewrelicEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
131
144
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
132
145
 
146
+ # Error
133
147
  ERROR = "error"
148
+ # Backpressure
134
149
  BACKPRESSURE = "backpressure"
150
+ # Always On
135
151
  ALWAYS = "always"
136
152
 
137
153