cribl-control-plane 0.2.1rc6__py3-none-any.whl → 0.3.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of cribl-control-plane might be problematic.

Files changed (157)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/errors/__init__.py +5 -8
  3. cribl_control_plane/errors/{healthserverstatus_error.py → healthstatus_error.py} +9 -10
  4. cribl_control_plane/groups_sdk.py +30 -54
  5. cribl_control_plane/health.py +16 -22
  6. cribl_control_plane/models/__init__.py +50 -103
  7. cribl_control_plane/models/appmode.py +14 -0
  8. cribl_control_plane/models/authtoken.py +1 -5
  9. cribl_control_plane/models/configgroup.py +6 -35
  10. cribl_control_plane/models/createconfiggroupbyproductop.py +7 -8
  11. cribl_control_plane/models/createroutesappendbyidop.py +2 -2
  12. cribl_control_plane/models/createversionundoop.py +3 -3
  13. cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
  14. cribl_control_plane/models/distributedsummary.py +0 -6
  15. cribl_control_plane/models/error.py +16 -0
  16. cribl_control_plane/models/gethealthinfoop.py +17 -0
  17. cribl_control_plane/models/hbcriblinfo.py +3 -14
  18. cribl_control_plane/models/{healthserverstatus.py → healthstatus.py} +7 -7
  19. cribl_control_plane/models/heartbeatmetadata.py +0 -3
  20. cribl_control_plane/models/input.py +63 -65
  21. cribl_control_plane/models/inputappscope.py +0 -4
  22. cribl_control_plane/models/inputazureblob.py +0 -4
  23. cribl_control_plane/models/inputcollection.py +0 -4
  24. cribl_control_plane/models/inputconfluentcloud.py +18 -8
  25. cribl_control_plane/models/inputcribl.py +0 -4
  26. cribl_control_plane/models/inputcriblhttp.py +0 -4
  27. cribl_control_plane/models/inputcribllakehttp.py +0 -4
  28. cribl_control_plane/models/inputcriblmetrics.py +0 -4
  29. cribl_control_plane/models/inputcribltcp.py +0 -4
  30. cribl_control_plane/models/inputcrowdstrike.py +0 -7
  31. cribl_control_plane/models/inputdatadogagent.py +0 -4
  32. cribl_control_plane/models/inputdatagen.py +0 -4
  33. cribl_control_plane/models/inputedgeprometheus.py +0 -12
  34. cribl_control_plane/models/inputelastic.py +0 -11
  35. cribl_control_plane/models/inputeventhub.py +0 -6
  36. cribl_control_plane/models/inputexec.py +0 -4
  37. cribl_control_plane/models/inputfile.py +0 -6
  38. cribl_control_plane/models/inputfirehose.py +0 -4
  39. cribl_control_plane/models/inputgooglepubsub.py +0 -7
  40. cribl_control_plane/models/inputgrafana.py +0 -8
  41. cribl_control_plane/models/inputhttp.py +0 -4
  42. cribl_control_plane/models/inputhttpraw.py +0 -4
  43. cribl_control_plane/models/inputjournalfiles.py +0 -4
  44. cribl_control_plane/models/inputkafka.py +17 -8
  45. cribl_control_plane/models/inputkinesis.py +0 -15
  46. cribl_control_plane/models/inputkubeevents.py +0 -4
  47. cribl_control_plane/models/inputkubelogs.py +0 -4
  48. cribl_control_plane/models/inputkubemetrics.py +0 -4
  49. cribl_control_plane/models/inputloki.py +0 -4
  50. cribl_control_plane/models/inputmetrics.py +0 -4
  51. cribl_control_plane/models/inputmodeldriventelemetry.py +0 -4
  52. cribl_control_plane/models/inputmsk.py +17 -7
  53. cribl_control_plane/models/inputnetflow.py +0 -4
  54. cribl_control_plane/models/inputoffice365mgmt.py +0 -11
  55. cribl_control_plane/models/inputoffice365msgtrace.py +0 -11
  56. cribl_control_plane/models/inputoffice365service.py +0 -11
  57. cribl_control_plane/models/inputopentelemetry.py +0 -8
  58. cribl_control_plane/models/inputprometheus.py +0 -10
  59. cribl_control_plane/models/inputprometheusrw.py +0 -4
  60. cribl_control_plane/models/inputrawudp.py +0 -4
  61. cribl_control_plane/models/inputs3.py +0 -7
  62. cribl_control_plane/models/inputs3inventory.py +0 -7
  63. cribl_control_plane/models/inputsecuritylake.py +0 -7
  64. cribl_control_plane/models/inputsnmp.py +0 -11
  65. cribl_control_plane/models/inputsplunk.py +0 -9
  66. cribl_control_plane/models/inputsplunkhec.py +0 -4
  67. cribl_control_plane/models/inputsplunksearch.py +0 -7
  68. cribl_control_plane/models/inputsqs.py +0 -9
  69. cribl_control_plane/models/inputsyslog.py +0 -8
  70. cribl_control_plane/models/inputsystemmetrics.py +0 -32
  71. cribl_control_plane/models/inputsystemstate.py +0 -4
  72. cribl_control_plane/models/inputtcp.py +0 -4
  73. cribl_control_plane/models/inputtcpjson.py +0 -4
  74. cribl_control_plane/models/inputwef.py +0 -6
  75. cribl_control_plane/models/inputwindowsmetrics.py +0 -28
  76. cribl_control_plane/models/inputwineventlogs.py +0 -8
  77. cribl_control_plane/models/inputwiz.py +0 -7
  78. cribl_control_plane/models/inputwizwebhook.py +0 -4
  79. cribl_control_plane/models/inputzscalerhec.py +0 -4
  80. cribl_control_plane/models/jobinfo.py +1 -4
  81. cribl_control_plane/models/logininfo.py +3 -3
  82. cribl_control_plane/models/nodeprovidedinfo.py +1 -11
  83. cribl_control_plane/models/output.py +72 -77
  84. cribl_control_plane/models/outputazureblob.py +0 -20
  85. cribl_control_plane/models/outputazuredataexplorer.py +0 -28
  86. cribl_control_plane/models/outputazureeventhub.py +0 -17
  87. cribl_control_plane/models/outputazurelogs.py +0 -13
  88. cribl_control_plane/models/outputchronicle.py +0 -13
  89. cribl_control_plane/models/outputclickhouse.py +0 -17
  90. cribl_control_plane/models/outputcloudwatch.py +0 -13
  91. cribl_control_plane/models/outputconfluentcloud.py +18 -24
  92. cribl_control_plane/models/outputcriblhttp.py +0 -15
  93. cribl_control_plane/models/outputcribllake.py +0 -21
  94. cribl_control_plane/models/outputcribltcp.py +0 -12
  95. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +0 -15
  96. cribl_control_plane/models/outputdatadog.py +0 -30
  97. cribl_control_plane/models/outputdataset.py +0 -23
  98. cribl_control_plane/models/outputdls3.py +0 -35
  99. cribl_control_plane/models/outputdynatracehttp.py +0 -22
  100. cribl_control_plane/models/outputdynatraceotlp.py +0 -22
  101. cribl_control_plane/models/outputelastic.py +0 -18
  102. cribl_control_plane/models/outputelasticcloud.py +0 -13
  103. cribl_control_plane/models/outputexabeam.py +0 -14
  104. cribl_control_plane/models/outputfilesystem.py +0 -15
  105. cribl_control_plane/models/outputgooglechronicle.py +0 -21
  106. cribl_control_plane/models/outputgooglecloudlogging.py +0 -19
  107. cribl_control_plane/models/outputgooglecloudstorage.py +0 -28
  108. cribl_control_plane/models/outputgooglepubsub.py +0 -13
  109. cribl_control_plane/models/outputgrafanacloud.py +0 -50
  110. cribl_control_plane/models/outputgraphite.py +0 -12
  111. cribl_control_plane/models/outputhoneycomb.py +0 -13
  112. cribl_control_plane/models/outputhumiohec.py +0 -15
  113. cribl_control_plane/models/outputinfluxdb.py +0 -19
  114. cribl_control_plane/models/outputkafka.py +17 -24
  115. cribl_control_plane/models/outputkinesis.py +0 -15
  116. cribl_control_plane/models/outputloki.py +0 -20
  117. cribl_control_plane/models/outputminio.py +0 -28
  118. cribl_control_plane/models/outputmsk.py +17 -23
  119. cribl_control_plane/models/outputnewrelic.py +0 -16
  120. cribl_control_plane/models/outputnewrelicevents.py +0 -16
  121. cribl_control_plane/models/outputopentelemetry.py +0 -22
  122. cribl_control_plane/models/outputprometheus.py +0 -13
  123. cribl_control_plane/models/outputring.py +0 -2
  124. cribl_control_plane/models/outputs3.py +0 -35
  125. cribl_control_plane/models/outputsecuritylake.py +0 -29
  126. cribl_control_plane/models/outputsentinel.py +0 -15
  127. cribl_control_plane/models/outputsentineloneaisiem.py +0 -13
  128. cribl_control_plane/models/outputservicenow.py +0 -21
  129. cribl_control_plane/models/outputsignalfx.py +0 -13
  130. cribl_control_plane/models/outputsns.py +0 -13
  131. cribl_control_plane/models/outputsplunk.py +0 -15
  132. cribl_control_plane/models/outputsplunkhec.py +0 -13
  133. cribl_control_plane/models/outputsplunklb.py +0 -15
  134. cribl_control_plane/models/outputsqs.py +0 -15
  135. cribl_control_plane/models/outputstatsd.py +0 -12
  136. cribl_control_plane/models/outputstatsdext.py +0 -12
  137. cribl_control_plane/models/outputsumologic.py +0 -15
  138. cribl_control_plane/models/outputsyslog.py +0 -24
  139. cribl_control_plane/models/outputtcpjson.py +0 -12
  140. cribl_control_plane/models/outputwavefront.py +0 -13
  141. cribl_control_plane/models/outputwebhook.py +0 -23
  142. cribl_control_plane/models/outputxsiam.py +0 -13
  143. cribl_control_plane/models/packinfo.py +5 -8
  144. cribl_control_plane/models/packinstallinfo.py +5 -8
  145. cribl_control_plane/models/{uploadpackresponse.py → routecloneconf.py} +4 -4
  146. cribl_control_plane/models/routeconf.py +4 -3
  147. cribl_control_plane/models/runnablejobcollection.py +0 -4
  148. cribl_control_plane/packs.py +7 -202
  149. cribl_control_plane/routes_sdk.py +6 -6
  150. cribl_control_plane/tokens.py +15 -23
  151. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.3.0a1.dist-info}/METADATA +9 -50
  152. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.3.0a1.dist-info}/RECORD +153 -154
  153. cribl_control_plane/models/groupcreaterequest.py +0 -152
  154. cribl_control_plane/models/outpostnodeinfo.py +0 -16
  155. cribl_control_plane/models/outputdatabricks.py +0 -411
  156. cribl_control_plane/models/updatepacksop.py +0 -25
  157. {cribl_control_plane-0.2.1rc6.dist-info → cribl_control_plane-0.3.0a1.dist-info}/WHEEL +0 -0
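Both sides of this diff are prereleases, and pip normally skips prereleases unless they are pinned explicitly or installed with --pre. A quick way to confirm which side is actually installed, using only the standard library (no reliance on the package's internal _version module):

# Standard-library check of the installed distribution version; expect
# "0.2.1rc6" before upgrading and "0.3.0a1" after.
from importlib.metadata import version

print(version("cribl-control-plane"))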
cribl_control_plane/models/outputmsk.py

@@ -18,38 +18,35 @@ class OutputMskType(str, Enum):
 class OutputMskAcknowledgments(int, Enum, metaclass=utils.OpenEnumMeta):
     r"""Control the number of required acknowledgments."""

-    # Leader
     ONE = 1
-    # None
     ZERO = 0
-    # All
     MINUS_1 = -1


 class OutputMskRecordDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format to use to serialize events before writing to Kafka."""

-    # JSON
     JSON = "json"
-    # Field _raw
     RAW = "raw"
-    # Protobuf
     PROTOBUF = "protobuf"


 class OutputMskCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the data before sending to Kafka"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"
-    # Snappy
     SNAPPY = "snappy"
-    # LZ4
     LZ4 = "lz4"


+class OutputMskSchemaType(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""The schema format used to encode and decode event data"""
+
+    AVRO = "avro"
+    JSON = "json"
+
+
 class OutputMskAuthTypedDict(TypedDict):
     r"""Credentials to use when authenticating with the schema registry using basic HTTP authentication"""

@@ -160,6 +157,8 @@ class OutputMskKafkaSchemaRegistryAuthenticationTypedDict(TypedDict):
     disabled: NotRequired[bool]
     schema_registry_url: NotRequired[str]
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""
+    schema_type: NotRequired[OutputMskSchemaType]
+    r"""The schema format used to encode and decode event data"""
     connection_timeout: NotRequired[float]
     r"""Maximum time to wait for a Schema Registry connection to complete successfully"""
     request_timeout: NotRequired[float]
@@ -183,6 +182,14 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
     ] = "http://localhost:8081"
     r"""URL for accessing the Confluent Schema Registry. Example: http://localhost:8081. To connect over TLS, use https instead of http."""

+    schema_type: Annotated[
+        Annotated[
+            Optional[OutputMskSchemaType], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="schemaType"),
+    ] = OutputMskSchemaType.AVRO
+    r"""The schema format used to encode and decode event data"""
+
     connection_timeout: Annotated[
         Optional[float], pydantic.Field(alias="connectionTimeout")
     ] = 30000
@@ -215,11 +222,8 @@ class OutputMskKafkaSchemaRegistryAuthentication(BaseModel):
 class OutputMskAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

-    # Auto
     AUTO = "auto"
-    # Manual
     MANUAL = "manual"
-    # Secret Key pair
     SECRET = "secret"


@@ -316,40 +320,30 @@ class OutputMskTLSSettingsClientSide(BaseModel):
 class OutputMskBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"


 class OutputMskPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


 class OutputMskQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"


 class OutputMskMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

-    # Error
     ERROR = "error"
-    # Backpressure
     BACKPRESSURE = "backpressure"
-    # Always On
     ALWAYS = "always"
cribl_control_plane/models/outputnewrelic.py

@@ -18,11 +18,8 @@ class OutputNewrelicType(str, Enum):
 class OutputNewrelicRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Which New Relic region endpoint to use."""

-    # US
     US = "US"
-    # Europe
     EU = "EU"
-    # Custom
     CUSTOM = "Custom"


@@ -60,11 +57,8 @@ class OutputNewrelicExtraHTTPHeader(BaseModel):
 class OutputNewrelicFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

-    # Payload
     PAYLOAD = "payload"
-    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
-    # None
     NONE = "none"


@@ -125,11 +119,8 @@ class OutputNewrelicTimeoutRetrySettings(BaseModel):
 class OutputNewrelicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"


@@ -143,29 +134,22 @@ class OutputNewrelicAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta
 class OutputNewrelicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


 class OutputNewrelicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"


 class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

-    # Error
     ERROR = "error"
-    # Backpressure
     BACKPRESSURE = "backpressure"
-    # Always On
     ALWAYS = "always"
cribl_control_plane/models/outputnewrelicevents.py

@@ -18,11 +18,8 @@ class OutputNewrelicEventsType(str, Enum):
 class OutputNewrelicEventsRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Which New Relic region endpoint to use."""

-    # US
     US = "US"
-    # Europe
     EU = "EU"
-    # Custom
     CUSTOM = "Custom"


@@ -42,11 +39,8 @@ class OutputNewrelicEventsFailedRequestLoggingMode(
 ):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

-    # Payload
     PAYLOAD = "payload"
-    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
-    # None
     NONE = "none"


@@ -107,11 +101,8 @@ class OutputNewrelicEventsTimeoutRetrySettings(BaseModel):
 class OutputNewrelicEventsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"


@@ -125,29 +116,22 @@ class OutputNewrelicEventsAuthenticationMethod(str, Enum, metaclass=utils.OpenEn
 class OutputNewrelicEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


 class OutputNewrelicEventsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"


 class OutputNewrelicEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

-    # Error
     ERROR = "error"
-    # Backpressure
     BACKPRESSURE = "backpressure"
-    # Always On
     ALWAYS = "always"
cribl_control_plane/models/outputopentelemetry.py

@@ -18,29 +18,22 @@ class OutputOpenTelemetryType(str, Enum):
 class OutputOpenTelemetryProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Select a transport option for OpenTelemetry"""

-    # gRPC
     GRPC = "grpc"
-    # HTTP
     HTTP = "http"


 class OutputOpenTelemetryOTLPVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""

-    # 0.10.0
     ZERO_DOT_10_DOT_0 = "0.10.0"
-    # 1.3.1
     ONE_DOT_3_DOT_1 = "1.3.1"


 class OutputOpenTelemetryCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

-    # None
     NONE = "none"
-    # Deflate
     DEFLATE = "deflate"
-    # Gzip
     GZIP = "gzip"


@@ -49,9 +42,7 @@ class OutputOpenTelemetryHTTPCompressCompression(
 ):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


@@ -82,22 +73,16 @@ class OutputOpenTelemetryFailedRequestLoggingMode(
 ):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

-    # Payload
     PAYLOAD = "payload"
-    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
-    # None
     NONE = "none"


 class OutputOpenTelemetryBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"


@@ -277,29 +262,22 @@ class OutputOpenTelemetryTLSSettingsClientSide(BaseModel):
 class OutputOpenTelemetryPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


 class OutputOpenTelemetryQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"


 class OutputOpenTelemetryMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

-    # Error
     ERROR = "error"
-    # Backpressure
     BACKPRESSURE = "backpressure"
-    # Always On
     ALWAYS = "always"
cribl_control_plane/models/outputprometheus.py

@@ -29,11 +29,8 @@ class OutputPrometheusExtraHTTPHeader(BaseModel):
 class OutputPrometheusFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

-    # Payload
     PAYLOAD = "payload"
-    # Payload + Headers
     PAYLOAD_AND_HEADERS = "payloadAndHeaders"
-    # None
     NONE = "none"


@@ -94,11 +91,8 @@ class OutputPrometheusTimeoutRetrySettings(BaseModel):
 class OutputPrometheusBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
-    # Persistent Queue
     QUEUE = "queue"


@@ -116,29 +110,22 @@ class OutputPrometheusAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta
 class OutputPrometheusCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

-    # None
     NONE = "none"
-    # Gzip
     GZIP = "gzip"


 class OutputPrometheusQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    # Block
     BLOCK = "block"
-    # Drop new data
     DROP = "drop"


 class OutputPrometheusMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

-    # Error
     ERROR = "error"
-    # Backpressure
     BACKPRESSURE = "backpressure"
-    # Always On
     ALWAYS = "always"
cribl_control_plane/models/outputring.py

@@ -30,9 +30,7 @@ class OutputRingDataCompressionFormat(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputRingBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"
cribl_control_plane/models/outputs3.py

@@ -18,11 +18,8 @@ class OutputS3Type(str, Enum):
 class OutputS3AuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

-    # Auto
     AUTO = "auto"
-    # Manual
     MANUAL = "manual"
-    # Secret Key pair
     SECRET = "secret"


@@ -36,78 +33,54 @@ class OutputS3SignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputS3ObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""

-    # Private
     PRIVATE = "private"
-    # Public Read Only
     PUBLIC_READ = "public-read"
-    # Public Read/Write
     PUBLIC_READ_WRITE = "public-read-write"
-    # Authenticated Read Only
     AUTHENTICATED_READ = "authenticated-read"
-    # AWS EC2 AMI Read Only
     AWS_EXEC_READ = "aws-exec-read"
-    # Bucket Owner Read Only
     BUCKET_OWNER_READ = "bucket-owner-read"
-    # Bucket Owner Full Control
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"


 class OutputS3StorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""

-    # Standard
     STANDARD = "STANDARD"
-    # Reduced Redundancy Storage
     REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
-    # Standard, Infrequent Access
     STANDARD_IA = "STANDARD_IA"
-    # One Zone, Infrequent Access
     ONEZONE_IA = "ONEZONE_IA"
-    # Intelligent Tiering
     INTELLIGENT_TIERING = "INTELLIGENT_TIERING"
-    # Glacier Flexible Retrieval
     GLACIER = "GLACIER"
-    # Glacier Instant Retrieval
     GLACIER_IR = "GLACIER_IR"
-    # Glacier Deep Archive
     DEEP_ARCHIVE = "DEEP_ARCHIVE"


 class OutputS3ServerSideEncryptionForUploadedObjects(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
-    # Amazon S3 Managed Key
     AES256 = "AES256"
-    # AWS KMS Managed Key
     AWS_KMS = "aws:kms"


 class OutputS3DataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""

-    # JSON
     JSON = "json"
-    # Raw
     RAW = "raw"
-    # Parquet
     PARQUET = "parquet"


 class OutputS3BackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"


 class OutputS3DiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"


@@ -121,31 +94,23 @@ class OutputS3Compression(str, Enum, metaclass=utils.OpenEnumMeta):
 class OutputS3CompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""

-    # Best Speed
     BEST_SPEED = "best_speed"
-    # Normal
     NORMAL = "normal"
-    # Best Compression
     BEST_COMPRESSION = "best_compression"


 class OutputS3ParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""

-    # 1.0
     PARQUET_1_0 = "PARQUET_1_0"
-    # 2.4
     PARQUET_2_4 = "PARQUET_2_4"
-    # 2.6
     PARQUET_2_6 = "PARQUET_2_6"


 class OutputS3DataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""

-    # V1
     DATA_PAGE_V1 = "DATA_PAGE_V1"
-    # V2
     DATA_PAGE_V2 = "DATA_PAGE_V2"
cribl_control_plane/models/outputsecuritylake.py

@@ -18,11 +18,8 @@ class OutputSecurityLakeType(str, Enum):
 class OutputSecurityLakeAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

-    # Auto
     AUTO = "auto"
-    # Manual
     MANUAL = "manual"
-    # Secret Key pair
     SECRET = "secret"


@@ -36,87 +33,61 @@ class OutputSecurityLakeSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta
 class OutputSecurityLakeObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""

-    # Private
     PRIVATE = "private"
-    # Public Read Only
     PUBLIC_READ = "public-read"
-    # Public Read/Write
     PUBLIC_READ_WRITE = "public-read-write"
-    # Authenticated Read Only
     AUTHENTICATED_READ = "authenticated-read"
-    # AWS EC2 AMI Read Only
     AWS_EXEC_READ = "aws-exec-read"
-    # Bucket Owner Read Only
     BUCKET_OWNER_READ = "bucket-owner-read"
-    # Bucket Owner Full Control
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"


 class OutputSecurityLakeStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""

-    # Standard
     STANDARD = "STANDARD"
-    # Reduced Redundancy Storage
     REDUCED_REDUNDANCY = "REDUCED_REDUNDANCY"
-    # Standard, Infrequent Access
     STANDARD_IA = "STANDARD_IA"
-    # One Zone, Infrequent Access
     ONEZONE_IA = "ONEZONE_IA"
-    # Intelligent Tiering
     INTELLIGENT_TIERING = "INTELLIGENT_TIERING"
-    # Glacier Flexible Retrieval
     GLACIER = "GLACIER"
-    # Glacier Instant Retrieval
     GLACIER_IR = "GLACIER_IR"
-    # Glacier Deep Archive
     DEEP_ARCHIVE = "DEEP_ARCHIVE"


 class OutputSecurityLakeServerSideEncryptionForUploadedObjects(
     str, Enum, metaclass=utils.OpenEnumMeta
 ):
-    # Amazon S3 Managed Key
     AES256 = "AES256"
-    # AWS KMS Managed Key
     AWS_KMS = "aws:kms"


 class OutputSecurityLakeBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"


 class OutputSecurityLakeDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

-    # Block
     BLOCK = "block"
-    # Drop
     DROP = "drop"


 class OutputSecurityLakeParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""

-    # 1.0
     PARQUET_1_0 = "PARQUET_1_0"
-    # 2.4
     PARQUET_2_4 = "PARQUET_2_4"
-    # 2.6
     PARQUET_2_6 = "PARQUET_2_6"


 class OutputSecurityLakeDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""

-    # V1
     DATA_PAGE_V1 = "DATA_PAGE_V1"
-    # V2
     DATA_PAGE_V2 = "DATA_PAGE_V2"
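Across outputnewrelic, outputnewrelicevents, outputopentelemetry, outputprometheus, outputring, outputs3, and outputsecuritylake, the hunks above only strip inline display-label comments from these open enums; member names and values are unchanged. A small sanity sketch under that reading (class names from the diff, import path from the file list):

# Sketch: the comment-only cleanup leaves enum members intact, so existing
# references and string comparisons keep working across the upgrade.
from cribl_control_plane.models.outputs3 import (
    OutputS3ObjectACL,
    OutputS3StorageClass,
)

assert OutputS3ObjectACL.BUCKET_OWNER_FULL_CONTROL == "bucket-owner-full-control"
assert OutputS3StorageClass.INTELLIGENT_TIERING == "INTELLIGENT_TIERING"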