cribl-control-plane 0.0.16__py3-none-any.whl → 0.0.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of cribl-control-plane might be problematic.
Files changed (133)
  1. cribl_control_plane/_version.py +3 -3
  2. cribl_control_plane/errors/healthstatus_error.py +2 -8
  3. cribl_control_plane/models/__init__.py +4124 -4124
  4. cribl_control_plane/models/createinputop.py +1734 -2771
  5. cribl_control_plane/models/createoutputop.py +2153 -4314
  6. cribl_control_plane/models/healthstatus.py +4 -7
  7. cribl_control_plane/models/inputappscope.py +16 -36
  8. cribl_control_plane/models/inputazureblob.py +8 -19
  9. cribl_control_plane/models/inputcollection.py +6 -15
  10. cribl_control_plane/models/inputconfluentcloud.py +20 -45
  11. cribl_control_plane/models/inputcribl.py +6 -13
  12. cribl_control_plane/models/inputcriblhttp.py +10 -27
  13. cribl_control_plane/models/inputcribllakehttp.py +12 -26
  14. cribl_control_plane/models/inputcriblmetrics.py +6 -14
  15. cribl_control_plane/models/inputcribltcp.py +10 -27
  16. cribl_control_plane/models/inputcrowdstrike.py +12 -28
  17. cribl_control_plane/models/inputdatadogagent.py +10 -28
  18. cribl_control_plane/models/inputdatagen.py +6 -13
  19. cribl_control_plane/models/inputedgeprometheus.py +31 -64
  20. cribl_control_plane/models/inputelastic.py +16 -44
  21. cribl_control_plane/models/inputeventhub.py +8 -19
  22. cribl_control_plane/models/inputexec.py +8 -16
  23. cribl_control_plane/models/inputfile.py +8 -17
  24. cribl_control_plane/models/inputfirehose.py +10 -27
  25. cribl_control_plane/models/inputgooglepubsub.py +8 -23
  26. cribl_control_plane/models/inputgrafana_union.py +35 -81
  27. cribl_control_plane/models/inputhttp.py +10 -27
  28. cribl_control_plane/models/inputhttpraw.py +10 -27
  29. cribl_control_plane/models/inputjournalfiles.py +6 -16
  30. cribl_control_plane/models/inputkafka.py +16 -45
  31. cribl_control_plane/models/inputkinesis.py +16 -42
  32. cribl_control_plane/models/inputkubeevents.py +6 -13
  33. cribl_control_plane/models/inputkubelogs.py +10 -18
  34. cribl_control_plane/models/inputkubemetrics.py +10 -18
  35. cribl_control_plane/models/inputloki.py +12 -33
  36. cribl_control_plane/models/inputmetrics.py +10 -25
  37. cribl_control_plane/models/inputmodeldriventelemetry.py +12 -32
  38. cribl_control_plane/models/inputmsk.py +18 -52
  39. cribl_control_plane/models/inputnetflow.py +6 -15
  40. cribl_control_plane/models/inputoffice365mgmt.py +16 -37
  41. cribl_control_plane/models/inputoffice365msgtrace.py +18 -39
  42. cribl_control_plane/models/inputoffice365service.py +18 -39
  43. cribl_control_plane/models/inputopentelemetry.py +18 -42
  44. cribl_control_plane/models/inputprometheus.py +20 -54
  45. cribl_control_plane/models/inputprometheusrw.py +12 -34
  46. cribl_control_plane/models/inputrawudp.py +6 -15
  47. cribl_control_plane/models/inputs3.py +10 -23
  48. cribl_control_plane/models/inputs3inventory.py +12 -28
  49. cribl_control_plane/models/inputsecuritylake.py +12 -29
  50. cribl_control_plane/models/inputsnmp.py +8 -20
  51. cribl_control_plane/models/inputsplunk.py +14 -37
  52. cribl_control_plane/models/inputsplunkhec.py +12 -33
  53. cribl_control_plane/models/inputsplunksearch.py +16 -37
  54. cribl_control_plane/models/inputsqs.py +12 -31
  55. cribl_control_plane/models/inputsyslog_union.py +29 -53
  56. cribl_control_plane/models/inputsystemmetrics.py +26 -50
  57. cribl_control_plane/models/inputsystemstate.py +10 -18
  58. cribl_control_plane/models/inputtcp.py +12 -33
  59. cribl_control_plane/models/inputtcpjson.py +12 -33
  60. cribl_control_plane/models/inputwef.py +20 -45
  61. cribl_control_plane/models/inputwindowsmetrics.py +26 -46
  62. cribl_control_plane/models/inputwineventlogs.py +12 -22
  63. cribl_control_plane/models/inputwiz.py +10 -25
  64. cribl_control_plane/models/inputzscalerhec.py +12 -33
  65. cribl_control_plane/models/output.py +3 -6
  66. cribl_control_plane/models/outputazureblob.py +20 -52
  67. cribl_control_plane/models/outputazuredataexplorer.py +30 -77
  68. cribl_control_plane/models/outputazureeventhub.py +20 -44
  69. cribl_control_plane/models/outputazurelogs.py +14 -37
  70. cribl_control_plane/models/outputclickhouse.py +22 -59
  71. cribl_control_plane/models/outputcloudwatch.py +12 -33
  72. cribl_control_plane/models/outputconfluentcloud.py +32 -75
  73. cribl_control_plane/models/outputcriblhttp.py +18 -46
  74. cribl_control_plane/models/outputcribllake.py +18 -48
  75. cribl_control_plane/models/outputcribltcp.py +20 -47
  76. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
  77. cribl_control_plane/models/outputdatadog.py +22 -50
  78. cribl_control_plane/models/outputdataset.py +20 -48
  79. cribl_control_plane/models/outputdefault.py +2 -5
  80. cribl_control_plane/models/outputdevnull.py +2 -5
  81. cribl_control_plane/models/outputdiskspool.py +4 -9
  82. cribl_control_plane/models/outputdls3.py +26 -72
  83. cribl_control_plane/models/outputdynatracehttp.py +22 -57
  84. cribl_control_plane/models/outputdynatraceotlp.py +24 -59
  85. cribl_control_plane/models/outputelastic.py +20 -45
  86. cribl_control_plane/models/outputelasticcloud.py +14 -40
  87. cribl_control_plane/models/outputexabeam.py +12 -33
  88. cribl_control_plane/models/outputfilesystem.py +16 -41
  89. cribl_control_plane/models/outputgooglechronicle.py +18 -54
  90. cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
  91. cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
  92. cribl_control_plane/models/outputgooglepubsub.py +16 -39
  93. cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
  94. cribl_control_plane/models/outputgraphite.py +16 -35
  95. cribl_control_plane/models/outputhoneycomb.py +14 -37
  96. cribl_control_plane/models/outputhumiohec.py +18 -47
  97. cribl_control_plane/models/outputinfluxdb.py +18 -44
  98. cribl_control_plane/models/outputkafka.py +28 -73
  99. cribl_control_plane/models/outputkinesis.py +18 -44
  100. cribl_control_plane/models/outputloki.py +18 -43
  101. cribl_control_plane/models/outputminio.py +26 -69
  102. cribl_control_plane/models/outputmsk.py +30 -81
  103. cribl_control_plane/models/outputnetflow.py +2 -5
  104. cribl_control_plane/models/outputnewrelic.py +20 -45
  105. cribl_control_plane/models/outputnewrelicevents.py +16 -45
  106. cribl_control_plane/models/outputopentelemetry.py +28 -69
  107. cribl_control_plane/models/outputprometheus.py +14 -37
  108. cribl_control_plane/models/outputring.py +10 -21
  109. cribl_control_plane/models/outputrouter.py +2 -5
  110. cribl_control_plane/models/outputs3.py +28 -72
  111. cribl_control_plane/models/outputsecuritylake.py +20 -56
  112. cribl_control_plane/models/outputsentinel.py +20 -49
  113. cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
  114. cribl_control_plane/models/outputservicenow.py +26 -64
  115. cribl_control_plane/models/outputsignalfx.py +16 -39
  116. cribl_control_plane/models/outputsnmp.py +2 -5
  117. cribl_control_plane/models/outputsns.py +16 -40
  118. cribl_control_plane/models/outputsplunk.py +26 -64
  119. cribl_control_plane/models/outputsplunkhec.py +14 -37
  120. cribl_control_plane/models/outputsplunklb.py +36 -83
  121. cribl_control_plane/models/outputsqs.py +18 -45
  122. cribl_control_plane/models/outputstatsd.py +16 -34
  123. cribl_control_plane/models/outputstatsdext.py +14 -33
  124. cribl_control_plane/models/outputsumologic.py +14 -37
  125. cribl_control_plane/models/outputsyslog.py +26 -60
  126. cribl_control_plane/models/outputtcpjson.py +22 -54
  127. cribl_control_plane/models/outputwavefront.py +14 -37
  128. cribl_control_plane/models/outputwebhook.py +24 -60
  129. cribl_control_plane/models/outputxsiam.py +16 -37
  130. {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/METADATA +1 -1
  131. cribl_control_plane-0.0.17.dist-info/RECORD +215 -0
  132. cribl_control_plane-0.0.16.dist-info/RECORD +0 -215
  133. {cribl_control_plane-0.0.16.dist-info → cribl_control_plane-0.0.17.dist-info}/WHEEL +0 -0
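
Only four of the changed model files are reproduced below (outputexabeam.py, outputfilesystem.py, outputgooglechronicle.py, outputgooglecloudlogging.py), but each shows the same mechanical change repeated across the package: every generated enum drops `metaclass=utils.OpenEnumMeta` from its class signature, and every enum-typed field drops its `PlainValidator(validate_open_enum(False))` wrapper. In 0.0.16 those hooks made the enums "open"; in 0.0.17 they are plain `str, Enum` subclasses, so pydantic's standard closed-enum validation applies. A minimal sketch of the behavioral difference, assuming the removed `validate_open_enum` helper behaved like typical Speakeasy open-enum validators (the `validate_like_open_enum` function below is a hypothetical stand-in, not the SDK's actual implementation):

from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class ObjectACL(str, Enum):
    PRIVATE = "private"
    PUBLIC_READ = "public-read"


def validate_like_open_enum(value: object) -> object:
    # Hypothetical stand-in for the removed validate_open_enum(False):
    # accept declared members, pass anything else through unchanged.
    try:
        return ObjectACL(value)
    except ValueError:
        return value


class OldStyleModel(pydantic.BaseModel):
    # 0.0.16 shape: the PlainValidator replaces pydantic's enum validation
    acl: Annotated[Optional[ObjectACL], PlainValidator(validate_like_open_enum)] = None


class NewStyleModel(pydantic.BaseModel):
    # 0.0.17 shape: pydantic enforces enum membership
    acl: Optional[ObjectACL] = None


print(OldStyleModel(acl="bucket-owner-read").acl)  # passes through as a raw string
try:
    NewStyleModel(acl="bucket-owner-read")
except pydantic.ValidationError as err:
    print(err.errors()[0]["type"])  # "enum": unknown values are now rejected

If that reading is right, the headline consequence of this release is that enum fields which previously tolerated undeclared values now raise a ValidationError.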
cribl_control_plane/models/outputexabeam.py

@@ -1,28 +1,25 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputExabeamType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamType(str, Enum):
     EXABEAM = "exabeam"


-class OutputExabeamSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamSignatureVersion(str, Enum):
     r"""Signature version to use for signing Google Cloud Storage requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputExabeamObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamObjectACL(str, Enum):
     r"""Object ACL to assign to uploaded objects"""

     PRIVATE = "private"
@@ -33,7 +30,7 @@ class OutputExabeamObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     PUBLIC_READ = "public-read"


-class OutputExabeamStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamStorageClass(str, Enum):
     r"""Storage class to select for uploaded objects"""

     STANDARD = "STANDARD"
@@ -42,14 +39,14 @@ class OutputExabeamStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     ARCHIVE = "ARCHIVE"


-class OutputExabeamBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
     DROP = "drop"


-class OutputExabeamDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputExabeamDiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
@@ -143,9 +140,7 @@ class OutputExabeam(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputExabeamType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputExabeamType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -170,28 +165,18 @@ class OutputExabeam(BaseModel):
     r"""Google Cloud Storage service endpoint"""

     signature_version: Annotated[
-        Annotated[
-            Optional[OutputExabeamSignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputExabeamSignatureVersion],
         pydantic.Field(alias="signatureVersion"),
     ] = OutputExabeamSignatureVersion.V4
     r"""Signature version to use for signing Google Cloud Storage requests"""

     object_acl: Annotated[
-        Annotated[
-            Optional[OutputExabeamObjectACL], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="objectACL"),
+        Optional[OutputExabeamObjectACL], pydantic.Field(alias="objectACL")
     ] = OutputExabeamObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""

     storage_class: Annotated[
-        Annotated[
-            Optional[OutputExabeamStorageClass],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="storageClass"),
+        Optional[OutputExabeamStorageClass], pydantic.Field(alias="storageClass")
     ] = None
     r"""Storage class to select for uploaded objects"""

@@ -231,10 +216,7 @@ class OutputExabeam(BaseModel):
     r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputExabeamBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputExabeamBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputExabeamBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -245,10 +227,7 @@ class OutputExabeam(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""

     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputExabeamDiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputExabeamDiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputExabeamDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
cribl_control_plane/models/outputfilesystem.py

@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputFilesystemType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputFilesystemType(str, Enum):
     FILESYSTEM = "filesystem"


-class OutputFilesystemDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputFilesystemDataFormat(str, Enum):
     r"""Format of the output data"""

     JSON = "json"
@@ -23,28 +20,28 @@ class OutputFilesystemDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET = "parquet"


-class OutputFilesystemBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputFilesystemBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
     DROP = "drop"


-class OutputFilesystemDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputFilesystemDiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
     DROP = "drop"


-class OutputFilesystemCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputFilesystemCompression(str, Enum):
     r"""Data compression format to apply to HTTP content before it is delivered"""

     NONE = "none"
     GZIP = "gzip"


-class OutputFilesystemCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputFilesystemCompressionLevel(str, Enum):
     r"""Compression level to apply before moving files to final destination"""

     BEST_SPEED = "best_speed"
@@ -52,7 +49,7 @@ class OutputFilesystemCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     BEST_COMPRESSION = "best_compression"


-class OutputFilesystemParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputFilesystemParquetVersion(str, Enum):
     r"""Determines which data types are supported and how they are represented"""

     PARQUET_1_0 = "PARQUET_1_0"
@@ -60,7 +57,7 @@ class OutputFilesystemParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET_2_6 = "PARQUET_2_6"


-class OutputFilesystemDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputFilesystemDataPageVersion(str, Enum):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""

     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -158,7 +155,7 @@ class OutputFilesystemTypedDict(TypedDict):


 class OutputFilesystem(BaseModel):
-    type: Annotated[OutputFilesystemType, PlainValidator(validate_open_enum(False))]
+    type: OutputFilesystemType

     dest_path: Annotated[str, pydantic.Field(alias="destPath")]
     r"""Final destination for the output files"""
@@ -199,11 +196,7 @@ class OutputFilesystem(BaseModel):
     r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""

     format_: Annotated[
-        Annotated[
-            Optional[OutputFilesystemDataFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputFilesystemDataFormat], pydantic.Field(alias="format")
     ] = OutputFilesystemDataFormat.JSON
     r"""Format of the output data"""

@@ -246,10 +239,7 @@ class OutputFilesystem(BaseModel):
     r"""Buffer size used to write to a file"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputFilesystemBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputFilesystemBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputFilesystemBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -260,26 +250,18 @@ class OutputFilesystem(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""

     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputFilesystemDiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputFilesystemDiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputFilesystemDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     description: Optional[str] = None

-    compress: Annotated[
-        Optional[OutputFilesystemCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputFilesystemCompression.GZIP
+    compress: Optional[OutputFilesystemCompression] = OutputFilesystemCompression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""

     compression_level: Annotated[
-        Annotated[
-            Optional[OutputFilesystemCompressionLevel],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputFilesystemCompressionLevel],
         pydantic.Field(alias="compressionLevel"),
     ] = OutputFilesystemCompressionLevel.BEST_SPEED
     r"""Compression level to apply before moving files to final destination"""
@@ -290,19 +272,12 @@ class OutputFilesystem(BaseModel):
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""

     parquet_version: Annotated[
-        Annotated[
-            Optional[OutputFilesystemParquetVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="parquetVersion"),
+        Optional[OutputFilesystemParquetVersion], pydantic.Field(alias="parquetVersion")
     ] = OutputFilesystemParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""

     parquet_data_page_version: Annotated[
-        Annotated[
-            Optional[OutputFilesystemDataPageVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputFilesystemDataPageVersion],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputFilesystemDataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
cribl_control_plane/models/outputgooglechronicle.py

@@ -1,28 +1,23 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputGoogleChronicleType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputGoogleChronicleType(str, Enum):
     GOOGLE_CHRONICLE = "google_chronicle"


-class OutputGoogleChronicleAPIVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputGoogleChronicleAPIVersion(str, Enum):
     V1 = "v1"
     V2 = "v2"


-class OutputGoogleChronicleAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputGoogleChronicleAuthenticationMethod(str, Enum):
     MANUAL = "manual"
     SECRET = "secret"
     SERVICE_ACCOUNT = "serviceAccount"
@@ -83,7 +78,7 @@ class OutputGoogleChronicleTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class SendEventsAs(str, Enum, metaclass=utils.OpenEnumMeta):
+class SendEventsAs(str, Enum):
     UNSTRUCTURED = "unstructured"
     UDM = "udm"

@@ -99,9 +94,7 @@ class OutputGoogleChronicleExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputGoogleChronicleFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputGoogleChronicleFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -109,9 +102,7 @@ class OutputGoogleChronicleFailedRequestLoggingMode(
     NONE = "none"


-class OutputGoogleChronicleBackpressureBehavior(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputGoogleChronicleBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -141,21 +132,21 @@ class CustomLabel(BaseModel):
     value: str


-class OutputGoogleChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputGoogleChronicleCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputGoogleChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputGoogleChronicleQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputGoogleChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputGoogleChronicleMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -265,9 +256,7 @@ class OutputGoogleChronicleTypedDict(TypedDict):


 class OutputGoogleChronicle(BaseModel):
-    type: Annotated[
-        OutputGoogleChronicleType, PlainValidator(validate_open_enum(False))
-    ]
+    type: OutputGoogleChronicleType

     id: Optional[str] = None
     r"""Unique ID for this output"""
@@ -287,18 +276,11 @@ class OutputGoogleChronicle(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     api_version: Annotated[
-        Annotated[
-            Optional[OutputGoogleChronicleAPIVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="apiVersion"),
+        Optional[OutputGoogleChronicleAPIVersion], pydantic.Field(alias="apiVersion")
     ] = OutputGoogleChronicleAPIVersion.V1

     authentication_method: Annotated[
-        Annotated[
-            Optional[OutputGoogleChronicleAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputGoogleChronicleAuthenticationMethod],
         pydantic.Field(alias="authenticationMethod"),
     ] = OutputGoogleChronicleAuthenticationMethod.SERVICE_ACCOUNT

@@ -319,8 +301,7 @@ class OutputGoogleChronicle(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     log_format_type: Annotated[
-        Annotated[Optional[SendEventsAs], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="logFormatType"),
+        Optional[SendEventsAs], pydantic.Field(alias="logFormatType")
     ] = SendEventsAs.UNSTRUCTURED

     region: Optional[str] = None
@@ -365,10 +346,7 @@ class OutputGoogleChronicle(BaseModel):
     r"""Headers to add to all events"""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputGoogleChronicleFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputGoogleChronicleFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputGoogleChronicleFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -384,10 +362,7 @@ class OutputGoogleChronicle(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputGoogleChronicleBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputGoogleChronicleBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputGoogleChronicleBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -455,29 +430,18 @@ class OutputGoogleChronicle(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputGoogleChronicleCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputGoogleChronicleCompression], pydantic.Field(alias="pqCompress")
     ] = OutputGoogleChronicleCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputGoogleChronicleQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputGoogleChronicleQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputGoogleChronicleQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputGoogleChronicleMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputGoogleChronicleMode], pydantic.Field(alias="pqMode")
     ] = OutputGoogleChronicleMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
cribl_control_plane/models/outputgooglecloudlogging.py

@@ -1,28 +1,25 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputGoogleCloudLoggingType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputGoogleCloudLoggingType(str, Enum):
     GOOGLE_CLOUD_LOGGING = "google_cloud_logging"


-class LogLocationType(str, Enum, metaclass=utils.OpenEnumMeta):
+class LogLocationType(str, Enum):
     PROJECT = "project"
     ORGANIZATION = "organization"
     BILLING_ACCOUNT = "billingAccount"
     FOLDER = "folder"


-class PayloadFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class PayloadFormat(str, Enum):
     r"""Format to use when sending payload. Defaults to Text."""

     TEXT = "text"
@@ -59,9 +56,7 @@ class ResourceTypeLabel(BaseModel):
     r"""JavaScript expression to compute the label's value."""


-class OutputGoogleCloudLoggingGoogleAuthenticationMethod(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputGoogleCloudLoggingGoogleAuthenticationMethod(str, Enum):
     r"""Choose Auto to use Google Application Default Credentials (ADC), Manual to enter Google service account credentials directly, or Secret to select or create a stored secret that references Google service account credentials."""

     AUTO = "auto"
@@ -69,9 +64,7 @@ class OutputGoogleCloudLoggingGoogleAuthenticationMethod(
     SECRET = "secret"


-class OutputGoogleCloudLoggingBackpressureBehavior(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputGoogleCloudLoggingBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -79,23 +72,21 @@ class OutputGoogleCloudLoggingBackpressureBehavior(
     QUEUE = "queue"


-class OutputGoogleCloudLoggingCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputGoogleCloudLoggingCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputGoogleCloudLoggingQueueFullBehavior(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputGoogleCloudLoggingQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputGoogleCloudLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputGoogleCloudLoggingMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -240,8 +231,7 @@ class OutputGoogleCloudLoggingTypedDict(TypedDict):

 class OutputGoogleCloudLogging(BaseModel):
     log_location_type: Annotated[
-        Annotated[LogLocationType, PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="logLocationType"),
+        LogLocationType, pydantic.Field(alias="logLocationType")
     ]

     log_name_expression: Annotated[str, pydantic.Field(alias="logNameExpression")]
@@ -255,10 +245,7 @@ class OutputGoogleCloudLogging(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputGoogleCloudLoggingType],
-        PlainValidator(validate_open_enum(False)),
-    ] = None
+    type: Optional[OutputGoogleCloudLoggingType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -275,8 +262,7 @@ class OutputGoogleCloudLogging(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     payload_format: Annotated[
-        Annotated[Optional[PayloadFormat], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="payloadFormat"),
+        Optional[PayloadFormat], pydantic.Field(alias="payloadFormat")
     ] = PayloadFormat.TEXT
     r"""Format to use when sending payload. Defaults to Text."""

@@ -306,10 +292,7 @@ class OutputGoogleCloudLogging(BaseModel):
     r"""JavaScript expression to compute the value of the insert ID field."""

     google_auth_method: Annotated[
-        Annotated[
-            Optional[OutputGoogleCloudLoggingGoogleAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputGoogleCloudLoggingGoogleAuthenticationMethod],
         pydantic.Field(alias="googleAuthMethod"),
     ] = OutputGoogleCloudLoggingGoogleAuthenticationMethod.MANUAL
     r"""Choose Auto to use Google Application Default Credentials (ADC), Manual to enter Google service account credentials directly, or Secret to select or create a stored secret that references Google service account credentials."""
@@ -492,10 +475,7 @@ class OutputGoogleCloudLogging(BaseModel):
     r"""A JavaScript expression that evaluates to the the sampling decision of the span associated with the log entry. See the [documentation](https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry) for details."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputGoogleCloudLoggingBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputGoogleCloudLoggingBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputGoogleCloudLoggingBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -526,29 +506,19 @@ class OutputGoogleCloudLogging(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputGoogleCloudLoggingCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputGoogleCloudLoggingCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputGoogleCloudLoggingCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputGoogleCloudLoggingQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputGoogleCloudLoggingQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputGoogleCloudLoggingQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputGoogleCloudLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputGoogleCloudLoggingMode], pydantic.Field(alias="pqMode")
     ] = OutputGoogleCloudLoggingMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""