cribl-control-plane 0.0.50rc2__py3-none-any.whl → 0.0.52__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cribl-control-plane might be problematic; see the registry's advisory page for details.

Files changed (182)
  1. cribl_control_plane/_hooks/clientcredentials.py +91 -41
  2. cribl_control_plane/_version.py +6 -4
  3. cribl_control_plane/errors/apierror.py +1 -1
  4. cribl_control_plane/errors/criblcontrolplaneerror.py +1 -1
  5. cribl_control_plane/errors/error.py +1 -1
  6. cribl_control_plane/errors/healthstatus_error.py +3 -9
  7. cribl_control_plane/errors/no_response_error.py +1 -1
  8. cribl_control_plane/errors/responsevalidationerror.py +1 -1
  9. cribl_control_plane/groups_sdk.py +4 -4
  10. cribl_control_plane/health.py +2 -6
  11. cribl_control_plane/models/__init__.py +31 -56
  12. cribl_control_plane/models/appmode.py +13 -0
  13. cribl_control_plane/models/cacheconnection.py +2 -10
  14. cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
  15. cribl_control_plane/models/cloudprovider.py +1 -2
  16. cribl_control_plane/models/configgroup.py +4 -24
  17. cribl_control_plane/models/configgroupcloud.py +2 -6
  18. cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
  19. cribl_control_plane/models/createinputhectokenbyidop.py +5 -6
  20. cribl_control_plane/models/createversionpushop.py +5 -5
  21. cribl_control_plane/models/cribllakedataset.py +2 -8
  22. cribl_control_plane/models/datasetmetadata.py +2 -8
  23. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
  24. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
  25. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
  26. cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
  27. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
  28. cribl_control_plane/models/getsummaryop.py +2 -7
  29. cribl_control_plane/models/getversionshowop.py +5 -6
  30. cribl_control_plane/models/gitinfo.py +3 -14
  31. cribl_control_plane/models/hbcriblinfo.py +3 -24
  32. cribl_control_plane/models/healthstatus.py +4 -7
  33. cribl_control_plane/models/heartbeatmetadata.py +0 -3
  34. cribl_control_plane/models/input.py +63 -65
  35. cribl_control_plane/models/inputappscope.py +14 -34
  36. cribl_control_plane/models/inputazureblob.py +6 -17
  37. cribl_control_plane/models/inputcollection.py +4 -11
  38. cribl_control_plane/models/inputconfluentcloud.py +32 -41
  39. cribl_control_plane/models/inputcribl.py +4 -11
  40. cribl_control_plane/models/inputcriblhttp.py +8 -23
  41. cribl_control_plane/models/inputcribllakehttp.py +10 -22
  42. cribl_control_plane/models/inputcriblmetrics.py +4 -12
  43. cribl_control_plane/models/inputcribltcp.py +8 -23
  44. cribl_control_plane/models/inputcrowdstrike.py +10 -26
  45. cribl_control_plane/models/inputdatadogagent.py +8 -24
  46. cribl_control_plane/models/inputdatagen.py +4 -11
  47. cribl_control_plane/models/inputedgeprometheus.py +24 -58
  48. cribl_control_plane/models/inputelastic.py +14 -40
  49. cribl_control_plane/models/inputeventhub.py +6 -15
  50. cribl_control_plane/models/inputexec.py +6 -14
  51. cribl_control_plane/models/inputfile.py +6 -15
  52. cribl_control_plane/models/inputfirehose.py +8 -23
  53. cribl_control_plane/models/inputgooglepubsub.py +6 -19
  54. cribl_control_plane/models/inputgrafana.py +24 -67
  55. cribl_control_plane/models/inputhttp.py +8 -23
  56. cribl_control_plane/models/inputhttpraw.py +8 -23
  57. cribl_control_plane/models/inputjournalfiles.py +4 -12
  58. cribl_control_plane/models/inputkafka.py +28 -41
  59. cribl_control_plane/models/inputkinesis.py +14 -38
  60. cribl_control_plane/models/inputkubeevents.py +4 -11
  61. cribl_control_plane/models/inputkubelogs.py +8 -16
  62. cribl_control_plane/models/inputkubemetrics.py +8 -16
  63. cribl_control_plane/models/inputloki.py +10 -29
  64. cribl_control_plane/models/inputmetrics.py +8 -23
  65. cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
  66. cribl_control_plane/models/inputmsk.py +30 -48
  67. cribl_control_plane/models/inputnetflow.py +4 -11
  68. cribl_control_plane/models/inputoffice365mgmt.py +14 -33
  69. cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
  70. cribl_control_plane/models/inputoffice365service.py +16 -35
  71. cribl_control_plane/models/inputopentelemetry.py +16 -38
  72. cribl_control_plane/models/inputprometheus.py +18 -50
  73. cribl_control_plane/models/inputprometheusrw.py +10 -30
  74. cribl_control_plane/models/inputrawudp.py +4 -11
  75. cribl_control_plane/models/inputs3.py +8 -21
  76. cribl_control_plane/models/inputs3inventory.py +10 -26
  77. cribl_control_plane/models/inputsecuritylake.py +10 -27
  78. cribl_control_plane/models/inputsnmp.py +6 -16
  79. cribl_control_plane/models/inputsplunk.py +12 -33
  80. cribl_control_plane/models/inputsplunkhec.py +10 -29
  81. cribl_control_plane/models/inputsplunksearch.py +14 -33
  82. cribl_control_plane/models/inputsqs.py +10 -27
  83. cribl_control_plane/models/inputsyslog.py +16 -43
  84. cribl_control_plane/models/inputsystemmetrics.py +24 -48
  85. cribl_control_plane/models/inputsystemstate.py +8 -16
  86. cribl_control_plane/models/inputtcp.py +10 -29
  87. cribl_control_plane/models/inputtcpjson.py +10 -29
  88. cribl_control_plane/models/inputwef.py +14 -37
  89. cribl_control_plane/models/inputwindowsmetrics.py +24 -44
  90. cribl_control_plane/models/inputwineventlogs.py +10 -20
  91. cribl_control_plane/models/inputwiz.py +8 -21
  92. cribl_control_plane/models/inputwizwebhook.py +8 -23
  93. cribl_control_plane/models/inputzscalerhec.py +10 -29
  94. cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
  95. cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
  96. cribl_control_plane/models/masterworkerentry.py +2 -7
  97. cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
  98. cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
  99. cribl_control_plane/models/nodeprovidedinfo.py +0 -3
  100. cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
  101. cribl_control_plane/models/nodeupgradestate.py +1 -2
  102. cribl_control_plane/models/nodeupgradestatus.py +5 -13
  103. cribl_control_plane/models/output.py +79 -84
  104. cribl_control_plane/models/outputazureblob.py +18 -48
  105. cribl_control_plane/models/outputazuredataexplorer.py +28 -73
  106. cribl_control_plane/models/outputazureeventhub.py +18 -40
  107. cribl_control_plane/models/outputazurelogs.py +12 -35
  108. cribl_control_plane/models/outputclickhouse.py +20 -55
  109. cribl_control_plane/models/outputcloudwatch.py +10 -29
  110. cribl_control_plane/models/outputconfluentcloud.py +44 -71
  111. cribl_control_plane/models/outputcriblhttp.py +16 -44
  112. cribl_control_plane/models/outputcribllake.py +16 -46
  113. cribl_control_plane/models/outputcribltcp.py +18 -45
  114. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
  115. cribl_control_plane/models/outputdatadog.py +20 -48
  116. cribl_control_plane/models/outputdataset.py +18 -46
  117. cribl_control_plane/models/outputdiskspool.py +2 -7
  118. cribl_control_plane/models/outputdls3.py +24 -68
  119. cribl_control_plane/models/outputdynatracehttp.py +20 -53
  120. cribl_control_plane/models/outputdynatraceotlp.py +22 -55
  121. cribl_control_plane/models/outputelastic.py +18 -43
  122. cribl_control_plane/models/outputelasticcloud.py +12 -36
  123. cribl_control_plane/models/outputexabeam.py +10 -29
  124. cribl_control_plane/models/outputfilesystem.py +14 -39
  125. cribl_control_plane/models/outputgooglechronicle.py +16 -50
  126. cribl_control_plane/models/outputgooglecloudlogging.py +18 -50
  127. cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
  128. cribl_control_plane/models/outputgooglepubsub.py +10 -31
  129. cribl_control_plane/models/outputgrafanacloud.py +32 -97
  130. cribl_control_plane/models/outputgraphite.py +14 -31
  131. cribl_control_plane/models/outputhoneycomb.py +12 -35
  132. cribl_control_plane/models/outputhumiohec.py +16 -43
  133. cribl_control_plane/models/outputinfluxdb.py +16 -42
  134. cribl_control_plane/models/outputkafka.py +40 -69
  135. cribl_control_plane/models/outputkinesis.py +16 -40
  136. cribl_control_plane/models/outputloki.py +16 -41
  137. cribl_control_plane/models/outputminio.py +24 -65
  138. cribl_control_plane/models/outputmsk.py +42 -77
  139. cribl_control_plane/models/outputnewrelic.py +18 -43
  140. cribl_control_plane/models/outputnewrelicevents.py +14 -41
  141. cribl_control_plane/models/outputopentelemetry.py +26 -67
  142. cribl_control_plane/models/outputprometheus.py +12 -35
  143. cribl_control_plane/models/outputring.py +8 -19
  144. cribl_control_plane/models/outputs3.py +26 -68
  145. cribl_control_plane/models/outputsecuritylake.py +18 -52
  146. cribl_control_plane/models/outputsentinel.py +18 -45
  147. cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
  148. cribl_control_plane/models/outputservicenow.py +24 -60
  149. cribl_control_plane/models/outputsignalfx.py +14 -37
  150. cribl_control_plane/models/outputsns.py +14 -36
  151. cribl_control_plane/models/outputsplunk.py +24 -60
  152. cribl_control_plane/models/outputsplunkhec.py +12 -35
  153. cribl_control_plane/models/outputsplunklb.py +30 -77
  154. cribl_control_plane/models/outputsqs.py +16 -41
  155. cribl_control_plane/models/outputstatsd.py +14 -30
  156. cribl_control_plane/models/outputstatsdext.py +12 -29
  157. cribl_control_plane/models/outputsumologic.py +12 -35
  158. cribl_control_plane/models/outputsyslog.py +24 -58
  159. cribl_control_plane/models/outputtcpjson.py +20 -52
  160. cribl_control_plane/models/outputwavefront.py +12 -35
  161. cribl_control_plane/models/outputwebhook.py +22 -58
  162. cribl_control_plane/models/outputxsiam.py +14 -35
  163. cribl_control_plane/models/productscore.py +1 -2
  164. cribl_control_plane/models/rbacresource.py +1 -2
  165. cribl_control_plane/models/resourcepolicy.py +2 -4
  166. cribl_control_plane/models/routecloneconf.py +13 -0
  167. cribl_control_plane/models/routeconf.py +4 -3
  168. cribl_control_plane/models/runnablejobcollection.py +13 -30
  169. cribl_control_plane/models/runnablejobexecutor.py +4 -13
  170. cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
  171. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
  172. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
  173. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +5 -6
  174. cribl_control_plane/models/workertypes.py +1 -2
  175. {cribl_control_plane-0.0.50rc2.dist-info → cribl_control_plane-0.0.52.dist-info}/METADATA +14 -12
  176. cribl_control_plane-0.0.52.dist-info/RECORD +325 -0
  177. cribl_control_plane/models/error.py +0 -16
  178. cribl_control_plane/models/gethealthinfoop.py +0 -17
  179. cribl_control_plane/models/gitshowresult.py +0 -19
  180. cribl_control_plane/models/outputdatabricks.py +0 -282
  181. cribl_control_plane-0.0.50rc2.dist-info/RECORD +0 -327
  182. {cribl_control_plane-0.0.50rc2.dist-info → cribl_control_plane-0.0.52.dist-info}/WHEEL +0 -0
@@ -1,12 +1,9 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
5
4
  from cribl_control_plane.types import BaseModel
6
- from cribl_control_plane.utils import validate_open_enum
7
5
  from enum import Enum
8
6
  import pydantic
9
- from pydantic.functional_validators import PlainValidator
10
7
  from typing import List, Optional
11
8
  from typing_extensions import Annotated, NotRequired, TypedDict
12
9
 
@@ -26,9 +23,7 @@ class OutputElasticCloudExtraHTTPHeader(BaseModel):
26
23
  name: Optional[str] = None
27
24
 
28
25
 
29
- class OutputElasticCloudFailedRequestLoggingMode(
30
- str, Enum, metaclass=utils.OpenEnumMeta
31
- ):
26
+ class OutputElasticCloudFailedRequestLoggingMode(str, Enum):
32
27
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
33
28
 
34
29
  PAYLOAD = "payload"
@@ -47,7 +42,7 @@ class OutputElasticCloudExtraParam(BaseModel):
47
42
  value: str
48
43
 
49
44
 
50
- class OutputElasticCloudAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
45
+ class OutputElasticCloudAuthenticationMethod(str, Enum):
51
46
  r"""Enter credentials directly, or select a stored secret"""
52
47
 
53
48
  MANUAL = "manual"
@@ -66,10 +61,7 @@ class OutputElasticCloudAuth(BaseModel):
66
61
  disabled: Optional[bool] = False
67
62
 
68
63
  auth_type: Annotated[
69
- Annotated[
70
- Optional[OutputElasticCloudAuthenticationMethod],
71
- PlainValidator(validate_open_enum(False)),
72
- ],
64
+ Optional[OutputElasticCloudAuthenticationMethod],
73
65
  pydantic.Field(alias="authType"),
74
66
  ] = OutputElasticCloudAuthenticationMethod.MANUAL
75
67
  r"""Enter credentials directly, or select a stored secret"""
@@ -129,7 +121,7 @@ class OutputElasticCloudTimeoutRetrySettings(BaseModel):
129
121
  r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
130
122
 
131
123
 
132
- class OutputElasticCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
124
+ class OutputElasticCloudBackpressureBehavior(str, Enum):
133
125
  r"""How to handle events when all receivers are exerting backpressure"""
134
126
 
135
127
  BLOCK = "block"
@@ -137,21 +129,21 @@ class OutputElasticCloudBackpressureBehavior(str, Enum, metaclass=utils.OpenEnum
137
129
  QUEUE = "queue"
138
130
 
139
131
 
140
- class OutputElasticCloudCompression(str, Enum, metaclass=utils.OpenEnumMeta):
132
+ class OutputElasticCloudCompression(str, Enum):
141
133
  r"""Codec to use to compress the persisted data"""
142
134
 
143
135
  NONE = "none"
144
136
  GZIP = "gzip"
145
137
 
146
138
 
147
- class OutputElasticCloudQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
139
+ class OutputElasticCloudQueueFullBehavior(str, Enum):
148
140
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
149
141
 
150
142
  BLOCK = "block"
151
143
  DROP = "drop"
152
144
 
153
145
 
154
- class OutputElasticCloudMode(str, Enum, metaclass=utils.OpenEnumMeta):
146
+ class OutputElasticCloudMode(str, Enum):
155
147
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
156
148
 
157
149
  ERROR = "error"
@@ -303,10 +295,7 @@ class OutputElasticCloud(BaseModel):
303
295
  r"""Headers to add to all events"""
304
296
 
305
297
  failed_request_logging_mode: Annotated[
306
- Annotated[
307
- Optional[OutputElasticCloudFailedRequestLoggingMode],
308
- PlainValidator(validate_open_enum(False)),
309
- ],
298
+ Optional[OutputElasticCloudFailedRequestLoggingMode],
310
299
  pydantic.Field(alias="failedRequestLoggingMode"),
311
300
  ] = OutputElasticCloudFailedRequestLoggingMode.NONE
312
301
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -351,10 +340,7 @@ class OutputElasticCloud(BaseModel):
351
340
  r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
352
341
 
353
342
  on_backpressure: Annotated[
354
- Annotated[
355
- Optional[OutputElasticCloudBackpressureBehavior],
356
- PlainValidator(validate_open_enum(False)),
357
- ],
343
+ Optional[OutputElasticCloudBackpressureBehavior],
358
344
  pydantic.Field(alias="onBackpressure"),
359
345
  ] = OutputElasticCloudBackpressureBehavior.BLOCK
360
346
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -375,28 +361,18 @@ class OutputElasticCloud(BaseModel):
375
361
  r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
376
362
 
377
363
  pq_compress: Annotated[
378
- Annotated[
379
- Optional[OutputElasticCloudCompression],
380
- PlainValidator(validate_open_enum(False)),
381
- ],
382
- pydantic.Field(alias="pqCompress"),
364
+ Optional[OutputElasticCloudCompression], pydantic.Field(alias="pqCompress")
383
365
  ] = OutputElasticCloudCompression.NONE
384
366
  r"""Codec to use to compress the persisted data"""
385
367
 
386
368
  pq_on_backpressure: Annotated[
387
- Annotated[
388
- Optional[OutputElasticCloudQueueFullBehavior],
389
- PlainValidator(validate_open_enum(False)),
390
- ],
369
+ Optional[OutputElasticCloudQueueFullBehavior],
391
370
  pydantic.Field(alias="pqOnBackpressure"),
392
371
  ] = OutputElasticCloudQueueFullBehavior.BLOCK
393
372
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
394
373
 
395
374
  pq_mode: Annotated[
396
- Annotated[
397
- Optional[OutputElasticCloudMode], PlainValidator(validate_open_enum(False))
398
- ],
399
- pydantic.Field(alias="pqMode"),
375
+ Optional[OutputElasticCloudMode], pydantic.Field(alias="pqMode")
400
376
  ] = OutputElasticCloudMode.ERROR
401
377
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
402
378
 
@@ -1,12 +1,9 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
5
4
  from cribl_control_plane.types import BaseModel
6
- from cribl_control_plane.utils import validate_open_enum
7
5
  from enum import Enum
8
6
  import pydantic
9
- from pydantic.functional_validators import PlainValidator
10
7
  from typing import List, Optional
11
8
  from typing_extensions import Annotated, NotRequired, TypedDict
12
9
 
@@ -15,14 +12,14 @@ class OutputExabeamType(str, Enum):
15
12
  EXABEAM = "exabeam"
16
13
 
17
14
 
18
- class OutputExabeamSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
15
+ class OutputExabeamSignatureVersion(str, Enum):
19
16
  r"""Signature version to use for signing Google Cloud Storage requests"""
20
17
 
21
18
  V2 = "v2"
22
19
  V4 = "v4"
23
20
 
24
21
 
25
- class OutputExabeamObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
22
+ class OutputExabeamObjectACL(str, Enum):
26
23
  r"""Object ACL to assign to uploaded objects"""
27
24
 
28
25
  PRIVATE = "private"
@@ -33,7 +30,7 @@ class OutputExabeamObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
33
30
  PUBLIC_READ = "public-read"
34
31
 
35
32
 
36
- class OutputExabeamStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
33
+ class OutputExabeamStorageClass(str, Enum):
37
34
  r"""Storage class to select for uploaded objects"""
38
35
 
39
36
  STANDARD = "STANDARD"
@@ -42,14 +39,14 @@ class OutputExabeamStorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
42
39
  ARCHIVE = "ARCHIVE"
43
40
 
44
41
 
45
- class OutputExabeamBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
42
+ class OutputExabeamBackpressureBehavior(str, Enum):
46
43
  r"""How to handle events when all receivers are exerting backpressure"""
47
44
 
48
45
  BLOCK = "block"
49
46
  DROP = "drop"
50
47
 
51
48
 
52
- class OutputExabeamDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
49
+ class OutputExabeamDiskSpaceProtection(str, Enum):
53
50
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
54
51
 
55
52
  BLOCK = "block"
@@ -168,28 +165,18 @@ class OutputExabeam(BaseModel):
168
165
  r"""Google Cloud Storage service endpoint"""
169
166
 
170
167
  signature_version: Annotated[
171
- Annotated[
172
- Optional[OutputExabeamSignatureVersion],
173
- PlainValidator(validate_open_enum(False)),
174
- ],
168
+ Optional[OutputExabeamSignatureVersion],
175
169
  pydantic.Field(alias="signatureVersion"),
176
170
  ] = OutputExabeamSignatureVersion.V4
177
171
  r"""Signature version to use for signing Google Cloud Storage requests"""
178
172
 
179
173
  object_acl: Annotated[
180
- Annotated[
181
- Optional[OutputExabeamObjectACL], PlainValidator(validate_open_enum(False))
182
- ],
183
- pydantic.Field(alias="objectACL"),
174
+ Optional[OutputExabeamObjectACL], pydantic.Field(alias="objectACL")
184
175
  ] = OutputExabeamObjectACL.PRIVATE
185
176
  r"""Object ACL to assign to uploaded objects"""
186
177
 
187
178
  storage_class: Annotated[
188
- Annotated[
189
- Optional[OutputExabeamStorageClass],
190
- PlainValidator(validate_open_enum(False)),
191
- ],
192
- pydantic.Field(alias="storageClass"),
179
+ Optional[OutputExabeamStorageClass], pydantic.Field(alias="storageClass")
193
180
  ] = None
194
181
  r"""Storage class to select for uploaded objects"""
195
182
 
@@ -229,10 +216,7 @@ class OutputExabeam(BaseModel):
229
216
  r"""Maximum number of files to keep open concurrently. When exceeded, @{product} will close the oldest open files and move them to the final output location."""
230
217
 
231
218
  on_backpressure: Annotated[
232
- Annotated[
233
- Optional[OutputExabeamBackpressureBehavior],
234
- PlainValidator(validate_open_enum(False)),
235
- ],
219
+ Optional[OutputExabeamBackpressureBehavior],
236
220
  pydantic.Field(alias="onBackpressure"),
237
221
  ] = OutputExabeamBackpressureBehavior.BLOCK
238
222
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -243,10 +227,7 @@ class OutputExabeam(BaseModel):
243
227
  r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
244
228
 
245
229
  on_disk_full_backpressure: Annotated[
246
- Annotated[
247
- Optional[OutputExabeamDiskSpaceProtection],
248
- PlainValidator(validate_open_enum(False)),
249
- ],
230
+ Optional[OutputExabeamDiskSpaceProtection],
250
231
  pydantic.Field(alias="onDiskFullBackpressure"),
251
232
  ] = OutputExabeamDiskSpaceProtection.BLOCK
252
233
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -1,12 +1,9 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
5
4
  from cribl_control_plane.types import BaseModel
6
- from cribl_control_plane.utils import validate_open_enum
7
5
  from enum import Enum
8
6
  import pydantic
9
- from pydantic.functional_validators import PlainValidator
10
7
  from typing import List, Optional
11
8
  from typing_extensions import Annotated, NotRequired, TypedDict
12
9
 
@@ -15,7 +12,7 @@ class OutputFilesystemType(str, Enum):
15
12
  FILESYSTEM = "filesystem"
16
13
 
17
14
 
18
- class OutputFilesystemDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
15
+ class OutputFilesystemDataFormat(str, Enum):
19
16
  r"""Format of the output data"""
20
17
 
21
18
  JSON = "json"
@@ -23,28 +20,28 @@ class OutputFilesystemDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
23
20
  PARQUET = "parquet"
24
21
 
25
22
 
26
- class OutputFilesystemBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
23
+ class OutputFilesystemBackpressureBehavior(str, Enum):
27
24
  r"""How to handle events when all receivers are exerting backpressure"""
28
25
 
29
26
  BLOCK = "block"
30
27
  DROP = "drop"
31
28
 
32
29
 
33
- class OutputFilesystemDiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
30
+ class OutputFilesystemDiskSpaceProtection(str, Enum):
34
31
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
35
32
 
36
33
  BLOCK = "block"
37
34
  DROP = "drop"
38
35
 
39
36
 
40
- class OutputFilesystemCompression(str, Enum, metaclass=utils.OpenEnumMeta):
37
+ class OutputFilesystemCompression(str, Enum):
41
38
  r"""Data compression format to apply to HTTP content before it is delivered"""
42
39
 
43
40
  NONE = "none"
44
41
  GZIP = "gzip"
45
42
 
46
43
 
47
- class OutputFilesystemCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
44
+ class OutputFilesystemCompressionLevel(str, Enum):
48
45
  r"""Compression level to apply before moving files to final destination"""
49
46
 
50
47
  BEST_SPEED = "best_speed"
@@ -52,7 +49,7 @@ class OutputFilesystemCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
52
49
  BEST_COMPRESSION = "best_compression"
53
50
 
54
51
 
55
- class OutputFilesystemParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
52
+ class OutputFilesystemParquetVersion(str, Enum):
56
53
  r"""Determines which data types are supported and how they are represented"""
57
54
 
58
55
  PARQUET_1_0 = "PARQUET_1_0"
@@ -60,7 +57,7 @@ class OutputFilesystemParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
60
57
  PARQUET_2_6 = "PARQUET_2_6"
61
58
 
62
59
 
63
- class OutputFilesystemDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
60
+ class OutputFilesystemDataPageVersion(str, Enum):
64
61
  r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
65
62
 
66
63
  DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -199,11 +196,7 @@ class OutputFilesystem(BaseModel):
199
196
  r"""JavaScript expression defining how files are partitioned and organized. Default is date-based. If blank, Stream will fall back to the event's __partition field value – if present – otherwise to each location's root directory."""
200
197
 
201
198
  format_: Annotated[
202
- Annotated[
203
- Optional[OutputFilesystemDataFormat],
204
- PlainValidator(validate_open_enum(False)),
205
- ],
206
- pydantic.Field(alias="format"),
199
+ Optional[OutputFilesystemDataFormat], pydantic.Field(alias="format")
207
200
  ] = OutputFilesystemDataFormat.JSON
208
201
  r"""Format of the output data"""
209
202
 
@@ -246,10 +239,7 @@ class OutputFilesystem(BaseModel):
246
239
  r"""Buffer size used to write to a file"""
247
240
 
248
241
  on_backpressure: Annotated[
249
- Annotated[
250
- Optional[OutputFilesystemBackpressureBehavior],
251
- PlainValidator(validate_open_enum(False)),
252
- ],
242
+ Optional[OutputFilesystemBackpressureBehavior],
253
243
  pydantic.Field(alias="onBackpressure"),
254
244
  ] = OutputFilesystemBackpressureBehavior.BLOCK
255
245
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -260,26 +250,18 @@ class OutputFilesystem(BaseModel):
260
250
  r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
261
251
 
262
252
  on_disk_full_backpressure: Annotated[
263
- Annotated[
264
- Optional[OutputFilesystemDiskSpaceProtection],
265
- PlainValidator(validate_open_enum(False)),
266
- ],
253
+ Optional[OutputFilesystemDiskSpaceProtection],
267
254
  pydantic.Field(alias="onDiskFullBackpressure"),
268
255
  ] = OutputFilesystemDiskSpaceProtection.BLOCK
269
256
  r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
270
257
 
271
258
  description: Optional[str] = None
272
259
 
273
- compress: Annotated[
274
- Optional[OutputFilesystemCompression], PlainValidator(validate_open_enum(False))
275
- ] = OutputFilesystemCompression.GZIP
260
+ compress: Optional[OutputFilesystemCompression] = OutputFilesystemCompression.GZIP
276
261
  r"""Data compression format to apply to HTTP content before it is delivered"""
277
262
 
278
263
  compression_level: Annotated[
279
- Annotated[
280
- Optional[OutputFilesystemCompressionLevel],
281
- PlainValidator(validate_open_enum(False)),
282
- ],
264
+ Optional[OutputFilesystemCompressionLevel],
283
265
  pydantic.Field(alias="compressionLevel"),
284
266
  ] = OutputFilesystemCompressionLevel.BEST_SPEED
285
267
  r"""Compression level to apply before moving files to final destination"""
@@ -290,19 +272,12 @@ class OutputFilesystem(BaseModel):
290
272
  r"""Automatically calculate the schema based on the events of each Parquet file generated"""
291
273
 
292
274
  parquet_version: Annotated[
293
- Annotated[
294
- Optional[OutputFilesystemParquetVersion],
295
- PlainValidator(validate_open_enum(False)),
296
- ],
297
- pydantic.Field(alias="parquetVersion"),
275
+ Optional[OutputFilesystemParquetVersion], pydantic.Field(alias="parquetVersion")
298
276
  ] = OutputFilesystemParquetVersion.PARQUET_2_6
299
277
  r"""Determines which data types are supported and how they are represented"""
300
278
 
301
279
  parquet_data_page_version: Annotated[
302
- Annotated[
303
- Optional[OutputFilesystemDataPageVersion],
304
- PlainValidator(validate_open_enum(False)),
305
- ],
280
+ Optional[OutputFilesystemDataPageVersion],
306
281
  pydantic.Field(alias="parquetDataPageVersion"),
307
282
  ] = OutputFilesystemDataPageVersion.DATA_PAGE_V2
308
283
  r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
@@ -1,12 +1,9 @@
1
1
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
2
2
 
3
3
  from __future__ import annotations
4
- from cribl_control_plane import utils
5
4
  from cribl_control_plane.types import BaseModel
6
- from cribl_control_plane.utils import validate_open_enum
7
5
  from enum import Enum
8
6
  import pydantic
9
- from pydantic.functional_validators import PlainValidator
10
7
  from typing import List, Optional
11
8
  from typing_extensions import Annotated, NotRequired, TypedDict
12
9
 
@@ -15,14 +12,12 @@ class OutputGoogleChronicleType(str, Enum):
15
12
  GOOGLE_CHRONICLE = "google_chronicle"
16
13
 
17
14
 
18
- class OutputGoogleChronicleAPIVersion(str, Enum, metaclass=utils.OpenEnumMeta):
15
+ class OutputGoogleChronicleAPIVersion(str, Enum):
19
16
  V1 = "v1"
20
17
  V2 = "v2"
21
18
 
22
19
 
23
- class OutputGoogleChronicleAuthenticationMethod(
24
- str, Enum, metaclass=utils.OpenEnumMeta
25
- ):
20
+ class OutputGoogleChronicleAuthenticationMethod(str, Enum):
26
21
  MANUAL = "manual"
27
22
  SECRET = "secret"
28
23
  SERVICE_ACCOUNT = "serviceAccount"
@@ -83,7 +78,7 @@ class OutputGoogleChronicleTimeoutRetrySettings(BaseModel):
83
78
  r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""
84
79
 
85
80
 
86
- class SendEventsAs(str, Enum, metaclass=utils.OpenEnumMeta):
81
+ class SendEventsAs(str, Enum):
87
82
  UNSTRUCTURED = "unstructured"
88
83
  UDM = "udm"
89
84
 
@@ -99,9 +94,7 @@ class OutputGoogleChronicleExtraHTTPHeader(BaseModel):
99
94
  name: Optional[str] = None
100
95
 
101
96
 
102
- class OutputGoogleChronicleFailedRequestLoggingMode(
103
- str, Enum, metaclass=utils.OpenEnumMeta
104
- ):
97
+ class OutputGoogleChronicleFailedRequestLoggingMode(str, Enum):
105
98
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
106
99
 
107
100
  PAYLOAD = "payload"
@@ -109,9 +102,7 @@ class OutputGoogleChronicleFailedRequestLoggingMode(
109
102
  NONE = "none"
110
103
 
111
104
 
112
- class OutputGoogleChronicleBackpressureBehavior(
113
- str, Enum, metaclass=utils.OpenEnumMeta
114
- ):
105
+ class OutputGoogleChronicleBackpressureBehavior(str, Enum):
115
106
  r"""How to handle events when all receivers are exerting backpressure"""
116
107
 
117
108
  BLOCK = "block"
@@ -141,21 +132,21 @@ class CustomLabel(BaseModel):
141
132
  value: str
142
133
 
143
134
 
144
- class OutputGoogleChronicleCompression(str, Enum, metaclass=utils.OpenEnumMeta):
135
+ class OutputGoogleChronicleCompression(str, Enum):
145
136
  r"""Codec to use to compress the persisted data"""
146
137
 
147
138
  NONE = "none"
148
139
  GZIP = "gzip"
149
140
 
150
141
 
151
- class OutputGoogleChronicleQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
142
+ class OutputGoogleChronicleQueueFullBehavior(str, Enum):
152
143
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
153
144
 
154
145
  BLOCK = "block"
155
146
  DROP = "drop"
156
147
 
157
148
 
158
- class OutputGoogleChronicleMode(str, Enum, metaclass=utils.OpenEnumMeta):
149
+ class OutputGoogleChronicleMode(str, Enum):
159
150
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
160
151
 
161
152
  ERROR = "error"
@@ -285,18 +276,11 @@ class OutputGoogleChronicle(BaseModel):
285
276
  r"""Tags for filtering and grouping in @{product}"""
286
277
 
287
278
  api_version: Annotated[
288
- Annotated[
289
- Optional[OutputGoogleChronicleAPIVersion],
290
- PlainValidator(validate_open_enum(False)),
291
- ],
292
- pydantic.Field(alias="apiVersion"),
279
+ Optional[OutputGoogleChronicleAPIVersion], pydantic.Field(alias="apiVersion")
293
280
  ] = OutputGoogleChronicleAPIVersion.V1
294
281
 
295
282
  authentication_method: Annotated[
296
- Annotated[
297
- Optional[OutputGoogleChronicleAuthenticationMethod],
298
- PlainValidator(validate_open_enum(False)),
299
- ],
283
+ Optional[OutputGoogleChronicleAuthenticationMethod],
300
284
  pydantic.Field(alias="authenticationMethod"),
301
285
  ] = OutputGoogleChronicleAuthenticationMethod.SERVICE_ACCOUNT
302
286
 
@@ -317,8 +301,7 @@ class OutputGoogleChronicle(BaseModel):
317
301
  r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
318
302
 
319
303
  log_format_type: Annotated[
320
- Annotated[Optional[SendEventsAs], PlainValidator(validate_open_enum(False))],
321
- pydantic.Field(alias="logFormatType"),
304
+ Optional[SendEventsAs], pydantic.Field(alias="logFormatType")
322
305
  ] = SendEventsAs.UNSTRUCTURED
323
306
 
324
307
  region: Optional[str] = None
@@ -363,10 +346,7 @@ class OutputGoogleChronicle(BaseModel):
363
346
  r"""Headers to add to all events"""
364
347
 
365
348
  failed_request_logging_mode: Annotated[
366
- Annotated[
367
- Optional[OutputGoogleChronicleFailedRequestLoggingMode],
368
- PlainValidator(validate_open_enum(False)),
369
- ],
349
+ Optional[OutputGoogleChronicleFailedRequestLoggingMode],
370
350
  pydantic.Field(alias="failedRequestLoggingMode"),
371
351
  ] = OutputGoogleChronicleFailedRequestLoggingMode.NONE
372
352
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -382,10 +362,7 @@ class OutputGoogleChronicle(BaseModel):
382
362
  r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned."""
383
363
 
384
364
  on_backpressure: Annotated[
385
- Annotated[
386
- Optional[OutputGoogleChronicleBackpressureBehavior],
387
- PlainValidator(validate_open_enum(False)),
388
- ],
365
+ Optional[OutputGoogleChronicleBackpressureBehavior],
389
366
  pydantic.Field(alias="onBackpressure"),
390
367
  ] = OutputGoogleChronicleBackpressureBehavior.BLOCK
391
368
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -453,29 +430,18 @@ class OutputGoogleChronicle(BaseModel):
453
430
  r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
454
431
 
455
432
  pq_compress: Annotated[
456
- Annotated[
457
- Optional[OutputGoogleChronicleCompression],
458
- PlainValidator(validate_open_enum(False)),
459
- ],
460
- pydantic.Field(alias="pqCompress"),
433
+ Optional[OutputGoogleChronicleCompression], pydantic.Field(alias="pqCompress")
461
434
  ] = OutputGoogleChronicleCompression.NONE
462
435
  r"""Codec to use to compress the persisted data"""
463
436
 
464
437
  pq_on_backpressure: Annotated[
465
- Annotated[
466
- Optional[OutputGoogleChronicleQueueFullBehavior],
467
- PlainValidator(validate_open_enum(False)),
468
- ],
438
+ Optional[OutputGoogleChronicleQueueFullBehavior],
469
439
  pydantic.Field(alias="pqOnBackpressure"),
470
440
  ] = OutputGoogleChronicleQueueFullBehavior.BLOCK
471
441
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
472
442
 
473
443
  pq_mode: Annotated[
474
- Annotated[
475
- Optional[OutputGoogleChronicleMode],
476
- PlainValidator(validate_open_enum(False)),
477
- ],
478
- pydantic.Field(alias="pqMode"),
444
+ Optional[OutputGoogleChronicleMode], pydantic.Field(alias="pqMode")
479
445
  ] = OutputGoogleChronicleMode.ERROR
480
446
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
481
447