cribl-control-plane 0.0.49__py3-none-any.whl → 0.0.50rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of cribl-control-plane might be problematic.

Files changed (173)
  1. cribl_control_plane/_version.py +4 -6
  2. cribl_control_plane/errors/healthstatus_error.py +8 -2
  3. cribl_control_plane/health.py +6 -2
  4. cribl_control_plane/models/__init__.py +21 -4
  5. cribl_control_plane/models/appmode.py +2 -1
  6. cribl_control_plane/models/cacheconnection.py +10 -2
  7. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  8. cribl_control_plane/models/cloudprovider.py +2 -1
  9. cribl_control_plane/models/configgroup.py +7 -2
  10. cribl_control_plane/models/configgroupcloud.py +6 -2
  11. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  12. cribl_control_plane/models/createinputhectokenbyidop.py +6 -5
  13. cribl_control_plane/models/createversionpushop.py +5 -5
  14. cribl_control_plane/models/createversionundoop.py +3 -3
  15. cribl_control_plane/models/cribllakedataset.py +8 -2
  16. cribl_control_plane/models/datasetmetadata.py +8 -2
  17. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  18. cribl_control_plane/models/error.py +16 -0
  19. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  20. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  21. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  22. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  23. cribl_control_plane/models/gethealthinfoop.py +17 -0
  24. cribl_control_plane/models/getsummaryop.py +7 -2
  25. cribl_control_plane/models/getversionshowop.py +6 -5
  26. cribl_control_plane/models/gitinfo.py +14 -3
  27. cribl_control_plane/models/gitshowresult.py +19 -0
  28. cribl_control_plane/models/hbcriblinfo.py +11 -1
  29. cribl_control_plane/models/healthstatus.py +7 -4
  30. cribl_control_plane/models/inputappscope.py +34 -14
  31. cribl_control_plane/models/inputazureblob.py +17 -6
  32. cribl_control_plane/models/inputcollection.py +11 -4
  33. cribl_control_plane/models/inputconfluentcloud.py +47 -20
  34. cribl_control_plane/models/inputcribl.py +11 -4
  35. cribl_control_plane/models/inputcriblhttp.py +23 -8
  36. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  37. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  38. cribl_control_plane/models/inputcribltcp.py +23 -8
  39. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  40. cribl_control_plane/models/inputdatadogagent.py +24 -8
  41. cribl_control_plane/models/inputdatagen.py +11 -4
  42. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  43. cribl_control_plane/models/inputelastic.py +40 -14
  44. cribl_control_plane/models/inputeventhub.py +15 -6
  45. cribl_control_plane/models/inputexec.py +14 -6
  46. cribl_control_plane/models/inputfile.py +15 -6
  47. cribl_control_plane/models/inputfirehose.py +23 -8
  48. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  49. cribl_control_plane/models/inputgrafana.py +67 -24
  50. cribl_control_plane/models/inputhttp.py +23 -8
  51. cribl_control_plane/models/inputhttpraw.py +23 -8
  52. cribl_control_plane/models/inputjournalfiles.py +12 -4
  53. cribl_control_plane/models/inputkafka.py +46 -16
  54. cribl_control_plane/models/inputkinesis.py +38 -14
  55. cribl_control_plane/models/inputkubeevents.py +11 -4
  56. cribl_control_plane/models/inputkubelogs.py +16 -8
  57. cribl_control_plane/models/inputkubemetrics.py +16 -8
  58. cribl_control_plane/models/inputloki.py +29 -10
  59. cribl_control_plane/models/inputmetrics.py +23 -8
  60. cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
  61. cribl_control_plane/models/inputmsk.py +53 -18
  62. cribl_control_plane/models/inputnetflow.py +11 -4
  63. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  64. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  65. cribl_control_plane/models/inputoffice365service.py +35 -16
  66. cribl_control_plane/models/inputopentelemetry.py +38 -16
  67. cribl_control_plane/models/inputprometheus.py +50 -18
  68. cribl_control_plane/models/inputprometheusrw.py +30 -10
  69. cribl_control_plane/models/inputrawudp.py +11 -4
  70. cribl_control_plane/models/inputs3.py +21 -8
  71. cribl_control_plane/models/inputs3inventory.py +26 -10
  72. cribl_control_plane/models/inputsecuritylake.py +27 -10
  73. cribl_control_plane/models/inputsnmp.py +16 -6
  74. cribl_control_plane/models/inputsplunk.py +33 -12
  75. cribl_control_plane/models/inputsplunkhec.py +29 -10
  76. cribl_control_plane/models/inputsplunksearch.py +33 -14
  77. cribl_control_plane/models/inputsqs.py +27 -10
  78. cribl_control_plane/models/inputsyslog.py +43 -16
  79. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  80. cribl_control_plane/models/inputsystemstate.py +16 -8
  81. cribl_control_plane/models/inputtcp.py +29 -10
  82. cribl_control_plane/models/inputtcpjson.py +29 -10
  83. cribl_control_plane/models/inputwef.py +37 -14
  84. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  85. cribl_control_plane/models/inputwineventlogs.py +20 -10
  86. cribl_control_plane/models/inputwiz.py +21 -8
  87. cribl_control_plane/models/inputwizwebhook.py +23 -8
  88. cribl_control_plane/models/inputzscalerhec.py +29 -10
  89. cribl_control_plane/models/jobinfo.py +4 -1
  90. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  91. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  92. cribl_control_plane/models/masterworkerentry.py +7 -2
  93. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  94. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  95. cribl_control_plane/models/nodeprovidedinfo.py +4 -1
  96. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  97. cribl_control_plane/models/nodeupgradestate.py +2 -1
  98. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  99. cribl_control_plane/models/outputazureblob.py +48 -18
  100. cribl_control_plane/models/outputazuredataexplorer.py +73 -28
  101. cribl_control_plane/models/outputazureeventhub.py +40 -18
  102. cribl_control_plane/models/outputazurelogs.py +35 -12
  103. cribl_control_plane/models/outputclickhouse.py +55 -20
  104. cribl_control_plane/models/outputcloudwatch.py +29 -10
  105. cribl_control_plane/models/outputconfluentcloud.py +77 -32
  106. cribl_control_plane/models/outputcriblhttp.py +44 -16
  107. cribl_control_plane/models/outputcribllake.py +46 -16
  108. cribl_control_plane/models/outputcribltcp.py +45 -18
  109. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
  110. cribl_control_plane/models/outputdatadog.py +48 -20
  111. cribl_control_plane/models/outputdataset.py +46 -18
  112. cribl_control_plane/models/outputdiskspool.py +7 -2
  113. cribl_control_plane/models/outputdls3.py +68 -24
  114. cribl_control_plane/models/outputdynatracehttp.py +53 -20
  115. cribl_control_plane/models/outputdynatraceotlp.py +55 -22
  116. cribl_control_plane/models/outputelastic.py +43 -18
  117. cribl_control_plane/models/outputelasticcloud.py +36 -12
  118. cribl_control_plane/models/outputexabeam.py +29 -10
  119. cribl_control_plane/models/outputfilesystem.py +39 -14
  120. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  121. cribl_control_plane/models/outputgooglecloudlogging.py +50 -18
  122. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  123. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  124. cribl_control_plane/models/outputgrafanacloud.py +97 -32
  125. cribl_control_plane/models/outputgraphite.py +31 -14
  126. cribl_control_plane/models/outputhoneycomb.py +35 -12
  127. cribl_control_plane/models/outputhumiohec.py +43 -16
  128. cribl_control_plane/models/outputinfluxdb.py +42 -16
  129. cribl_control_plane/models/outputkafka.py +74 -28
  130. cribl_control_plane/models/outputkinesis.py +40 -16
  131. cribl_control_plane/models/outputloki.py +41 -16
  132. cribl_control_plane/models/outputminio.py +65 -24
  133. cribl_control_plane/models/outputmsk.py +82 -30
  134. cribl_control_plane/models/outputnewrelic.py +43 -18
  135. cribl_control_plane/models/outputnewrelicevents.py +41 -14
  136. cribl_control_plane/models/outputopentelemetry.py +67 -26
  137. cribl_control_plane/models/outputprometheus.py +35 -12
  138. cribl_control_plane/models/outputring.py +19 -8
  139. cribl_control_plane/models/outputs3.py +68 -26
  140. cribl_control_plane/models/outputsecuritylake.py +52 -18
  141. cribl_control_plane/models/outputsentinel.py +45 -18
  142. cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
  143. cribl_control_plane/models/outputservicenow.py +60 -24
  144. cribl_control_plane/models/outputsignalfx.py +37 -14
  145. cribl_control_plane/models/outputsns.py +36 -14
  146. cribl_control_plane/models/outputsplunk.py +60 -24
  147. cribl_control_plane/models/outputsplunkhec.py +35 -12
  148. cribl_control_plane/models/outputsplunklb.py +77 -30
  149. cribl_control_plane/models/outputsqs.py +41 -16
  150. cribl_control_plane/models/outputstatsd.py +30 -14
  151. cribl_control_plane/models/outputstatsdext.py +29 -12
  152. cribl_control_plane/models/outputsumologic.py +35 -12
  153. cribl_control_plane/models/outputsyslog.py +58 -24
  154. cribl_control_plane/models/outputtcpjson.py +52 -20
  155. cribl_control_plane/models/outputwavefront.py +35 -12
  156. cribl_control_plane/models/outputwebhook.py +58 -22
  157. cribl_control_plane/models/outputxsiam.py +35 -14
  158. cribl_control_plane/models/packinfo.py +3 -0
  159. cribl_control_plane/models/packinstallinfo.py +3 -0
  160. cribl_control_plane/models/productscore.py +2 -1
  161. cribl_control_plane/models/rbacresource.py +2 -1
  162. cribl_control_plane/models/resourcepolicy.py +4 -2
  163. cribl_control_plane/models/runnablejobcollection.py +30 -13
  164. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  165. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  166. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  167. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  168. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +6 -5
  169. cribl_control_plane/models/workertypes.py +2 -1
  170. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.0.50rc1.dist-info}/METADATA +1 -1
  171. cribl_control_plane-0.0.50rc1.dist-info/RECORD +328 -0
  172. cribl_control_plane-0.0.49.dist-info/RECORD +0 -325
  173. {cribl_control_plane-0.0.49.dist-info → cribl_control_plane-0.0.50rc1.dist-info}/WHEEL +0 -0
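
Across the model diffs shown below, the change follows a single pattern: generated enums gain metaclass=utils.OpenEnumMeta, and enum-typed fields are wrapped in PlainValidator(validate_open_enum(False)). The apparent intent is to make these enums "open", so a value coming back from the API that is not yet a declared member is kept as-is instead of failing Pydantic validation. The sketch below is a minimal, self-contained illustration of that pattern; the OpenEnumMeta and validate_open_enum defined here are stand-ins written for this example (not the SDK's actual implementations), and PqCompression/Output are throwaway names.

from enum import Enum, EnumMeta
from typing import Any, Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class OpenEnumMeta(EnumMeta):
    """Stand-in metaclass: looking up an unknown value returns the raw value."""

    def __call__(cls, value, *args, **kwargs):
        try:
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            return value  # keep the unrecognized value instead of raising


def validate_open_enum(is_int: bool):
    """Stand-in for the SDK helper: build a validator that lets any value through."""

    def validate(value: Any) -> Any:
        # The real helper presumably also coerces (and int-casts when is_int is
        # True); passing the value through is enough to show the behavior.
        return value

    return validate


class PqCompression(str, Enum, metaclass=OpenEnumMeta):
    NONE = "none"
    GZIP = "gzip"


class Output(pydantic.BaseModel):
    pq_compress: Annotated[
        Optional[PqCompression],
        PlainValidator(validate_open_enum(False)),
        pydantic.Field(alias="pqCompress"),
    ] = PqCompression.NONE


print(Output.model_validate({"pqCompress": "gzip"}).pq_compress == PqCompression.GZIP)  # True
print(Output.model_validate({"pqCompress": "zstd"}).pq_compress)  # "zstd" survives validation
print(PqCompression("zstd"))  # the open metaclass returns "zstd" rather than raising

Without the wrapper, the same unknown value would raise a ValidationError during model_validate, which matters for a generated client that must keep deserializing responses from newer server versions.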
cribl_control_plane/models/outputsplunk.py

@@ -1,9 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
+ from cribl_control_plane import utils
  from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,21 +15,21 @@ class OutputSplunkType(str, Enum):
  SPLUNK = "splunk"


- class OutputSplunkNestedFieldSerialization(str, Enum):
+ class OutputSplunkNestedFieldSerialization(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""How to serialize nested fields into index-time fields"""

  JSON = "json"
  NONE = "none"


- class OutputSplunkMinimumTLSVersion(str, Enum):
+ class OutputSplunkMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
  TL_SV1 = "TLSv1"
  TL_SV1_1 = "TLSv1.1"
  TL_SV1_2 = "TLSv1.2"
  TL_SV1_3 = "TLSv1.3"


- class OutputSplunkMaximumTLSVersion(str, Enum):
+ class OutputSplunkMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
  TL_SV1 = "TLSv1"
  TL_SV1_1 = "TLSv1.1"
  TL_SV1_2 = "TLSv1.2"
@@ -86,22 +89,30 @@ class OutputSplunkTLSSettingsClientSide(BaseModel):
  r"""Passphrase to use to decrypt private key"""

  min_version: Annotated[
- Optional[OutputSplunkMinimumTLSVersion], pydantic.Field(alias="minVersion")
+ Annotated[
+ Optional[OutputSplunkMinimumTLSVersion],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="minVersion"),
  ] = None

  max_version: Annotated[
- Optional[OutputSplunkMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+ Annotated[
+ Optional[OutputSplunkMaximumTLSVersion],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="maxVersion"),
  ] = None


- class OutputSplunkMaxS2SVersion(str, Enum):
+ class OutputSplunkMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""The highest S2S protocol version to advertise during handshake"""

  V3 = "v3"
  V4 = "v4"


- class OutputSplunkBackpressureBehavior(str, Enum):
+ class OutputSplunkBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""How to handle events when all receivers are exerting backpressure"""

  BLOCK = "block"
@@ -109,14 +120,14 @@ class OutputSplunkBackpressureBehavior(str, Enum):
  QUEUE = "queue"


- class OutputSplunkAuthenticationMethod(str, Enum):
+ class OutputSplunkAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

  MANUAL = "manual"
  SECRET = "secret"


- class OutputSplunkCompressCompression(str, Enum):
+ class OutputSplunkCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

  DISABLED = "disabled"
@@ -124,21 +135,21 @@ class OutputSplunkCompressCompression(str, Enum):
  ALWAYS = "always"


- class OutputSplunkPqCompressCompression(str, Enum):
+ class OutputSplunkPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Codec to use to compress the persisted data"""

  NONE = "none"
  GZIP = "gzip"


- class OutputSplunkQueueFullBehavior(str, Enum):
+ class OutputSplunkQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

  BLOCK = "block"
  DROP = "drop"


- class OutputSplunkMode(str, Enum):
+ class OutputSplunkMode(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

  ERROR = "error"
@@ -242,7 +253,10 @@ class OutputSplunk(BaseModel):
  r"""The port to connect to on the provided host"""

  nested_fields: Annotated[
- Optional[OutputSplunkNestedFieldSerialization],
+ Annotated[
+ Optional[OutputSplunkNestedFieldSerialization],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="nestedFields"),
  ] = OutputSplunkNestedFieldSerialization.NONE
  r"""How to serialize nested fields into index-time fields"""
@@ -278,18 +292,29 @@
  r"""Use to troubleshoot issues with sending data"""

  max_s2_sversion: Annotated[
- Optional[OutputSplunkMaxS2SVersion], pydantic.Field(alias="maxS2Sversion")
+ Annotated[
+ Optional[OutputSplunkMaxS2SVersion],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="maxS2Sversion"),
  ] = OutputSplunkMaxS2SVersion.V3
  r"""The highest S2S protocol version to advertise during handshake"""

  on_backpressure: Annotated[
- Optional[OutputSplunkBackpressureBehavior],
+ Annotated[
+ Optional[OutputSplunkBackpressureBehavior],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="onBackpressure"),
  ] = OutputSplunkBackpressureBehavior.BLOCK
  r"""How to handle events when all receivers are exerting backpressure"""

  auth_type: Annotated[
- Optional[OutputSplunkAuthenticationMethod], pydantic.Field(alias="authType")
+ Annotated[
+ Optional[OutputSplunkAuthenticationMethod],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="authType"),
  ] = OutputSplunkAuthenticationMethod.MANUAL
  r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -300,9 +325,10 @@
  ] = 1
  r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""

- compress: Optional[OutputSplunkCompressCompression] = (
- OutputSplunkCompressCompression.DISABLED
- )
+ compress: Annotated[
+ Optional[OutputSplunkCompressCompression],
+ PlainValidator(validate_open_enum(False)),
+ ] = OutputSplunkCompressCompression.DISABLED
  r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

  pq_max_file_size: Annotated[
@@ -319,19 +345,29 @@
  r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

  pq_compress: Annotated[
- Optional[OutputSplunkPqCompressCompression], pydantic.Field(alias="pqCompress")
+ Annotated[
+ Optional[OutputSplunkPqCompressCompression],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="pqCompress"),
  ] = OutputSplunkPqCompressCompression.NONE
  r"""Codec to use to compress the persisted data"""

  pq_on_backpressure: Annotated[
- Optional[OutputSplunkQueueFullBehavior],
+ Annotated[
+ Optional[OutputSplunkQueueFullBehavior],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="pqOnBackpressure"),
  ] = OutputSplunkQueueFullBehavior.BLOCK
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

- pq_mode: Annotated[Optional[OutputSplunkMode], pydantic.Field(alias="pqMode")] = (
- OutputSplunkMode.ERROR
- )
+ pq_mode: Annotated[
+ Annotated[
+ Optional[OutputSplunkMode], PlainValidator(validate_open_enum(False))
+ ],
+ pydantic.Field(alias="pqMode"),
+ ] = OutputSplunkMode.ERROR
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

  pq_controls: Annotated[
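
One note on the doubly nested Annotated wrappers introduced above (and repeated in the two files that follow): per PEP 593, nested Annotated types flatten, so wrapping the validator in an inner Annotated and attaching the Field alias in the outer one carries the same metadata as a single flat Annotated. A small self-contained check, using a throwaway Codec enum and a pass-through lambda standing in for validate_open_enum(False):

from enum import Enum
from typing import Optional

import pydantic
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Codec(str, Enum):
    NONE = "none"
    GZIP = "gzip"


validator = PlainValidator(lambda v: v)      # stand-in for validate_open_enum(False)
alias = pydantic.Field(alias="pqCompress")   # same FieldInfo object reused below

flat = Annotated[Optional[Codec], validator, alias]
nested = Annotated[Annotated[Optional[Codec], validator], alias]

print(flat == nested)  # True: nested Annotated flattens, so both spellings are equivalent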
cribl_control_plane/models/outputsplunkhec.py

@@ -1,9 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
+ from cribl_control_plane import utils
  from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict

@@ -23,7 +26,7 @@ class OutputSplunkHecExtraHTTPHeader(BaseModel):
  name: Optional[str] = None


- class OutputSplunkHecFailedRequestLoggingMode(str, Enum):
+ class OutputSplunkHecFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

  PAYLOAD = "payload"
@@ -31,7 +34,7 @@ class OutputSplunkHecFailedRequestLoggingMode(str, Enum):
  NONE = "none"


- class OutputSplunkHecAuthenticationMethod(str, Enum):
+ class OutputSplunkHecAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

  MANUAL = "manual"
@@ -92,7 +95,7 @@ class OutputSplunkHecTimeoutRetrySettings(BaseModel):
  r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


- class OutputSplunkHecBackpressureBehavior(str, Enum):
+ class OutputSplunkHecBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""How to handle events when all receivers are exerting backpressure"""

  BLOCK = "block"
@@ -115,21 +118,21 @@ class OutputSplunkHecURL(BaseModel):
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


- class OutputSplunkHecCompression(str, Enum):
+ class OutputSplunkHecCompression(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Codec to use to compress the persisted data"""

  NONE = "none"
  GZIP = "gzip"


- class OutputSplunkHecQueueFullBehavior(str, Enum):
+ class OutputSplunkHecQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

  BLOCK = "block"
  DROP = "drop"


- class OutputSplunkHecMode(str, Enum):
+ class OutputSplunkHecMode(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

  ERROR = "error"
@@ -304,7 +307,10 @@ class OutputSplunkHec(BaseModel):
  r"""Headers to add to all events"""

  failed_request_logging_mode: Annotated[
- Optional[OutputSplunkHecFailedRequestLoggingMode],
+ Annotated[
+ Optional[OutputSplunkHecFailedRequestLoggingMode],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="failedRequestLoggingMode"),
  ] = OutputSplunkHecFailedRequestLoggingMode.NONE
  r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -320,7 +326,11 @@
  r"""Output metrics in multiple-metric format, supported in Splunk 8.0 and above to allow multiple metrics in a single event."""

  auth_type: Annotated[
- Optional[OutputSplunkHecAuthenticationMethod], pydantic.Field(alias="authType")
+ Annotated[
+ Optional[OutputSplunkHecAuthenticationMethod],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="authType"),
  ] = OutputSplunkHecAuthenticationMethod.MANUAL
  r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -341,7 +351,10 @@
  r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

  on_backpressure: Annotated[
- Optional[OutputSplunkHecBackpressureBehavior],
+ Annotated[
+ Optional[OutputSplunkHecBackpressureBehavior],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="onBackpressure"),
  ] = OutputSplunkHecBackpressureBehavior.BLOCK
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -391,18 +404,28 @@
  r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

  pq_compress: Annotated[
- Optional[OutputSplunkHecCompression], pydantic.Field(alias="pqCompress")
+ Annotated[
+ Optional[OutputSplunkHecCompression],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="pqCompress"),
  ] = OutputSplunkHecCompression.NONE
  r"""Codec to use to compress the persisted data"""

  pq_on_backpressure: Annotated[
- Optional[OutputSplunkHecQueueFullBehavior],
+ Annotated[
+ Optional[OutputSplunkHecQueueFullBehavior],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="pqOnBackpressure"),
  ] = OutputSplunkHecQueueFullBehavior.BLOCK
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

  pq_mode: Annotated[
- Optional[OutputSplunkHecMode], pydantic.Field(alias="pqMode")
+ Annotated[
+ Optional[OutputSplunkHecMode], PlainValidator(validate_open_enum(False))
+ ],
+ pydantic.Field(alias="pqMode"),
  ] = OutputSplunkHecMode.ERROR
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

cribl_control_plane/models/outputsplunklb.py

@@ -1,9 +1,12 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
+ from cribl_control_plane import utils
  from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,21 +15,21 @@ class OutputSplunkLbType(str, Enum):
  SPLUNK_LB = "splunk_lb"


- class OutputSplunkLbNestedFieldSerialization(str, Enum):
+ class OutputSplunkLbNestedFieldSerialization(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""How to serialize nested fields into index-time fields"""

  JSON = "json"
  NONE = "none"


- class OutputSplunkLbMinimumTLSVersion(str, Enum):
+ class OutputSplunkLbMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
  TL_SV1 = "TLSv1"
  TL_SV1_1 = "TLSv1.1"
  TL_SV1_2 = "TLSv1.2"
  TL_SV1_3 = "TLSv1.3"


- class OutputSplunkLbMaximumTLSVersion(str, Enum):
+ class OutputSplunkLbMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
  TL_SV1 = "TLSv1"
  TL_SV1_1 = "TLSv1.1"
  TL_SV1_2 = "TLSv1.2"
@@ -86,22 +89,30 @@ class OutputSplunkLbTLSSettingsClientSide(BaseModel):
  r"""Passphrase to use to decrypt private key"""

  min_version: Annotated[
- Optional[OutputSplunkLbMinimumTLSVersion], pydantic.Field(alias="minVersion")
+ Annotated[
+ Optional[OutputSplunkLbMinimumTLSVersion],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="minVersion"),
  ] = None

  max_version: Annotated[
- Optional[OutputSplunkLbMaximumTLSVersion], pydantic.Field(alias="maxVersion")
+ Annotated[
+ Optional[OutputSplunkLbMaximumTLSVersion],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="maxVersion"),
  ] = None


- class OutputSplunkLbMaxS2SVersion(str, Enum):
+ class OutputSplunkLbMaxS2SVersion(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""The highest S2S protocol version to advertise during handshake"""

  V3 = "v3"
  V4 = "v4"


- class OutputSplunkLbBackpressureBehavior(str, Enum):
+ class OutputSplunkLbBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""How to handle events when all receivers are exerting backpressure"""

  BLOCK = "block"
@@ -109,14 +120,14 @@ class OutputSplunkLbBackpressureBehavior(str, Enum):
  QUEUE = "queue"


- class OutputSplunkLbAuthenticationMethod(str, Enum):
+ class OutputSplunkLbAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

  MANUAL = "manual"
  SECRET = "secret"


- class OutputSplunkLbCompressCompression(str, Enum):
+ class OutputSplunkLbCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

  DISABLED = "disabled"
@@ -124,7 +135,9 @@ class OutputSplunkLbCompressCompression(str, Enum):
  ALWAYS = "always"


- class IndexerDiscoveryConfigsAuthTokenAuthenticationMethod(str, Enum):
+ class IndexerDiscoveryConfigsAuthTokenAuthenticationMethod(
+ str, Enum, metaclass=utils.OpenEnumMeta
+ ):
  r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

  MANUAL = "manual"
@@ -138,13 +151,18 @@ class OutputSplunkLbAuthTokenTypedDict(TypedDict):

  class OutputSplunkLbAuthToken(BaseModel):
  auth_type: Annotated[
- Optional[IndexerDiscoveryConfigsAuthTokenAuthenticationMethod],
+ Annotated[
+ Optional[IndexerDiscoveryConfigsAuthTokenAuthenticationMethod],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="authType"),
  ] = IndexerDiscoveryConfigsAuthTokenAuthenticationMethod.MANUAL
  r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""


- class IndexerDiscoveryConfigsAuthenticationMethod(str, Enum):
+ class IndexerDiscoveryConfigsAuthenticationMethod(
+ str, Enum, metaclass=utils.OpenEnumMeta
+ ):
  r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

  MANUAL = "manual"
@@ -197,7 +215,10 @@ class IndexerDiscoveryConfigs(BaseModel):
  r"""Tokens required to authenticate to cluster manager for indexer discovery"""

  auth_type: Annotated[
- Optional[IndexerDiscoveryConfigsAuthenticationMethod],
+ Annotated[
+ Optional[IndexerDiscoveryConfigsAuthenticationMethod],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="authType"),
  ] = IndexerDiscoveryConfigsAuthenticationMethod.MANUAL
  r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""
@@ -209,7 +230,7 @@ class IndexerDiscoveryConfigs(BaseModel):
  r"""Select or create a stored text secret"""


- class OutputSplunkLbTLS(str, Enum):
+ class OutputSplunkLbTLS(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Whether to inherit TLS configs from group setting or disable TLS"""

  INHERIT = "inherit"
@@ -236,7 +257,9 @@ class OutputSplunkLbHost(BaseModel):
  port: Optional[float] = 9997
  r"""The port to connect to on the provided host"""

- tls: Optional[OutputSplunkLbTLS] = OutputSplunkLbTLS.INHERIT
+ tls: Annotated[
+ Optional[OutputSplunkLbTLS], PlainValidator(validate_open_enum(False))
+ ] = OutputSplunkLbTLS.INHERIT
  r"""Whether to inherit TLS configs from group setting or disable TLS"""

  servername: Optional[str] = None
@@ -246,21 +269,21 @@ class OutputSplunkLbHost(BaseModel):
  r"""Assign a weight (>0) to each endpoint to indicate its traffic-handling capability"""


- class OutputSplunkLbPqCompressCompression(str, Enum):
+ class OutputSplunkLbPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""Codec to use to compress the persisted data"""

  NONE = "none"
  GZIP = "gzip"


- class OutputSplunkLbQueueFullBehavior(str, Enum):
+ class OutputSplunkLbQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

  BLOCK = "block"
  DROP = "drop"


- class OutputSplunkLbMode(str, Enum):
+ class OutputSplunkLbMode(str, Enum, metaclass=utils.OpenEnumMeta):
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

  ERROR = "error"
@@ -388,7 +411,10 @@ class OutputSplunkLb(BaseModel):
  r"""Maximum number of concurrent connections (per Worker Process). A random set of IPs will be picked on every DNS resolution period. Use 0 for unlimited."""

  nested_fields: Annotated[
- Optional[OutputSplunkLbNestedFieldSerialization],
+ Annotated[
+ Optional[OutputSplunkLbNestedFieldSerialization],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="nestedFields"),
  ] = OutputSplunkLbNestedFieldSerialization.NONE
  r"""How to serialize nested fields into index-time fields"""
@@ -424,12 +450,19 @@ class OutputSplunkLb(BaseModel):
  r"""Use to troubleshoot issues with sending data"""

  max_s2_sversion: Annotated[
- Optional[OutputSplunkLbMaxS2SVersion], pydantic.Field(alias="maxS2Sversion")
+ Annotated[
+ Optional[OutputSplunkLbMaxS2SVersion],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="maxS2Sversion"),
  ] = OutputSplunkLbMaxS2SVersion.V3
  r"""The highest S2S protocol version to advertise during handshake"""

  on_backpressure: Annotated[
- Optional[OutputSplunkLbBackpressureBehavior],
+ Annotated[
+ Optional[OutputSplunkLbBackpressureBehavior],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="onBackpressure"),
  ] = OutputSplunkLbBackpressureBehavior.BLOCK
  r"""How to handle events when all receivers are exerting backpressure"""
@@ -445,7 +478,11 @@ class OutputSplunkLb(BaseModel):
  r"""How long (in milliseconds) each LB endpoint can report blocked before the Destination reports unhealthy, blocking the sender. (Grace period for fluctuations.) Use 0 to disable; max 1 minute."""

  auth_type: Annotated[
- Optional[OutputSplunkLbAuthenticationMethod], pydantic.Field(alias="authType")
+ Annotated[
+ Optional[OutputSplunkLbAuthenticationMethod],
+ PlainValidator(validate_open_enum(False)),
+ ],
+ pydantic.Field(alias="authType"),
  ] = OutputSplunkLbAuthenticationMethod.MANUAL
  r"""Select Manual to enter an auth token directly, or select Secret to use a text secret to authenticate"""

@@ -456,9 +493,10 @@ class OutputSplunkLb(BaseModel):
  ] = 1
  r"""Maximum number of times healthcheck can fail before we close connection. If set to 0 (disabled), and the connection to Splunk is forcibly closed, some data loss might occur."""

- compress: Optional[OutputSplunkLbCompressCompression] = (
- OutputSplunkLbCompressCompression.DISABLED
- )
+ compress: Annotated[
+ Optional[OutputSplunkLbCompressCompression],
+ PlainValidator(validate_open_enum(False)),
+ ] = OutputSplunkLbCompressCompression.DISABLED
  r"""Controls whether the sender should send compressed data to the server. Select 'Disabled' to reject compressed connections or 'Always' to ignore server's configuration and send compressed data."""

  indexer_discovery_configs: Annotated[
@@ -484,20 +522,29 @@ class OutputSplunkLb(BaseModel):
  r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

  pq_compress: Annotated[
- Optional[OutputSplunkLbPqCompressCompression],
+ Annotated[
+ Optional[OutputSplunkLbPqCompressCompression],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="pqCompress"),
  ] = OutputSplunkLbPqCompressCompression.NONE
  r"""Codec to use to compress the persisted data"""

  pq_on_backpressure: Annotated[
- Optional[OutputSplunkLbQueueFullBehavior],
+ Annotated[
+ Optional[OutputSplunkLbQueueFullBehavior],
+ PlainValidator(validate_open_enum(False)),
+ ],
  pydantic.Field(alias="pqOnBackpressure"),
  ] = OutputSplunkLbQueueFullBehavior.BLOCK
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

- pq_mode: Annotated[Optional[OutputSplunkLbMode], pydantic.Field(alias="pqMode")] = (
- OutputSplunkLbMode.ERROR
- )
+ pq_mode: Annotated[
+ Annotated[
+ Optional[OutputSplunkLbMode], PlainValidator(validate_open_enum(False))
+ ],
+ pydantic.Field(alias="pqMode"),
+ ] = OutputSplunkLbMode.ERROR
  r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

  pq_controls: Annotated[
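
Because unknown values survive validation as plain strings under this pattern, calling code that needs to distinguish declared members from passthrough values can still do so explicitly. A short, self-contained example with a hypothetical Codec enum (not an SDK type):

from enum import Enum
from typing import Any


class Codec(str, Enum):
    NONE = "none"
    GZIP = "gzip"


def describe(value: Any) -> str:
    # Values matching a declared member promote cleanly back to the enum;
    # anything else is a value the open-enum pattern let through untouched.
    try:
        return f"known codec: {Codec(value).value}"
    except ValueError:
        return f"unrecognized codec passed through: {value!r}"


print(describe("gzip"))  # known codec: gzip
print(describe("zstd"))  # unrecognized codec passed through: 'zstd'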