cribl-control-plane 0.0.47__py3-none-any.whl → 0.0.48a1__py3-none-any.whl

This diff compares the contents of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of cribl-control-plane has been flagged as possibly problematic.

Files changed (162)
  1. cribl_control_plane/_version.py +3 -5
  2. cribl_control_plane/errors/healthstatus_error.py +8 -2
  3. cribl_control_plane/models/__init__.py +12 -12
  4. cribl_control_plane/models/cacheconnection.py +10 -2
  5. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  6. cribl_control_plane/models/cloudprovider.py +2 -1
  7. cribl_control_plane/models/configgroup.py +7 -2
  8. cribl_control_plane/models/configgroupcloud.py +6 -2
  9. cribl_control_plane/models/createconfiggroupbyproductop.py +8 -2
  10. cribl_control_plane/models/cribllakedataset.py +8 -2
  11. cribl_control_plane/models/datasetmetadata.py +8 -2
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +7 -2
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +4 -2
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +4 -2
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +3 -1
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +7 -2
  17. cribl_control_plane/models/getsummaryop.py +7 -2
  18. cribl_control_plane/models/hbcriblinfo.py +19 -3
  19. cribl_control_plane/models/healthstatus.py +7 -4
  20. cribl_control_plane/models/heartbeatmetadata.py +3 -0
  21. cribl_control_plane/models/inputappscope.py +34 -14
  22. cribl_control_plane/models/inputazureblob.py +17 -6
  23. cribl_control_plane/models/inputcollection.py +11 -4
  24. cribl_control_plane/models/inputconfluentcloud.py +47 -20
  25. cribl_control_plane/models/inputcribl.py +11 -4
  26. cribl_control_plane/models/inputcriblhttp.py +23 -8
  27. cribl_control_plane/models/inputcribllakehttp.py +22 -10
  28. cribl_control_plane/models/inputcriblmetrics.py +12 -4
  29. cribl_control_plane/models/inputcribltcp.py +23 -8
  30. cribl_control_plane/models/inputcrowdstrike.py +26 -10
  31. cribl_control_plane/models/inputdatadogagent.py +24 -8
  32. cribl_control_plane/models/inputdatagen.py +11 -4
  33. cribl_control_plane/models/inputedgeprometheus.py +58 -24
  34. cribl_control_plane/models/inputelastic.py +40 -14
  35. cribl_control_plane/models/inputeventhub.py +15 -6
  36. cribl_control_plane/models/inputexec.py +14 -6
  37. cribl_control_plane/models/inputfile.py +15 -6
  38. cribl_control_plane/models/inputfirehose.py +23 -8
  39. cribl_control_plane/models/inputgooglepubsub.py +19 -6
  40. cribl_control_plane/models/inputgrafana.py +67 -24
  41. cribl_control_plane/models/inputhttp.py +23 -8
  42. cribl_control_plane/models/inputhttpraw.py +23 -8
  43. cribl_control_plane/models/inputjournalfiles.py +12 -4
  44. cribl_control_plane/models/inputkafka.py +46 -16
  45. cribl_control_plane/models/inputkinesis.py +38 -14
  46. cribl_control_plane/models/inputkubeevents.py +11 -4
  47. cribl_control_plane/models/inputkubelogs.py +16 -8
  48. cribl_control_plane/models/inputkubemetrics.py +16 -8
  49. cribl_control_plane/models/inputloki.py +29 -10
  50. cribl_control_plane/models/inputmetrics.py +23 -8
  51. cribl_control_plane/models/inputmodeldriventelemetry.py +32 -10
  52. cribl_control_plane/models/inputmsk.py +53 -18
  53. cribl_control_plane/models/inputnetflow.py +11 -4
  54. cribl_control_plane/models/inputoffice365mgmt.py +33 -14
  55. cribl_control_plane/models/inputoffice365msgtrace.py +35 -16
  56. cribl_control_plane/models/inputoffice365service.py +35 -16
  57. cribl_control_plane/models/inputopentelemetry.py +38 -16
  58. cribl_control_plane/models/inputprometheus.py +50 -18
  59. cribl_control_plane/models/inputprometheusrw.py +30 -10
  60. cribl_control_plane/models/inputrawudp.py +11 -4
  61. cribl_control_plane/models/inputs3.py +21 -8
  62. cribl_control_plane/models/inputs3inventory.py +26 -10
  63. cribl_control_plane/models/inputsecuritylake.py +27 -10
  64. cribl_control_plane/models/inputsnmp.py +16 -6
  65. cribl_control_plane/models/inputsplunk.py +33 -12
  66. cribl_control_plane/models/inputsplunkhec.py +29 -10
  67. cribl_control_plane/models/inputsplunksearch.py +33 -14
  68. cribl_control_plane/models/inputsqs.py +27 -10
  69. cribl_control_plane/models/inputsyslog.py +43 -16
  70. cribl_control_plane/models/inputsystemmetrics.py +48 -24
  71. cribl_control_plane/models/inputsystemstate.py +16 -8
  72. cribl_control_plane/models/inputtcp.py +29 -10
  73. cribl_control_plane/models/inputtcpjson.py +29 -10
  74. cribl_control_plane/models/inputwef.py +37 -14
  75. cribl_control_plane/models/inputwindowsmetrics.py +44 -24
  76. cribl_control_plane/models/inputwineventlogs.py +20 -10
  77. cribl_control_plane/models/inputwiz.py +21 -8
  78. cribl_control_plane/models/inputwizwebhook.py +23 -8
  79. cribl_control_plane/models/inputzscalerhec.py +29 -10
  80. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  81. cribl_control_plane/models/listconfiggroupbyproductop.py +3 -1
  82. cribl_control_plane/models/masterworkerentry.py +7 -2
  83. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  84. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  85. cribl_control_plane/models/nodeprovidedinfo.py +3 -0
  86. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  87. cribl_control_plane/models/nodeupgradestate.py +2 -1
  88. cribl_control_plane/models/nodeupgradestatus.py +13 -5
  89. cribl_control_plane/models/outputazureblob.py +48 -18
  90. cribl_control_plane/models/outputazuredataexplorer.py +73 -28
  91. cribl_control_plane/models/outputazureeventhub.py +40 -18
  92. cribl_control_plane/models/outputazurelogs.py +35 -12
  93. cribl_control_plane/models/outputclickhouse.py +55 -20
  94. cribl_control_plane/models/outputcloudwatch.py +29 -10
  95. cribl_control_plane/models/outputconfluentcloud.py +77 -32
  96. cribl_control_plane/models/outputcriblhttp.py +44 -16
  97. cribl_control_plane/models/outputcribllake.py +46 -16
  98. cribl_control_plane/models/outputcribltcp.py +45 -18
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +49 -14
  100. cribl_control_plane/models/outputdatadog.py +48 -20
  101. cribl_control_plane/models/outputdataset.py +46 -18
  102. cribl_control_plane/models/outputdiskspool.py +7 -2
  103. cribl_control_plane/models/outputdls3.py +68 -24
  104. cribl_control_plane/models/outputdynatracehttp.py +53 -20
  105. cribl_control_plane/models/outputdynatraceotlp.py +55 -22
  106. cribl_control_plane/models/outputelastic.py +43 -18
  107. cribl_control_plane/models/outputelasticcloud.py +36 -12
  108. cribl_control_plane/models/outputexabeam.py +29 -10
  109. cribl_control_plane/models/outputfilesystem.py +39 -14
  110. cribl_control_plane/models/outputgooglechronicle.py +50 -16
  111. cribl_control_plane/models/outputgooglecloudlogging.py +41 -14
  112. cribl_control_plane/models/outputgooglecloudstorage.py +66 -24
  113. cribl_control_plane/models/outputgooglepubsub.py +31 -10
  114. cribl_control_plane/models/outputgrafanacloud.py +97 -32
  115. cribl_control_plane/models/outputgraphite.py +31 -14
  116. cribl_control_plane/models/outputhoneycomb.py +35 -12
  117. cribl_control_plane/models/outputhumiohec.py +43 -16
  118. cribl_control_plane/models/outputinfluxdb.py +42 -16
  119. cribl_control_plane/models/outputkafka.py +74 -28
  120. cribl_control_plane/models/outputkinesis.py +40 -16
  121. cribl_control_plane/models/outputloki.py +41 -16
  122. cribl_control_plane/models/outputminio.py +65 -24
  123. cribl_control_plane/models/outputmsk.py +82 -30
  124. cribl_control_plane/models/outputnewrelic.py +43 -18
  125. cribl_control_plane/models/outputnewrelicevents.py +41 -14
  126. cribl_control_plane/models/outputopentelemetry.py +67 -26
  127. cribl_control_plane/models/outputprometheus.py +35 -12
  128. cribl_control_plane/models/outputring.py +19 -8
  129. cribl_control_plane/models/outputs3.py +68 -26
  130. cribl_control_plane/models/outputsecuritylake.py +52 -18
  131. cribl_control_plane/models/outputsentinel.py +45 -18
  132. cribl_control_plane/models/outputsentineloneaisiem.py +50 -18
  133. cribl_control_plane/models/outputservicenow.py +60 -24
  134. cribl_control_plane/models/outputsignalfx.py +37 -14
  135. cribl_control_plane/models/outputsns.py +36 -14
  136. cribl_control_plane/models/outputsplunk.py +60 -24
  137. cribl_control_plane/models/outputsplunkhec.py +35 -12
  138. cribl_control_plane/models/outputsplunklb.py +77 -30
  139. cribl_control_plane/models/outputsqs.py +41 -16
  140. cribl_control_plane/models/outputstatsd.py +30 -14
  141. cribl_control_plane/models/outputstatsdext.py +29 -12
  142. cribl_control_plane/models/outputsumologic.py +35 -12
  143. cribl_control_plane/models/outputsyslog.py +58 -24
  144. cribl_control_plane/models/outputtcpjson.py +52 -20
  145. cribl_control_plane/models/outputwavefront.py +35 -12
  146. cribl_control_plane/models/outputwebhook.py +58 -22
  147. cribl_control_plane/models/outputxsiam.py +35 -14
  148. cribl_control_plane/models/productscore.py +2 -1
  149. cribl_control_plane/models/rbacresource.py +2 -1
  150. cribl_control_plane/models/resourcepolicy.py +4 -2
  151. cribl_control_plane/models/routeconf.py +3 -4
  152. cribl_control_plane/models/runnablejobcollection.py +30 -13
  153. cribl_control_plane/models/runnablejobexecutor.py +13 -4
  154. cribl_control_plane/models/runnablejobscheduledsearch.py +7 -2
  155. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +8 -2
  156. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +8 -2
  157. cribl_control_plane/models/workertypes.py +2 -1
  158. {cribl_control_plane-0.0.47.dist-info → cribl_control_plane-0.0.48a1.dist-info}/METADATA +1 -1
  159. {cribl_control_plane-0.0.47.dist-info → cribl_control_plane-0.0.48a1.dist-info}/RECORD +160 -162
  160. {cribl_control_plane-0.0.47.dist-info → cribl_control_plane-0.0.48a1.dist-info}/WHEEL +1 -1
  161. cribl_control_plane/models/appmode.py +0 -13
  162. cribl_control_plane/models/routecloneconf.py +0 -13
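
Nearly every model file in this list changes the same way: Speakeasy regenerated the models with "open" enums. Each string enum gains metaclass=utils.OpenEnumMeta, and every field typed with such an enum is wrapped in Annotated[..., PlainValidator(validate_open_enum(False))], so values outside the declared members are carried through as plain strings instead of failing pydantic validation. The following is a minimal, self-contained sketch of that pattern, not the actual cribl_control_plane.utils implementation; in particular, the real validate_open_enum takes a boolean is-int flag and resolves the enum type generically, whereas this sketch takes the enum class explicitly:

    from enum import Enum, EnumMeta
    from typing import Optional

    import pydantic
    from pydantic.functional_validators import PlainValidator
    from typing_extensions import Annotated


    class OpenEnumMeta(EnumMeta):
        """Approximation of utils.OpenEnumMeta: unknown values fall through."""

        def __call__(cls, value, *args, **kwargs):
            try:
                return super().__call__(value, *args, **kwargs)
            except ValueError:
                return value  # not a declared member: keep the raw value


    def validate_open_enum(enum_cls):
        """Sketch of validate_open_enum; accepts out-of-spec values."""

        def validate(value):
            return None if value is None else enum_cls(value)

        return validate


    class Compression(str, Enum, metaclass=OpenEnumMeta):
        NONE = "none"
        GZIP = "gzip"


    class Spool(pydantic.BaseModel):
        compress: Annotated[
            Optional[Compression], PlainValidator(validate_open_enum(Compression))
        ] = Compression.GZIP


    print(Spool.model_validate({"compress": "gzip"}).compress)  # Compression.GZIP
    print(Spool.model_validate({"compress": "zstd"}).compress)  # "zstd", no error

The trade-off is forward compatibility: an SDK generated against today's API spec keeps deserializing payloads that use enum members added later, at the cost that these fields may now hold plain strings rather than enum members.
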
cribl_control_plane/models/outputdatadog.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,14 +15,14 @@ class OutputDatadogType(str, Enum):
     DATADOG = "datadog"


-class SendLogsAs(str, Enum):
+class SendLogsAs(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""The content type to use when sending logs"""

     TEXT = "text"
     JSON = "json"


-class OutputDatadogSeverity(str, Enum):
+class OutputDatadogSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""

     EMERGENCY = "emergency"
@@ -32,7 +35,7 @@ class OutputDatadogSeverity(str, Enum):
     DEBUG = "debug"


-class DatadogSite(str, Enum):
+class DatadogSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Datadog site to which events should be sent"""

     US = "us"
@@ -55,7 +58,7 @@ class OutputDatadogExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputDatadogFailedRequestLoggingMode(str, Enum):
+class OutputDatadogFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -117,7 +120,7 @@ class OutputDatadogTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputDatadogBackpressureBehavior(str, Enum):
+class OutputDatadogBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -125,28 +128,28 @@ class OutputDatadogBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputDatadogAuthenticationMethod(str, Enum):
+class OutputDatadogAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter API key directly, or select a stored secret"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputDatadogCompression(str, Enum):
+class OutputDatadogCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDatadogQueueFullBehavior(str, Enum):
+class OutputDatadogQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputDatadogMode(str, Enum):
+class OutputDatadogMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -276,7 +279,8 @@ class OutputDatadog(BaseModel):
     r"""Tags for filtering and grouping in @{product}"""

     content_type: Annotated[
-        Optional[SendLogsAs], pydantic.Field(alias="contentType")
+        Annotated[Optional[SendLogsAs], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="contentType"),
     ] = SendLogsAs.JSON
     r"""The content type to use when sending logs"""

@@ -303,10 +307,14 @@ class OutputDatadog(BaseModel):
     ] = False
     r"""Allow API key to be set from the event's '__agent_api_key' field"""

-    severity: Optional[OutputDatadogSeverity] = None
+    severity: Annotated[
+        Optional[OutputDatadogSeverity], PlainValidator(validate_open_enum(False))
+    ] = None
     r"""Default value for message severity. When you send logs as JSON objects, the event's '__severity' field (if set) will override this value."""

-    site: Optional[DatadogSite] = DatadogSite.US
+    site: Annotated[
+        Optional[DatadogSite], PlainValidator(validate_open_enum(False))
+    ] = DatadogSite.US
     r"""Datadog site to which events should be sent"""

     send_counters_as_count: Annotated[
@@ -358,7 +366,10 @@ class OutputDatadog(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Optional[OutputDatadogFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputDatadogFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDatadogFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -385,13 +396,20 @@ class OutputDatadog(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Optional[OutputDatadogBackpressureBehavior],
+        Annotated[
+            Optional[OutputDatadogBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDatadogBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Optional[OutputDatadogAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputDatadogAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputDatadogAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""

@@ -418,19 +436,29 @@ class OutputDatadog(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputDatadogCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputDatadogCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputDatadogCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputDatadogQueueFullBehavior],
+        Annotated[
+            Optional[OutputDatadogQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDatadogQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[Optional[OutputDatadogMode], pydantic.Field(alias="pqMode")] = (
-        OutputDatadogMode.ERROR
-    )
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputDatadogMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputDatadogMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[

cribl_control_plane/models/outputdataset.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputDatasetType(str, Enum):
     DATASET = "dataset"


-class OutputDatasetSeverity(str, Enum):
+class OutputDatasetSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""

     FINEST = "finest"
@@ -78,7 +81,7 @@ class OutputDatasetTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class DataSetSite(str, Enum):
+class DataSetSite(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""DataSet site to which events should be sent"""

     US = "us"
@@ -97,7 +100,7 @@ class OutputDatasetExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputDatasetFailedRequestLoggingMode(str, Enum):
+class OutputDatasetFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -105,7 +108,7 @@ class OutputDatasetFailedRequestLoggingMode(str, Enum):
     NONE = "none"


-class OutputDatasetBackpressureBehavior(str, Enum):
+class OutputDatasetBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -113,28 +116,28 @@ class OutputDatasetBackpressureBehavior(str, Enum):
     QUEUE = "queue"


-class OutputDatasetAuthenticationMethod(str, Enum):
+class OutputDatasetAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Enter API key directly, or select a stored secret"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputDatasetCompression(str, Enum):
+class OutputDatasetCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDatasetQueueFullBehavior(str, Enum):
+class OutputDatasetQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputDatasetMode(str, Enum):
+class OutputDatasetMode(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -272,7 +275,10 @@ class OutputDataset(BaseModel):
     r"""Name of the event field that contains the timestamp. If not specified, defaults to `ts`, `_time`, or `Date.now()`, in that order."""

     default_severity: Annotated[
-        Optional[OutputDatasetSeverity], pydantic.Field(alias="defaultSeverity")
+        Annotated[
+            Optional[OutputDatasetSeverity], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="defaultSeverity"),
     ] = OutputDatasetSeverity.INFO
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""

@@ -292,7 +298,9 @@ class OutputDataset(BaseModel):
     ] = False
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

-    site: Optional[DataSetSite] = DataSetSite.US
+    site: Annotated[
+        Optional[DataSetSite], PlainValidator(validate_open_enum(False))
+    ] = DataSetSite.US
     r"""DataSet site to which events should be sent"""

     concurrency: Optional[float] = 5
@@ -339,7 +347,10 @@ class OutputDataset(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Optional[OutputDatasetFailedRequestLoggingMode],
+        Annotated[
+            Optional[OutputDatasetFailedRequestLoggingMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDatasetFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -350,13 +361,20 @@ class OutputDataset(BaseModel):
     r"""List of headers that are safe to log in plain text"""

     on_backpressure: Annotated[
-        Optional[OutputDatasetBackpressureBehavior],
+        Annotated[
+            Optional[OutputDatasetBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDatasetBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Optional[OutputDatasetAuthenticationMethod], pydantic.Field(alias="authType")
+        Annotated[
+            Optional[OutputDatasetAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="authType"),
     ] = OutputDatasetAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""

@@ -383,19 +401,29 @@ class OutputDataset(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Optional[OutputDatasetCompression], pydantic.Field(alias="pqCompress")
+        Annotated[
+            Optional[OutputDatasetCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqCompress"),
     ] = OutputDatasetCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Optional[OutputDatasetQueueFullBehavior],
+        Annotated[
+            Optional[OutputDatasetQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDatasetQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[Optional[OutputDatasetMode], pydantic.Field(alias="pqMode")] = (
-        OutputDatasetMode.ERROR
-    )
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputDatasetMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputDatasetMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[

cribl_control_plane/models/outputdiskspool.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputDiskSpoolType(str, Enum):
     DISK_SPOOL = "disk_spool"


-class OutputDiskSpoolCompression(str, Enum):
+class OutputDiskSpoolCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data compression format. Default is gzip."""

     NONE = "none"
@@ -73,7 +76,9 @@ class OutputDiskSpool(BaseModel):
     max_data_time: Annotated[Optional[str], pydantic.Field(alias="maxDataTime")] = "24h"
     r"""Maximum amount of time to retain data before older buckets are deleted. Examples: 2h, 4d. Default is 24h."""

-    compress: Optional[OutputDiskSpoolCompression] = OutputDiskSpoolCompression.GZIP
+    compress: Annotated[
+        Optional[OutputDiskSpoolCompression], PlainValidator(validate_open_enum(False))
+    ] = OutputDiskSpoolCompression.GZIP
     r"""Data compression format. Default is gzip."""

     partition_expr: Annotated[Optional[str], pydantic.Field(alias="partitionExpr")] = (

cribl_control_plane/models/outputdls3.py
@@ -1,9 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
+from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,7 +15,7 @@ class OutputDlS3Type(str, Enum):
     DL_S3 = "dl_s3"


-class OutputDlS3AuthenticationMethod(str, Enum):
+class OutputDlS3AuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -20,14 +23,14 @@ class OutputDlS3AuthenticationMethod(str, Enum):
     SECRET = "secret"


-class OutputDlS3SignatureVersion(str, Enum):
+class OutputDlS3SignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Signature version to use for signing S3 requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputDlS3ObjectACL(str, Enum):
+class OutputDlS3ObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Object ACL to assign to uploaded objects"""

     PRIVATE = "private"
@@ -39,7 +42,7 @@ class OutputDlS3ObjectACL(str, Enum):
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"


-class OutputDlS3StorageClass(str, Enum):
+class OutputDlS3StorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Storage class to select for uploaded objects"""

     STANDARD = "STANDARD"
@@ -52,12 +55,14 @@ class OutputDlS3StorageClass(str, Enum):
     DEEP_ARCHIVE = "DEEP_ARCHIVE"


-class OutputDlS3ServerSideEncryptionForUploadedObjects(str, Enum):
+class OutputDlS3ServerSideEncryptionForUploadedObjects(
+    str, Enum, metaclass=utils.OpenEnumMeta
+):
     AES256 = "AES256"
     AWS_KMS = "aws:kms"


-class OutputDlS3DataFormat(str, Enum):
+class OutputDlS3DataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Format of the output data"""

     JSON = "json"
@@ -65,28 +70,28 @@ class OutputDlS3DataFormat(str, Enum):
     PARQUET = "parquet"


-class OutputDlS3BackpressureBehavior(str, Enum):
+class OutputDlS3BackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
     DROP = "drop"


-class OutputDlS3DiskSpaceProtection(str, Enum):
+class OutputDlS3DiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
     DROP = "drop"


-class OutputDlS3Compression(str, Enum):
+class OutputDlS3Compression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Data compression format to apply to HTTP content before it is delivered"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDlS3CompressionLevel(str, Enum):
+class OutputDlS3CompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Compression level to apply before moving files to final destination"""

     BEST_SPEED = "best_speed"
@@ -94,7 +99,7 @@ class OutputDlS3CompressionLevel(str, Enum):
     BEST_COMPRESSION = "best_compression"


-class OutputDlS3ParquetVersion(str, Enum):
+class OutputDlS3ParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Determines which data types are supported and how they are represented"""

     PARQUET_1_0 = "PARQUET_1_0"
@@ -102,7 +107,7 @@ class OutputDlS3ParquetVersion(str, Enum):
     PARQUET_2_6 = "PARQUET_2_6"


-class OutputDlS3DataPageVersion(str, Enum):
+class OutputDlS3DataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""

     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -274,7 +279,10 @@ class OutputDlS3(BaseModel):
     r"""Secret key. This value can be a constant or a JavaScript expression. Example: `${C.env.SOME_SECRET}`)"""

     aws_authentication_method: Annotated[
-        Optional[OutputDlS3AuthenticationMethod],
+        Annotated[
+            Optional[OutputDlS3AuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputDlS3AuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -283,7 +291,11 @@ class OutputDlS3(BaseModel):
     r"""S3 service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to S3-compatible endpoint."""

     signature_version: Annotated[
-        Optional[OutputDlS3SignatureVersion], pydantic.Field(alias="signatureVersion")
+        Annotated[
+            Optional[OutputDlS3SignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="signatureVersion"),
     ] = OutputDlS3SignatureVersion.V4
     r"""Signature version to use for signing S3 requests"""

@@ -331,17 +343,26 @@ class OutputDlS3(BaseModel):
     r"""Prefix to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myKeyPrefix-${C.vars.myVar}`"""

     object_acl: Annotated[
-        Optional[OutputDlS3ObjectACL], pydantic.Field(alias="objectACL")
+        Annotated[
+            Optional[OutputDlS3ObjectACL], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="objectACL"),
     ] = OutputDlS3ObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""

     storage_class: Annotated[
-        Optional[OutputDlS3StorageClass], pydantic.Field(alias="storageClass")
+        Annotated[
+            Optional[OutputDlS3StorageClass], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="storageClass"),
     ] = None
     r"""Storage class to select for uploaded objects"""

     server_side_encryption: Annotated[
-        Optional[OutputDlS3ServerSideEncryptionForUploadedObjects],
+        Annotated[
+            Optional[OutputDlS3ServerSideEncryptionForUploadedObjects],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="serverSideEncryption"),
     ] = None

@@ -354,7 +375,10 @@ class OutputDlS3(BaseModel):
     r"""Remove empty staging directories after moving files"""

     format_: Annotated[
-        Optional[OutputDlS3DataFormat], pydantic.Field(alias="format")
+        Annotated[
+            Optional[OutputDlS3DataFormat], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="format"),
     ] = OutputDlS3DataFormat.JSON
     r"""Format of the output data"""

@@ -387,7 +411,11 @@ class OutputDlS3(BaseModel):
     r"""Buffer size used to write to a file"""

     on_backpressure: Annotated[
-        Optional[OutputDlS3BackpressureBehavior], pydantic.Field(alias="onBackpressure")
+        Annotated[
+            Optional[OutputDlS3BackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
     ] = OutputDlS3BackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

@@ -397,7 +425,10 @@ class OutputDlS3(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""

     on_disk_full_backpressure: Annotated[
-        Optional[OutputDlS3DiskSpaceProtection],
+        Annotated[
+            Optional[OutputDlS3DiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputDlS3DiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -440,11 +471,17 @@ class OutputDlS3(BaseModel):
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""

-    compress: Optional[OutputDlS3Compression] = OutputDlS3Compression.GZIP
+    compress: Annotated[
+        Optional[OutputDlS3Compression], PlainValidator(validate_open_enum(False))
+    ] = OutputDlS3Compression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""

     compression_level: Annotated[
-        Optional[OutputDlS3CompressionLevel], pydantic.Field(alias="compressionLevel")
+        Annotated[
+            Optional[OutputDlS3CompressionLevel],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="compressionLevel"),
     ] = OutputDlS3CompressionLevel.BEST_SPEED
     r"""Compression level to apply before moving files to final destination"""

@@ -454,12 +491,19 @@ class OutputDlS3(BaseModel):
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""

     parquet_version: Annotated[
-        Optional[OutputDlS3ParquetVersion], pydantic.Field(alias="parquetVersion")
+        Annotated[
+            Optional[OutputDlS3ParquetVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="parquetVersion"),
     ] = OutputDlS3ParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""

     parquet_data_page_version: Annotated[
-        Optional[OutputDlS3DataPageVersion],
+        Annotated[
+            Optional[OutputDlS3DataPageVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputDlS3DataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""