cribl-control-plane 0.0.50rc1__py3-none-any.whl → 0.0.51__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Warning: this release of cribl-control-plane has been flagged as potentially problematic.

Files changed (174)
  1. cribl_control_plane/_version.py +6 -4
  2. cribl_control_plane/errors/healthstatus_error.py +2 -8
  3. cribl_control_plane/health.py +2 -6
  4. cribl_control_plane/httpclient.py +0 -1
  5. cribl_control_plane/models/__init__.py +4 -21
  6. cribl_control_plane/models/appmode.py +1 -2
  7. cribl_control_plane/models/cacheconnection.py +2 -10
  8. cribl_control_plane/models/cacheconnectionbackfillstatus.py +1 -2
  9. cribl_control_plane/models/cloudprovider.py +1 -2
  10. cribl_control_plane/models/configgroup.py +2 -7
  11. cribl_control_plane/models/configgroupcloud.py +2 -6
  12. cribl_control_plane/models/createconfiggroupbyproductop.py +2 -8
  13. cribl_control_plane/models/createinputhectokenbyidop.py +5 -6
  14. cribl_control_plane/models/createversionpushop.py +5 -5
  15. cribl_control_plane/models/createversionundoop.py +3 -3
  16. cribl_control_plane/models/cribllakedataset.py +2 -8
  17. cribl_control_plane/models/datasetmetadata.py +2 -8
  18. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +2 -7
  19. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +2 -4
  20. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +2 -4
  21. cribl_control_plane/models/getconfiggroupbyproductandidop.py +1 -3
  22. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +2 -7
  23. cribl_control_plane/models/getsummaryop.py +2 -7
  24. cribl_control_plane/models/getversionshowop.py +5 -6
  25. cribl_control_plane/models/gitinfo.py +3 -14
  26. cribl_control_plane/models/hbcriblinfo.py +1 -11
  27. cribl_control_plane/models/healthstatus.py +4 -7
  28. cribl_control_plane/models/inputappscope.py +14 -34
  29. cribl_control_plane/models/inputazureblob.py +6 -17
  30. cribl_control_plane/models/inputcollection.py +4 -11
  31. cribl_control_plane/models/inputconfluentcloud.py +20 -47
  32. cribl_control_plane/models/inputcribl.py +4 -11
  33. cribl_control_plane/models/inputcriblhttp.py +8 -23
  34. cribl_control_plane/models/inputcribllakehttp.py +10 -22
  35. cribl_control_plane/models/inputcriblmetrics.py +4 -12
  36. cribl_control_plane/models/inputcribltcp.py +8 -23
  37. cribl_control_plane/models/inputcrowdstrike.py +10 -26
  38. cribl_control_plane/models/inputdatadogagent.py +8 -24
  39. cribl_control_plane/models/inputdatagen.py +4 -11
  40. cribl_control_plane/models/inputedgeprometheus.py +24 -58
  41. cribl_control_plane/models/inputelastic.py +14 -40
  42. cribl_control_plane/models/inputeventhub.py +6 -15
  43. cribl_control_plane/models/inputexec.py +6 -14
  44. cribl_control_plane/models/inputfile.py +6 -15
  45. cribl_control_plane/models/inputfirehose.py +8 -23
  46. cribl_control_plane/models/inputgooglepubsub.py +6 -19
  47. cribl_control_plane/models/inputgrafana.py +24 -67
  48. cribl_control_plane/models/inputhttp.py +8 -23
  49. cribl_control_plane/models/inputhttpraw.py +8 -23
  50. cribl_control_plane/models/inputjournalfiles.py +4 -12
  51. cribl_control_plane/models/inputkafka.py +16 -46
  52. cribl_control_plane/models/inputkinesis.py +14 -38
  53. cribl_control_plane/models/inputkubeevents.py +4 -11
  54. cribl_control_plane/models/inputkubelogs.py +8 -16
  55. cribl_control_plane/models/inputkubemetrics.py +8 -16
  56. cribl_control_plane/models/inputloki.py +10 -29
  57. cribl_control_plane/models/inputmetrics.py +8 -23
  58. cribl_control_plane/models/inputmodeldriventelemetry.py +10 -32
  59. cribl_control_plane/models/inputmsk.py +18 -53
  60. cribl_control_plane/models/inputnetflow.py +4 -11
  61. cribl_control_plane/models/inputoffice365mgmt.py +14 -33
  62. cribl_control_plane/models/inputoffice365msgtrace.py +16 -35
  63. cribl_control_plane/models/inputoffice365service.py +16 -35
  64. cribl_control_plane/models/inputopentelemetry.py +16 -38
  65. cribl_control_plane/models/inputprometheus.py +18 -50
  66. cribl_control_plane/models/inputprometheusrw.py +10 -30
  67. cribl_control_plane/models/inputrawudp.py +4 -11
  68. cribl_control_plane/models/inputs3.py +8 -21
  69. cribl_control_plane/models/inputs3inventory.py +10 -26
  70. cribl_control_plane/models/inputsecuritylake.py +10 -27
  71. cribl_control_plane/models/inputsnmp.py +6 -16
  72. cribl_control_plane/models/inputsplunk.py +12 -33
  73. cribl_control_plane/models/inputsplunkhec.py +10 -29
  74. cribl_control_plane/models/inputsplunksearch.py +14 -33
  75. cribl_control_plane/models/inputsqs.py +10 -27
  76. cribl_control_plane/models/inputsyslog.py +16 -43
  77. cribl_control_plane/models/inputsystemmetrics.py +24 -48
  78. cribl_control_plane/models/inputsystemstate.py +8 -16
  79. cribl_control_plane/models/inputtcp.py +10 -29
  80. cribl_control_plane/models/inputtcpjson.py +10 -29
  81. cribl_control_plane/models/inputwef.py +14 -37
  82. cribl_control_plane/models/inputwindowsmetrics.py +24 -44
  83. cribl_control_plane/models/inputwineventlogs.py +10 -20
  84. cribl_control_plane/models/inputwiz.py +8 -21
  85. cribl_control_plane/models/inputwizwebhook.py +8 -23
  86. cribl_control_plane/models/inputzscalerhec.py +10 -29
  87. cribl_control_plane/models/jobinfo.py +1 -4
  88. cribl_control_plane/models/lakehouseconnectiontype.py +1 -2
  89. cribl_control_plane/models/listconfiggroupbyproductop.py +1 -3
  90. cribl_control_plane/models/masterworkerentry.py +2 -7
  91. cribl_control_plane/models/nodeactiveupgradestatus.py +1 -2
  92. cribl_control_plane/models/nodefailedupgradestatus.py +1 -2
  93. cribl_control_plane/models/nodeprovidedinfo.py +1 -4
  94. cribl_control_plane/models/nodeskippedupgradestatus.py +1 -2
  95. cribl_control_plane/models/nodeupgradestate.py +1 -2
  96. cribl_control_plane/models/nodeupgradestatus.py +5 -13
  97. cribl_control_plane/models/outputazureblob.py +18 -48
  98. cribl_control_plane/models/outputazuredataexplorer.py +28 -73
  99. cribl_control_plane/models/outputazureeventhub.py +18 -40
  100. cribl_control_plane/models/outputazurelogs.py +12 -35
  101. cribl_control_plane/models/outputclickhouse.py +20 -55
  102. cribl_control_plane/models/outputcloudwatch.py +10 -29
  103. cribl_control_plane/models/outputconfluentcloud.py +32 -77
  104. cribl_control_plane/models/outputcriblhttp.py +16 -44
  105. cribl_control_plane/models/outputcribllake.py +16 -46
  106. cribl_control_plane/models/outputcribltcp.py +18 -45
  107. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +14 -49
  108. cribl_control_plane/models/outputdatadog.py +20 -48
  109. cribl_control_plane/models/outputdataset.py +18 -46
  110. cribl_control_plane/models/outputdiskspool.py +2 -7
  111. cribl_control_plane/models/outputdls3.py +24 -68
  112. cribl_control_plane/models/outputdynatracehttp.py +20 -53
  113. cribl_control_plane/models/outputdynatraceotlp.py +22 -55
  114. cribl_control_plane/models/outputelastic.py +18 -43
  115. cribl_control_plane/models/outputelasticcloud.py +12 -36
  116. cribl_control_plane/models/outputexabeam.py +10 -29
  117. cribl_control_plane/models/outputfilesystem.py +14 -39
  118. cribl_control_plane/models/outputgooglechronicle.py +16 -50
  119. cribl_control_plane/models/outputgooglecloudlogging.py +18 -50
  120. cribl_control_plane/models/outputgooglecloudstorage.py +24 -66
  121. cribl_control_plane/models/outputgooglepubsub.py +10 -31
  122. cribl_control_plane/models/outputgrafanacloud.py +32 -97
  123. cribl_control_plane/models/outputgraphite.py +14 -31
  124. cribl_control_plane/models/outputhoneycomb.py +12 -35
  125. cribl_control_plane/models/outputhumiohec.py +16 -43
  126. cribl_control_plane/models/outputinfluxdb.py +16 -42
  127. cribl_control_plane/models/outputkafka.py +28 -74
  128. cribl_control_plane/models/outputkinesis.py +16 -40
  129. cribl_control_plane/models/outputloki.py +16 -41
  130. cribl_control_plane/models/outputminio.py +24 -65
  131. cribl_control_plane/models/outputmsk.py +30 -82
  132. cribl_control_plane/models/outputnewrelic.py +18 -43
  133. cribl_control_plane/models/outputnewrelicevents.py +14 -41
  134. cribl_control_plane/models/outputopentelemetry.py +26 -67
  135. cribl_control_plane/models/outputprometheus.py +12 -35
  136. cribl_control_plane/models/outputring.py +8 -19
  137. cribl_control_plane/models/outputs3.py +26 -68
  138. cribl_control_plane/models/outputsecuritylake.py +18 -52
  139. cribl_control_plane/models/outputsentinel.py +18 -45
  140. cribl_control_plane/models/outputsentineloneaisiem.py +18 -50
  141. cribl_control_plane/models/outputservicenow.py +24 -60
  142. cribl_control_plane/models/outputsignalfx.py +14 -37
  143. cribl_control_plane/models/outputsns.py +14 -36
  144. cribl_control_plane/models/outputsplunk.py +24 -60
  145. cribl_control_plane/models/outputsplunkhec.py +12 -35
  146. cribl_control_plane/models/outputsplunklb.py +30 -77
  147. cribl_control_plane/models/outputsqs.py +16 -41
  148. cribl_control_plane/models/outputstatsd.py +14 -30
  149. cribl_control_plane/models/outputstatsdext.py +12 -29
  150. cribl_control_plane/models/outputsumologic.py +12 -35
  151. cribl_control_plane/models/outputsyslog.py +24 -58
  152. cribl_control_plane/models/outputtcpjson.py +20 -52
  153. cribl_control_plane/models/outputwavefront.py +12 -35
  154. cribl_control_plane/models/outputwebhook.py +22 -58
  155. cribl_control_plane/models/outputxsiam.py +14 -35
  156. cribl_control_plane/models/packinfo.py +0 -3
  157. cribl_control_plane/models/packinstallinfo.py +0 -3
  158. cribl_control_plane/models/productscore.py +1 -2
  159. cribl_control_plane/models/rbacresource.py +1 -2
  160. cribl_control_plane/models/resourcepolicy.py +2 -4
  161. cribl_control_plane/models/runnablejobcollection.py +13 -30
  162. cribl_control_plane/models/runnablejobexecutor.py +4 -13
  163. cribl_control_plane/models/runnablejobscheduledsearch.py +2 -7
  164. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +2 -8
  165. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +2 -8
  166. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +5 -6
  167. cribl_control_plane/models/workertypes.py +1 -2
  168. {cribl_control_plane-0.0.50rc1.dist-info → cribl_control_plane-0.0.51.dist-info}/METADATA +14 -5
  169. cribl_control_plane-0.0.51.dist-info/RECORD +325 -0
  170. cribl_control_plane/models/error.py +0 -16
  171. cribl_control_plane/models/gethealthinfoop.py +0 -17
  172. cribl_control_plane/models/gitshowresult.py +0 -19
  173. cribl_control_plane-0.0.50rc1.dist-info/RECORD +0 -328
  174. {cribl_control_plane-0.0.50rc1.dist-info → cribl_control_plane-0.0.51.dist-info}/WHEEL +0 -0
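
Nearly all of the model-file churn above is one mechanical change: the generated models drop Speakeasy's open-enum machinery (the `utils.OpenEnumMeta` metaclass and the `PlainValidator(validate_open_enum(False))` wrappers), leaving plain closed `str` enums, as the hunks below show. A minimal sketch of the practical difference, assuming pydantic v2; `Compression` and `Output` are illustrative stand-ins, not actual SDK classes:

    # Closed-enum behavior as of 0.0.51 (assumes pydantic v2 is installed;
    # the names below are hypothetical, not from the cribl_control_plane SDK).
    from enum import Enum
    from typing import Optional

    import pydantic


    class Compression(str, Enum):
        NONE = "none"
        GZIP = "gzip"


    class Output(pydantic.BaseModel):
        compress: Optional[Compression] = Compression.GZIP


    # Known member values still coerce to the enum as before.
    print(Output(compress="gzip").compress)  # Compression.GZIP

    # A value outside the declared members now fails model validation.
    try:
        Output(compress="zstd")
    except pydantic.ValidationError as err:
        print("rejected:", err.error_count(), "validation error(s)")

With the open-enum wrappers in 0.0.50rc1, a value outside the declared members would, per Speakeasy's open-enum pattern, pass validation and be carried through as a raw string; in 0.0.51 the same value raises a ValidationError, so callers that relied on unrecognized server-side enum values should take note.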
cribl_control_plane/models/outputdataset.py
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputDatasetType(str, Enum):
     DATASET = "dataset"


-class OutputDatasetSeverity(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatasetSeverity(str, Enum):
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""

     FINEST = "finest"
@@ -81,7 +78,7 @@ class OutputDatasetTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class DataSetSite(str, Enum, metaclass=utils.OpenEnumMeta):
+class DataSetSite(str, Enum):
     r"""DataSet site to which events should be sent"""

     US = "us"
@@ -100,7 +97,7 @@ class OutputDatasetExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputDatasetFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatasetFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -108,7 +105,7 @@ class OutputDatasetFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumM
     NONE = "none"


-class OutputDatasetBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatasetBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -116,28 +113,28 @@ class OutputDatasetBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta)
     QUEUE = "queue"


-class OutputDatasetAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatasetAuthenticationMethod(str, Enum):
     r"""Enter API key directly, or select a stored secret"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputDatasetCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatasetCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDatasetQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatasetQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputDatasetMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDatasetMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -275,10 +272,7 @@ class OutputDataset(BaseModel):
     r"""Name of the event field that contains the timestamp. If not specified, defaults to `ts`, `_time`, or `Date.now()`, in that order."""

     default_severity: Annotated[
-        Annotated[
-            Optional[OutputDatasetSeverity], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="defaultSeverity"),
+        Optional[OutputDatasetSeverity], pydantic.Field(alias="defaultSeverity")
     ] = OutputDatasetSeverity.INFO
     r"""Default value for event severity. If the `sev` or `__severity` fields are set on an event, the first one matching will override this value."""

@@ -298,9 +292,7 @@ class OutputDataset(BaseModel):
     ] = False
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

-    site: Annotated[
-        Optional[DataSetSite], PlainValidator(validate_open_enum(False))
-    ] = DataSetSite.US
+    site: Optional[DataSetSite] = DataSetSite.US
     r"""DataSet site to which events should be sent"""

     concurrency: Optional[float] = 5
@@ -347,10 +339,7 @@ class OutputDataset(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputDatasetFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDatasetFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDatasetFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -361,20 +350,13 @@ class OutputDataset(BaseModel):
     r"""List of headers that are safe to log in plain text"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDatasetBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDatasetBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDatasetBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputDatasetAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputDatasetAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputDatasetAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""

@@ -401,29 +383,19 @@ class OutputDataset(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputDatasetCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputDatasetCompression], pydantic.Field(alias="pqCompress")
     ] = OutputDatasetCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDatasetQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDatasetQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDatasetQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputDatasetMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputDatasetMode.ERROR
+    pq_mode: Annotated[Optional[OutputDatasetMode], pydantic.Field(alias="pqMode")] = (
+        OutputDatasetMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
cribl_control_plane/models/outputdiskspool.py
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputDiskSpoolType(str, Enum):
     DISK_SPOOL = "disk_spool"


-class OutputDiskSpoolCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDiskSpoolCompression(str, Enum):
     r"""Data compression format. Default is gzip."""

     NONE = "none"
@@ -76,9 +73,7 @@ class OutputDiskSpool(BaseModel):
     max_data_time: Annotated[Optional[str], pydantic.Field(alias="maxDataTime")] = "24h"
     r"""Maximum amount of time to retain data before older buckets are deleted. Examples: 2h, 4d. Default is 24h."""

-    compress: Annotated[
-        Optional[OutputDiskSpoolCompression], PlainValidator(validate_open_enum(False))
-    ] = OutputDiskSpoolCompression.GZIP
+    compress: Optional[OutputDiskSpoolCompression] = OutputDiskSpoolCompression.GZIP
     r"""Data compression format. Default is gzip."""

     partition_expr: Annotated[Optional[str], pydantic.Field(alias="partitionExpr")] = (
cribl_control_plane/models/outputdls3.py
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputDlS3Type(str, Enum):
     DL_S3 = "dl_s3"


-class OutputDlS3AuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3AuthenticationMethod(str, Enum):
     r"""AWS authentication method. Choose Auto to use IAM roles."""

     AUTO = "auto"
@@ -23,14 +20,14 @@ class OutputDlS3AuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
     SECRET = "secret"


-class OutputDlS3SignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3SignatureVersion(str, Enum):
     r"""Signature version to use for signing S3 requests"""

     V2 = "v2"
     V4 = "v4"


-class OutputDlS3ObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3ObjectACL(str, Enum):
     r"""Object ACL to assign to uploaded objects"""

     PRIVATE = "private"
@@ -42,7 +39,7 @@ class OutputDlS3ObjectACL(str, Enum, metaclass=utils.OpenEnumMeta):
     BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control"


-class OutputDlS3StorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3StorageClass(str, Enum):
     r"""Storage class to select for uploaded objects"""

     STANDARD = "STANDARD"
@@ -55,14 +52,12 @@ class OutputDlS3StorageClass(str, Enum, metaclass=utils.OpenEnumMeta):
     DEEP_ARCHIVE = "DEEP_ARCHIVE"


-class OutputDlS3ServerSideEncryptionForUploadedObjects(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputDlS3ServerSideEncryptionForUploadedObjects(str, Enum):
     AES256 = "AES256"
     AWS_KMS = "aws:kms"


-class OutputDlS3DataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3DataFormat(str, Enum):
     r"""Format of the output data"""

     JSON = "json"
@@ -70,28 +65,28 @@ class OutputDlS3DataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET = "parquet"


-class OutputDlS3BackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3BackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
     DROP = "drop"


-class OutputDlS3DiskSpaceProtection(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3DiskSpaceProtection(str, Enum):
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

     BLOCK = "block"
     DROP = "drop"


-class OutputDlS3Compression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3Compression(str, Enum):
     r"""Data compression format to apply to HTTP content before it is delivered"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDlS3CompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3CompressionLevel(str, Enum):
     r"""Compression level to apply before moving files to final destination"""

     BEST_SPEED = "best_speed"
@@ -99,7 +94,7 @@ class OutputDlS3CompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
     BEST_COMPRESSION = "best_compression"


-class OutputDlS3ParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3ParquetVersion(str, Enum):
     r"""Determines which data types are supported and how they are represented"""

     PARQUET_1_0 = "PARQUET_1_0"
@@ -107,7 +102,7 @@ class OutputDlS3ParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
     PARQUET_2_6 = "PARQUET_2_6"


-class OutputDlS3DataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDlS3DataPageVersion(str, Enum):
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""

     DATA_PAGE_V1 = "DATA_PAGE_V1"
@@ -279,10 +274,7 @@ class OutputDlS3(BaseModel):
     r"""Secret key. This value can be a constant or a JavaScript expression. Example: `${C.env.SOME_SECRET}`)"""

     aws_authentication_method: Annotated[
-        Annotated[
-            Optional[OutputDlS3AuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDlS3AuthenticationMethod],
         pydantic.Field(alias="awsAuthenticationMethod"),
     ] = OutputDlS3AuthenticationMethod.AUTO
     r"""AWS authentication method. Choose Auto to use IAM roles."""
@@ -291,11 +283,7 @@ class OutputDlS3(BaseModel):
     r"""S3 service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to S3-compatible endpoint."""

     signature_version: Annotated[
-        Annotated[
-            Optional[OutputDlS3SignatureVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="signatureVersion"),
+        Optional[OutputDlS3SignatureVersion], pydantic.Field(alias="signatureVersion")
     ] = OutputDlS3SignatureVersion.V4
     r"""Signature version to use for signing S3 requests"""

@@ -343,26 +331,17 @@ class OutputDlS3(BaseModel):
     r"""Prefix to prepend to files before uploading. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `myKeyPrefix-${C.vars.myVar}`"""

     object_acl: Annotated[
-        Annotated[
-            Optional[OutputDlS3ObjectACL], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="objectACL"),
+        Optional[OutputDlS3ObjectACL], pydantic.Field(alias="objectACL")
     ] = OutputDlS3ObjectACL.PRIVATE
     r"""Object ACL to assign to uploaded objects"""

     storage_class: Annotated[
-        Annotated[
-            Optional[OutputDlS3StorageClass], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="storageClass"),
+        Optional[OutputDlS3StorageClass], pydantic.Field(alias="storageClass")
     ] = None
     r"""Storage class to select for uploaded objects"""

     server_side_encryption: Annotated[
-        Annotated[
-            Optional[OutputDlS3ServerSideEncryptionForUploadedObjects],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDlS3ServerSideEncryptionForUploadedObjects],
         pydantic.Field(alias="serverSideEncryption"),
     ] = None
@@ -375,10 +354,7 @@ class OutputDlS3(BaseModel):
     r"""Remove empty staging directories after moving files"""

     format_: Annotated[
-        Annotated[
-            Optional[OutputDlS3DataFormat], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputDlS3DataFormat], pydantic.Field(alias="format")
     ] = OutputDlS3DataFormat.JSON
     r"""Format of the output data"""

@@ -411,11 +387,7 @@ class OutputDlS3(BaseModel):
     r"""Buffer size used to write to a file"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDlS3BackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="onBackpressure"),
+        Optional[OutputDlS3BackpressureBehavior], pydantic.Field(alias="onBackpressure")
     ] = OutputDlS3BackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

@@ -425,10 +397,7 @@ class OutputDlS3(BaseModel):
     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""

     on_disk_full_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDlS3DiskSpaceProtection],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDlS3DiskSpaceProtection],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputDlS3DiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -471,17 +440,11 @@ class OutputDlS3(BaseModel):
     aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
     r"""Select or create a stored secret that references your access key and secret key"""

-    compress: Annotated[
-        Optional[OutputDlS3Compression], PlainValidator(validate_open_enum(False))
-    ] = OutputDlS3Compression.GZIP
+    compress: Optional[OutputDlS3Compression] = OutputDlS3Compression.GZIP
     r"""Data compression format to apply to HTTP content before it is delivered"""

     compression_level: Annotated[
-        Annotated[
-            Optional[OutputDlS3CompressionLevel],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="compressionLevel"),
+        Optional[OutputDlS3CompressionLevel], pydantic.Field(alias="compressionLevel")
     ] = OutputDlS3CompressionLevel.BEST_SPEED
     r"""Compression level to apply before moving files to final destination"""

@@ -491,19 +454,12 @@ class OutputDlS3(BaseModel):
     r"""Automatically calculate the schema based on the events of each Parquet file generated"""

     parquet_version: Annotated[
-        Annotated[
-            Optional[OutputDlS3ParquetVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="parquetVersion"),
+        Optional[OutputDlS3ParquetVersion], pydantic.Field(alias="parquetVersion")
     ] = OutputDlS3ParquetVersion.PARQUET_2_6
     r"""Determines which data types are supported and how they are represented"""

     parquet_data_page_version: Annotated[
-        Annotated[
-            Optional[OutputDlS3DataPageVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDlS3DataPageVersion],
         pydantic.Field(alias="parquetDataPageVersion"),
     ] = OutputDlS3DataPageVersion.DATA_PAGE_V2
     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
cribl_control_plane/models/outputdynatracehttp.py
@@ -1,12 +1,9 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict

@@ -15,7 +12,7 @@ class OutputDynatraceHTTPType(str, Enum):
     DYNATRACE_HTTP = "dynatrace_http"


-class OutputDynatraceHTTPMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceHTTPMethod(str, Enum):
     r"""The method to use when sending events"""

     POST = "POST"
@@ -34,9 +31,7 @@ class OutputDynatraceHTTPExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputDynatraceHTTPFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputDynatraceHTTPFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -98,7 +93,7 @@ class OutputDynatraceHTTPTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputDynatraceHTTPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceHTTPBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -106,44 +101,44 @@ class OutputDynatraceHTTPBackpressureBehavior(str, Enum, metaclass=utils.OpenEnu
     QUEUE = "queue"


-class OutputDynatraceHTTPAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceHTTPAuthenticationType(str, Enum):
     TOKEN = "token"
     TEXT_SECRET = "textSecret"


-class OutputDynatraceHTTPFormat(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceHTTPFormat(str, Enum):
     r"""How to format events before sending. Defaults to JSON. Plaintext is not currently supported."""

     JSON_ARRAY = "json_array"
     PLAINTEXT = "plaintext"


-class Endpoint(str, Enum, metaclass=utils.OpenEnumMeta):
+class Endpoint(str, Enum):
     CLOUD = "cloud"
     ACTIVE_GATE = "activeGate"
     MANUAL = "manual"


-class TelemetryType(str, Enum, metaclass=utils.OpenEnumMeta):
+class TelemetryType(str, Enum):
     LOGS = "logs"
     METRICS = "metrics"


-class OutputDynatraceHTTPCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceHTTPCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputDynatraceHTTPQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceHTTPQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputDynatraceHTTPMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputDynatraceHTTPMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -266,9 +261,7 @@ class OutputDynatraceHTTP(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    method: Annotated[
-        Optional[OutputDynatraceHTTPMethod], PlainValidator(validate_open_enum(False))
-    ] = OutputDynatraceHTTPMethod.POST
+    method: Optional[OutputDynatraceHTTPMethod] = OutputDynatraceHTTPMethod.POST
     r"""The method to use when sending events"""

     keep_alive: Annotated[Optional[bool], pydantic.Field(alias="keepAlive")] = True
@@ -318,10 +311,7 @@ class OutputDynatraceHTTP(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputDynatraceHTTPFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceHTTPFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputDynatraceHTTPFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -348,38 +338,25 @@ class OutputDynatraceHTTP(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDynatraceHTTPBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceHTTPBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputDynatraceHTTPBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputDynatraceHTTPAuthenticationType],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceHTTPAuthenticationType],
         pydantic.Field(alias="authType"),
     ] = OutputDynatraceHTTPAuthenticationType.TOKEN

     format_: Annotated[
-        Annotated[
-            Optional[OutputDynatraceHTTPFormat],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="format"),
+        Optional[OutputDynatraceHTTPFormat], pydantic.Field(alias="format")
     ] = OutputDynatraceHTTPFormat.JSON_ARRAY
     r"""How to format events before sending. Defaults to JSON. Plaintext is not currently supported."""

-    endpoint: Annotated[
-        Optional[Endpoint], PlainValidator(validate_open_enum(False))
-    ] = Endpoint.CLOUD
+    endpoint: Optional[Endpoint] = Endpoint.CLOUD

     telemetry_type: Annotated[
-        Annotated[Optional[TelemetryType], PlainValidator(validate_open_enum(False))],
-        pydantic.Field(alias="telemetryType"),
+        Optional[TelemetryType], pydantic.Field(alias="telemetryType")
     ] = TelemetryType.LOGS

     total_memory_limit_kb: Annotated[
@@ -403,28 +380,18 @@ class OutputDynatraceHTTP(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputDynatraceHTTPCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputDynatraceHTTPCompression], pydantic.Field(alias="pqCompress")
     ] = OutputDynatraceHTTPCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputDynatraceHTTPQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputDynatraceHTTPQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputDynatraceHTTPQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputDynatraceHTTPMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputDynatraceHTTPMode], pydantic.Field(alias="pqMode")
     ] = OutputDynatraceHTTPMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""