cribl-control-plane 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl

This diff shows the contents of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two released versions.

This version of cribl-control-plane has been flagged as a potentially problematic release.

Files changed (144)
  1. cribl_control_plane/_version.py +3 -3
  2. cribl_control_plane/{outputs.py → destinations.py} +69 -71
  3. cribl_control_plane/errors/healthstatus_error.py +2 -8
  4. cribl_control_plane/models/__init__.py +5347 -115
  5. cribl_control_plane/models/createinputop.py +18216 -2
  6. cribl_control_plane/models/createoutputop.py +18417 -4
  7. cribl_control_plane/models/createoutputtestbyidop.py +2 -2
  8. cribl_control_plane/models/deleteoutputbyidop.py +2 -2
  9. cribl_control_plane/models/deleteoutputpqbyidop.py +2 -2
  10. cribl_control_plane/models/getoutputbyidop.py +2 -2
  11. cribl_control_plane/models/getoutputpqbyidop.py +2 -2
  12. cribl_control_plane/models/getoutputsamplesbyidop.py +2 -2
  13. cribl_control_plane/models/healthstatus.py +4 -7
  14. cribl_control_plane/models/inputappscope.py +16 -36
  15. cribl_control_plane/models/inputazureblob.py +8 -19
  16. cribl_control_plane/models/inputcollection.py +6 -15
  17. cribl_control_plane/models/inputconfluentcloud.py +22 -45
  18. cribl_control_plane/models/inputcribl.py +6 -13
  19. cribl_control_plane/models/inputcriblhttp.py +12 -27
  20. cribl_control_plane/models/inputcribllakehttp.py +14 -26
  21. cribl_control_plane/models/inputcriblmetrics.py +6 -14
  22. cribl_control_plane/models/inputcribltcp.py +12 -27
  23. cribl_control_plane/models/inputcrowdstrike.py +12 -28
  24. cribl_control_plane/models/inputdatadogagent.py +12 -28
  25. cribl_control_plane/models/inputdatagen.py +6 -13
  26. cribl_control_plane/models/inputedgeprometheus.py +33 -64
  27. cribl_control_plane/models/inputelastic.py +18 -44
  28. cribl_control_plane/models/inputeventhub.py +10 -19
  29. cribl_control_plane/models/inputexec.py +8 -16
  30. cribl_control_plane/models/inputfile.py +8 -17
  31. cribl_control_plane/models/inputfirehose.py +12 -27
  32. cribl_control_plane/models/inputgooglepubsub.py +10 -23
  33. cribl_control_plane/models/inputgrafana_union.py +39 -81
  34. cribl_control_plane/models/inputhttp.py +12 -27
  35. cribl_control_plane/models/inputhttpraw.py +12 -27
  36. cribl_control_plane/models/inputjournalfiles.py +8 -16
  37. cribl_control_plane/models/inputkafka.py +18 -45
  38. cribl_control_plane/models/inputkinesis.py +18 -42
  39. cribl_control_plane/models/inputkubeevents.py +6 -13
  40. cribl_control_plane/models/inputkubelogs.py +10 -18
  41. cribl_control_plane/models/inputkubemetrics.py +10 -18
  42. cribl_control_plane/models/inputloki.py +14 -33
  43. cribl_control_plane/models/inputmetrics.py +10 -25
  44. cribl_control_plane/models/inputmodeldriventelemetry.py +14 -33
  45. cribl_control_plane/models/inputmsk.py +20 -52
  46. cribl_control_plane/models/inputnetflow.py +8 -15
  47. cribl_control_plane/models/inputoffice365mgmt.py +18 -37
  48. cribl_control_plane/models/inputoffice365msgtrace.py +20 -41
  49. cribl_control_plane/models/inputoffice365service.py +20 -41
  50. cribl_control_plane/models/inputopentelemetry.py +20 -42
  51. cribl_control_plane/models/inputprometheus.py +22 -54
  52. cribl_control_plane/models/inputprometheusrw.py +14 -34
  53. cribl_control_plane/models/inputrawudp.py +8 -15
  54. cribl_control_plane/models/inputs3.py +10 -23
  55. cribl_control_plane/models/inputs3inventory.py +12 -28
  56. cribl_control_plane/models/inputsecuritylake.py +12 -29
  57. cribl_control_plane/models/inputsnmp.py +10 -20
  58. cribl_control_plane/models/inputsplunk.py +16 -37
  59. cribl_control_plane/models/inputsplunkhec.py +14 -33
  60. cribl_control_plane/models/inputsplunksearch.py +18 -37
  61. cribl_control_plane/models/inputsqs.py +14 -31
  62. cribl_control_plane/models/inputsyslog_union.py +29 -53
  63. cribl_control_plane/models/inputsystemmetrics.py +26 -50
  64. cribl_control_plane/models/inputsystemstate.py +10 -18
  65. cribl_control_plane/models/inputtcp.py +14 -33
  66. cribl_control_plane/models/inputtcpjson.py +14 -33
  67. cribl_control_plane/models/inputwef.py +22 -45
  68. cribl_control_plane/models/inputwindowsmetrics.py +26 -46
  69. cribl_control_plane/models/inputwineventlogs.py +12 -22
  70. cribl_control_plane/models/inputwiz.py +12 -25
  71. cribl_control_plane/models/inputzscalerhec.py +14 -33
  72. cribl_control_plane/models/listoutputop.py +2 -2
  73. cribl_control_plane/models/output.py +3 -6
  74. cribl_control_plane/models/outputazureblob.py +20 -52
  75. cribl_control_plane/models/outputazuredataexplorer.py +30 -77
  76. cribl_control_plane/models/outputazureeventhub.py +20 -44
  77. cribl_control_plane/models/outputazurelogs.py +14 -37
  78. cribl_control_plane/models/outputclickhouse.py +22 -59
  79. cribl_control_plane/models/outputcloudwatch.py +12 -33
  80. cribl_control_plane/models/outputconfluentcloud.py +32 -75
  81. cribl_control_plane/models/outputcriblhttp.py +18 -46
  82. cribl_control_plane/models/outputcribllake.py +18 -48
  83. cribl_control_plane/models/outputcribltcp.py +20 -47
  84. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +16 -54
  85. cribl_control_plane/models/outputdatadog.py +22 -50
  86. cribl_control_plane/models/outputdataset.py +20 -48
  87. cribl_control_plane/models/outputdefault.py +2 -5
  88. cribl_control_plane/models/outputdevnull.py +2 -5
  89. cribl_control_plane/models/outputdiskspool.py +4 -9
  90. cribl_control_plane/models/outputdls3.py +26 -72
  91. cribl_control_plane/models/outputdynatracehttp.py +22 -57
  92. cribl_control_plane/models/outputdynatraceotlp.py +24 -59
  93. cribl_control_plane/models/outputelastic.py +20 -45
  94. cribl_control_plane/models/outputelasticcloud.py +14 -40
  95. cribl_control_plane/models/outputexabeam.py +12 -33
  96. cribl_control_plane/models/outputfilesystem.py +16 -41
  97. cribl_control_plane/models/outputgooglechronicle.py +18 -54
  98. cribl_control_plane/models/outputgooglecloudlogging.py +16 -46
  99. cribl_control_plane/models/outputgooglecloudstorage.py +26 -71
  100. cribl_control_plane/models/outputgooglepubsub.py +16 -39
  101. cribl_control_plane/models/{outputgrafanacloud_union.py → outputgrafanacloud.py} +49 -110
  102. cribl_control_plane/models/outputgraphite.py +16 -35
  103. cribl_control_plane/models/outputhoneycomb.py +14 -37
  104. cribl_control_plane/models/outputhumiohec.py +18 -47
  105. cribl_control_plane/models/outputinfluxdb.py +18 -44
  106. cribl_control_plane/models/outputkafka.py +28 -73
  107. cribl_control_plane/models/outputkinesis.py +18 -44
  108. cribl_control_plane/models/outputloki.py +18 -43
  109. cribl_control_plane/models/outputminio.py +26 -69
  110. cribl_control_plane/models/outputmsk.py +30 -81
  111. cribl_control_plane/models/outputnetflow.py +2 -5
  112. cribl_control_plane/models/outputnewrelic.py +20 -45
  113. cribl_control_plane/models/outputnewrelicevents.py +16 -45
  114. cribl_control_plane/models/outputopentelemetry.py +28 -69
  115. cribl_control_plane/models/outputprometheus.py +14 -37
  116. cribl_control_plane/models/outputring.py +10 -21
  117. cribl_control_plane/models/outputrouter.py +2 -5
  118. cribl_control_plane/models/outputs3.py +28 -72
  119. cribl_control_plane/models/outputsecuritylake.py +20 -56
  120. cribl_control_plane/models/outputsentinel.py +20 -49
  121. cribl_control_plane/models/outputsentineloneaisiem.py +20 -54
  122. cribl_control_plane/models/outputservicenow.py +26 -64
  123. cribl_control_plane/models/outputsignalfx.py +16 -39
  124. cribl_control_plane/models/outputsnmp.py +2 -5
  125. cribl_control_plane/models/outputsns.py +16 -40
  126. cribl_control_plane/models/outputsplunk.py +26 -64
  127. cribl_control_plane/models/outputsplunkhec.py +14 -37
  128. cribl_control_plane/models/outputsplunklb.py +36 -83
  129. cribl_control_plane/models/outputsqs.py +18 -45
  130. cribl_control_plane/models/outputstatsd.py +16 -34
  131. cribl_control_plane/models/outputstatsdext.py +14 -33
  132. cribl_control_plane/models/outputsumologic.py +14 -37
  133. cribl_control_plane/models/outputsyslog.py +26 -60
  134. cribl_control_plane/models/outputtcpjson.py +22 -54
  135. cribl_control_plane/models/outputwavefront.py +14 -37
  136. cribl_control_plane/models/outputwebhook.py +24 -60
  137. cribl_control_plane/models/outputxsiam.py +16 -37
  138. cribl_control_plane/models/updateoutputbyidop.py +4 -4
  139. cribl_control_plane/sdk.py +3 -5
  140. cribl_control_plane/sources.py +8 -10
  141. {cribl_control_plane-0.0.15.dist-info → cribl_control_plane-0.0.17.dist-info}/METADATA +13 -13
  142. cribl_control_plane-0.0.17.dist-info/RECORD +215 -0
  143. cribl_control_plane-0.0.15.dist-info/RECORD +0 -215
  144. {cribl_control_plane-0.0.15.dist-info → cribl_control_plane-0.0.17.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputnewrelic.py +20 -45

@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputNewrelicType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicType(str, Enum):
     NEWRELIC = "newrelic"


-class OutputNewrelicRegion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicRegion(str, Enum):
     r"""Which New Relic region endpoint to use."""

     US = "US"
@@ -23,7 +20,7 @@ class OutputNewrelicRegion(str, Enum, metaclass=utils.OpenEnumMeta):
     CUSTOM = "Custom"


-class FieldName(str, Enum, metaclass=utils.OpenEnumMeta):
+class FieldName(str, Enum):
     SERVICE = "service"
     HOSTNAME = "hostname"
     TIMESTAMP = "timestamp"
@@ -37,7 +34,7 @@ class OutputNewrelicMetadatumTypedDict(TypedDict):


 class OutputNewrelicMetadatum(BaseModel):
-    name: Annotated[FieldName, PlainValidator(validate_open_enum(False))]
+    name: FieldName

     value: str
     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
@@ -54,7 +51,7 @@ class OutputNewrelicExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputNewrelicFailedRequestLoggingMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -116,7 +113,7 @@ class OutputNewrelicTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputNewrelicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -124,28 +121,28 @@ class OutputNewrelicBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta
     QUEUE = "queue"


-class OutputNewrelicAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicAuthenticationMethod(str, Enum):
     r"""Enter API key directly, or select a stored secret"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputNewrelicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputNewrelicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -244,7 +241,7 @@ class OutputNewrelic(BaseModel):
     id: str
     r"""Unique ID for this output"""

-    type: Annotated[OutputNewrelicType, PlainValidator(validate_open_enum(False))]
+    type: OutputNewrelicType

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -260,9 +257,7 @@ class OutputNewrelic(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    region: Annotated[
-        Optional[OutputNewrelicRegion], PlainValidator(validate_open_enum(False))
-    ] = OutputNewrelicRegion.US
+    region: Optional[OutputNewrelicRegion] = OutputNewrelicRegion.US
     r"""Which New Relic region endpoint to use."""

     log_type: Annotated[Optional[str], pydantic.Field(alias="logType")] = ""
@@ -318,10 +313,7 @@ class OutputNewrelic(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputNewrelicFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -348,20 +340,13 @@ class OutputNewrelic(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputNewrelicBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputNewrelicBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputNewrelicAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="authType"),
+        Optional[OutputNewrelicAuthenticationMethod], pydantic.Field(alias="authType")
     ] = OutputNewrelicAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""

@@ -388,29 +373,19 @@ class OutputNewrelic(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputNewrelicCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputNewrelicCompression], pydantic.Field(alias="pqCompress")
     ] = OutputNewrelicCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputNewrelicQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputNewrelicQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputNewrelicMode.ERROR
+    pq_mode: Annotated[Optional[OutputNewrelicMode], pydantic.Field(alias="pqMode")] = (
+        OutputNewrelicMode.ERROR
+    )
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     pq_controls: Annotated[
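The recurring change in this file, in practical terms: every generated enum drops the utils.OpenEnumMeta metaclass and the PlainValidator(validate_open_enum(False)) wrapper, machinery that presumably existed so that values outside the declared members could still pass validation. With plain str/Enum classes, pydantic only accepts exact members. Below is a minimal, self-contained sketch of that closed-enum behavior under pydantic v2; Region and Destination are hypothetical stand-ins, not the SDK's actual OutputNewrelicRegion or OutputNewrelic classes.

from enum import Enum
from typing import Optional

import pydantic


class Region(str, Enum):
    # Stand-in for a generated closed enum such as OutputNewrelicRegion
    US = "US"
    EU = "EU"
    CUSTOM = "Custom"


class Destination(pydantic.BaseModel):
    # Stand-in for a generated output model with an enum-typed field
    region: Optional[Region] = Region.US


print(Destination(region="EU").region)  # exact member values still coerce to Region.EU

try:
    Destination(region="ap-southeast")  # not a declared member, so validation fails
except pydantic.ValidationError as exc:
    print("rejected:", exc.errors()[0]["type"])  # 'enum' in pydantic v2

If calling code was relying on the older, more permissive validation to pass arbitrary strings for these fields, it now has to supply a declared member (or its exact value); anything else raises ValidationError at model construction time.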
cribl_control_plane/models/outputnewrelicevents.py +16 -45

@@ -1,21 +1,18 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputNewrelicEventsType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsType(str, Enum):
     NEWRELIC_EVENTS = "newrelic_events"


-class OutputNewrelicEventsRegion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsRegion(str, Enum):
     r"""Which New Relic region endpoint to use."""

     US = "US"
@@ -34,9 +31,7 @@ class OutputNewrelicEventsExtraHTTPHeader(BaseModel):
     name: Optional[str] = None


-class OutputNewrelicEventsFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputNewrelicEventsFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -98,7 +93,7 @@ class OutputNewrelicEventsTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputNewrelicEventsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -106,28 +101,28 @@ class OutputNewrelicEventsBackpressureBehavior(str, Enum, metaclass=utils.OpenEn
     QUEUE = "queue"


-class OutputNewrelicEventsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsAuthenticationMethod(str, Enum):
     r"""Enter API key directly, or select a stored secret"""

     MANUAL = "manual"
     SECRET = "secret"


-class OutputNewrelicEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputNewrelicEventsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputNewrelicEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputNewrelicEventsMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -232,9 +227,7 @@ class OutputNewrelicEvents(BaseModel):
     id: Optional[str] = None
     r"""Unique ID for this output"""

-    type: Annotated[
-        Optional[OutputNewrelicEventsType], PlainValidator(validate_open_enum(False))
-    ] = None
+    type: Optional[OutputNewrelicEventsType] = None

     pipeline: Optional[str] = None
     r"""Pipeline to process data before sending out to this output"""
@@ -250,9 +243,7 @@ class OutputNewrelicEvents(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    region: Annotated[
-        Optional[OutputNewrelicEventsRegion], PlainValidator(validate_open_enum(False))
-    ] = OutputNewrelicEventsRegion.US
+    region: Optional[OutputNewrelicEventsRegion] = OutputNewrelicEventsRegion.US
     r"""Which New Relic region endpoint to use."""

     concurrency: Optional[float] = 5
@@ -299,10 +290,7 @@ class OutputNewrelicEvents(BaseModel):
     r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicEventsFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputNewrelicEventsFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -329,19 +317,13 @@ class OutputNewrelicEvents(BaseModel):
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicEventsBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputNewrelicEventsBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsAuthenticationMethod],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicEventsAuthenticationMethod],
         pydantic.Field(alias="authType"),
     ] = OutputNewrelicEventsAuthenticationMethod.MANUAL
     r"""Enter API key directly, or select a stored secret"""
@@ -364,29 +346,18 @@ class OutputNewrelicEvents(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqCompress"),
+        Optional[OutputNewrelicEventsCompression], pydantic.Field(alias="pqCompress")
     ] = OutputNewrelicEventsCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputNewrelicEventsQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputNewrelicEventsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputNewrelicEventsMode], pydantic.Field(alias="pqMode")
     ] = OutputNewrelicEventsMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

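The slimmed-down fields keep the other half of the Speakeasy pattern: a snake_case attribute annotated with pydantic.Field(alias="camelCase") plus an enum default, as in pq_on_backpressure / pqOnBackpressure above. The sketch below shows how pydantic v2 treats such a field. QueueFullBehavior and PQSettings are hypothetical stand-ins, and the populate_by_name setting is an assumption about what cribl_control_plane.types.BaseModel configures, not something this diff confirms.

from enum import Enum
from typing import Optional

import pydantic
from typing_extensions import Annotated


class QueueFullBehavior(str, Enum):
    # Stand-in for e.g. OutputNewrelicEventsQueueFullBehavior
    BLOCK = "block"
    DROP = "drop"


class PQSettings(pydantic.BaseModel):
    # Assumption: the SDK's shared BaseModel also allows population by field name
    model_config = pydantic.ConfigDict(populate_by_name=True)

    pq_on_backpressure: Annotated[
        Optional[QueueFullBehavior], pydantic.Field(alias="pqOnBackpressure")
    ] = QueueFullBehavior.BLOCK


# Wire payloads use the camelCase alias; Python code reads the snake_case attribute.
settings = PQSettings.model_validate({"pqOnBackpressure": "drop"})
print(settings.pq_on_backpressure)         # QueueFullBehavior.DROP
print(settings.model_dump(by_alias=True))  # {'pqOnBackpressure': <QueueFullBehavior.DROP: 'drop'>}

The same alias-plus-default shape carries through the other pq_* and HTTP settings fields in these models; only the inner open-enum wrapper was removed.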
cribl_control_plane/models/outputopentelemetry.py +28 -69

@@ -1,35 +1,32 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 from __future__ import annotations
-from cribl_control_plane import utils
 from cribl_control_plane.types import BaseModel
-from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
-from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict


-class OutputOpenTelemetryType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryType(str, Enum):
     OPEN_TELEMETRY = "open_telemetry"


-class OutputOpenTelemetryProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryProtocol(str, Enum):
     r"""Select a transport option for OpenTelemetry"""

     GRPC = "grpc"
     HTTP = "http"


-class OutputOpenTelemetryOTLPVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryOTLPVersion(str, Enum):
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""

     ZERO_DOT_10_DOT_0 = "0.10.0"
     ONE_DOT_3_DOT_1 = "1.3.1"


-class OutputOpenTelemetryCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryCompressCompression(str, Enum):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     NONE = "none"
@@ -37,16 +34,14 @@ class OutputOpenTelemetryCompressCompression(str, Enum, metaclass=utils.OpenEnum
     GZIP = "gzip"


-class OutputOpenTelemetryHTTPCompressCompression(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputOpenTelemetryHTTPCompressCompression(str, Enum):
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     NONE = "none"
     GZIP = "gzip"


-class OutputOpenTelemetryAuthenticationType(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryAuthenticationType(str, Enum):
     r"""OpenTelemetry authentication type"""

     NONE = "none"
@@ -68,9 +63,7 @@ class OutputOpenTelemetryMetadatum(BaseModel):
     key: Optional[str] = ""


-class OutputOpenTelemetryFailedRequestLoggingMode(
-    str, Enum, metaclass=utils.OpenEnumMeta
-):
+class OutputOpenTelemetryFailedRequestLoggingMode(str, Enum):
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""

     PAYLOAD = "payload"
@@ -78,7 +71,7 @@ class OutputOpenTelemetryFailedRequestLoggingMode(
     NONE = "none"


-class OutputOpenTelemetryBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryBackpressureBehavior(str, Enum):
     r"""How to handle events when all receivers are exerting backpressure"""

     BLOCK = "block"
@@ -181,14 +174,14 @@ class OutputOpenTelemetryTimeoutRetrySettings(BaseModel):
     r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


-class OutputOpenTelemetryMinimumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryMinimumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
     TL_SV1_3 = "TLSv1.3"


-class OutputOpenTelemetryMaximumTLSVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryMaximumTLSVersion(str, Enum):
     TL_SV1 = "TLSv1"
     TL_SV1_1 = "TLSv1.1"
     TL_SV1_2 = "TLSv1.2"
@@ -243,37 +236,31 @@ class OutputOpenTelemetryTLSSettingsClientSide(BaseModel):
     r"""Passphrase to use to decrypt private key"""

     min_version: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryMinimumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputOpenTelemetryMinimumTLSVersion],
         pydantic.Field(alias="minVersion"),
     ] = None

     max_version: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryMaximumTLSVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputOpenTelemetryMaximumTLSVersion],
         pydantic.Field(alias="maxVersion"),
     ] = None


-class OutputOpenTelemetryPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryPqCompressCompression(str, Enum):
     r"""Codec to use to compress the persisted data"""

     NONE = "none"
     GZIP = "gzip"


-class OutputOpenTelemetryQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryQueueFullBehavior(str, Enum):
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     BLOCK = "block"
     DROP = "drop"


-class OutputOpenTelemetryMode(str, Enum, metaclass=utils.OpenEnumMeta):
+class OutputOpenTelemetryMode(str, Enum):
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

     ERROR = "error"
@@ -403,7 +390,7 @@ class OutputOpenTelemetryTypedDict(TypedDict):


 class OutputOpenTelemetry(BaseModel):
-    type: Annotated[OutputOpenTelemetryType, PlainValidator(validate_open_enum(False))]
+    type: OutputOpenTelemetryType

     endpoint: str
     r"""The endpoint where OTel events will be sent. Enter any valid URL or an IP address (IPv4 or IPv6; enclose IPv6 addresses in square brackets). Unspecified ports will default to 4317, unless the endpoint is an HTTPS-based URL or TLS is enabled, in which case 443 will be used."""
@@ -425,40 +412,27 @@ class OutputOpenTelemetry(BaseModel):
     streamtags: Optional[List[str]] = None
     r"""Tags for filtering and grouping in @{product}"""

-    protocol: Annotated[
-        Optional[OutputOpenTelemetryProtocol], PlainValidator(validate_open_enum(False))
-    ] = OutputOpenTelemetryProtocol.GRPC
+    protocol: Optional[OutputOpenTelemetryProtocol] = OutputOpenTelemetryProtocol.GRPC
     r"""Select a transport option for OpenTelemetry"""

     otlp_version: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryOTLPVersion],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="otlpVersion"),
+        Optional[OutputOpenTelemetryOTLPVersion], pydantic.Field(alias="otlpVersion")
     ] = OutputOpenTelemetryOTLPVersion.ZERO_DOT_10_DOT_0
     r"""The version of OTLP Protobuf definitions to use when structuring data to send"""

-    compress: Annotated[
-        Optional[OutputOpenTelemetryCompressCompression],
-        PlainValidator(validate_open_enum(False)),
-    ] = OutputOpenTelemetryCompressCompression.GZIP
+    compress: Optional[OutputOpenTelemetryCompressCompression] = (
+        OutputOpenTelemetryCompressCompression.GZIP
+    )
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     http_compress: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryHTTPCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputOpenTelemetryHTTPCompressCompression],
         pydantic.Field(alias="httpCompress"),
     ] = OutputOpenTelemetryHTTPCompressCompression.GZIP
     r"""Type of compression to apply to messages sent to the OpenTelemetry endpoint"""

     auth_type: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryAuthenticationType],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputOpenTelemetryAuthenticationType],
         pydantic.Field(alias="authType"),
     ] = OutputOpenTelemetryAuthenticationType.NONE
     r"""OpenTelemetry authentication type"""
@@ -498,10 +472,7 @@ class OutputOpenTelemetry(BaseModel):
     r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Body size limit."""

     failed_request_logging_mode: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryFailedRequestLoggingMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputOpenTelemetryFailedRequestLoggingMode],
         pydantic.Field(alias="failedRequestLoggingMode"),
     ] = OutputOpenTelemetryFailedRequestLoggingMode.NONE
     r"""Data to log when a request fails. All headers are redacted by default, unless listed as safe headers below."""
@@ -520,10 +491,7 @@ class OutputOpenTelemetry(BaseModel):
     r"""Disable to close the connection immediately after sending the outgoing request"""

     on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryBackpressureBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputOpenTelemetryBackpressureBehavior],
         pydantic.Field(alias="onBackpressure"),
     ] = OutputOpenTelemetryBackpressureBehavior.BLOCK
     r"""How to handle events when all receivers are exerting backpressure"""
@@ -639,28 +607,19 @@ class OutputOpenTelemetry(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""

     pq_compress: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryPqCompressCompression],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputOpenTelemetryPqCompressCompression],
         pydantic.Field(alias="pqCompress"),
     ] = OutputOpenTelemetryPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""

     pq_on_backpressure: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryQueueFullBehavior],
-            PlainValidator(validate_open_enum(False)),
-        ],
+        Optional[OutputOpenTelemetryQueueFullBehavior],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputOpenTelemetryQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

     pq_mode: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
+        Optional[OutputOpenTelemetryMode], pydantic.Field(alias="pqMode")
     ] = OutputOpenTelemetryMode.ERROR
     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

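One upgrade consideration: configurations serialized while the older, more permissive enum handling was in place may contain values that 0.0.17's closed enums no longer accept. A small guard like the hypothetical coerce_or_default below (not part of the SDK) is one way to normalize stored values before rebuilding the models; Protocol is again a stand-in for an enum such as OutputOpenTelemetryProtocol.

from enum import Enum
from typing import Type, TypeVar

E = TypeVar("E", bound=Enum)


class Protocol(str, Enum):
    # Stand-in for OutputOpenTelemetryProtocol
    GRPC = "grpc"
    HTTP = "http"


def coerce_or_default(enum_cls: Type[E], raw: str, default: E) -> E:
    """Return the member matching raw, or default when the value is no longer a member."""
    try:
        return enum_cls(raw)
    except ValueError:
        return default


print(coerce_or_default(Protocol, "http", Protocol.GRPC))  # matches an existing member
print(coerce_or_default(Protocol, "quic", Protocol.GRPC))  # falls back to the default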