cribl-control-plane 0.3.0b3__py3-none-any.whl → 0.3.0b12__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane has been flagged as possibly problematic.

Files changed (158)
  1. cribl_control_plane/_version.py +4 -4
  2. cribl_control_plane/groups_sdk.py +2 -2
  3. cribl_control_plane/lakedatasets.py +28 -0
  4. cribl_control_plane/models/__init__.py +124 -5
  5. cribl_control_plane/models/cacheconnection.py +20 -0
  6. cribl_control_plane/models/configgroup.py +20 -1
  7. cribl_control_plane/models/configgroupcloud.py +11 -1
  8. cribl_control_plane/models/createconfiggroupbyproductop.py +13 -2
  9. cribl_control_plane/models/cribllakedataset.py +15 -1
  10. cribl_control_plane/models/cribllakedatasetupdate.py +15 -1
  11. cribl_control_plane/models/datasetmetadata.py +11 -1
  12. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +11 -0
  13. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +20 -0
  14. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +20 -0
  15. cribl_control_plane/models/getconfiggroupbyproductandidop.py +11 -0
  16. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +11 -0
  17. cribl_control_plane/models/getsummaryop.py +11 -0
  18. cribl_control_plane/models/groupcreaterequest.py +20 -1
  19. cribl_control_plane/models/hbcriblinfo.py +11 -1
  20. cribl_control_plane/models/healthserverstatus.py +20 -1
  21. cribl_control_plane/models/input.py +15 -15
  22. cribl_control_plane/models/inputappscope.py +76 -17
  23. cribl_control_plane/models/inputazureblob.py +29 -1
  24. cribl_control_plane/models/inputcollection.py +20 -1
  25. cribl_control_plane/models/inputconfluentcloud.py +188 -1
  26. cribl_control_plane/models/inputcribl.py +20 -1
  27. cribl_control_plane/models/inputcriblhttp.py +58 -17
  28. cribl_control_plane/models/inputcribllakehttp.py +58 -17
  29. cribl_control_plane/models/inputcriblmetrics.py +20 -1
  30. cribl_control_plane/models/inputcribltcp.py +58 -17
  31. cribl_control_plane/models/inputcrowdstrike.py +47 -1
  32. cribl_control_plane/models/inputdatadogagent.py +58 -17
  33. cribl_control_plane/models/inputdatagen.py +20 -1
  34. cribl_control_plane/models/inputedgeprometheus.py +138 -37
  35. cribl_control_plane/models/inputelastic.py +108 -27
  36. cribl_control_plane/models/inputeventhub.py +176 -1
  37. cribl_control_plane/models/inputexec.py +29 -1
  38. cribl_control_plane/models/inputfile.py +40 -7
  39. cribl_control_plane/models/inputfirehose.py +58 -17
  40. cribl_control_plane/models/inputgooglepubsub.py +29 -1
  41. cribl_control_plane/models/inputgrafana.py +149 -32
  42. cribl_control_plane/models/inputhttp.py +58 -17
  43. cribl_control_plane/models/inputhttpraw.py +58 -17
  44. cribl_control_plane/models/inputjournalfiles.py +20 -1
  45. cribl_control_plane/models/inputkafka.py +182 -1
  46. cribl_control_plane/models/inputkinesis.py +65 -1
  47. cribl_control_plane/models/inputkubeevents.py +20 -1
  48. cribl_control_plane/models/inputkubelogs.py +29 -1
  49. cribl_control_plane/models/inputkubemetrics.py +29 -1
  50. cribl_control_plane/models/inputloki.py +67 -17
  51. cribl_control_plane/models/inputmetrics.py +58 -17
  52. cribl_control_plane/models/inputmodeldriventelemetry.py +58 -17
  53. cribl_control_plane/models/inputmsk.py +74 -1
  54. cribl_control_plane/models/inputnetflow.py +20 -1
  55. cribl_control_plane/models/inputoffice365mgmt.py +56 -1
  56. cribl_control_plane/models/inputoffice365msgtrace.py +56 -1
  57. cribl_control_plane/models/inputoffice365service.py +56 -1
  58. cribl_control_plane/models/inputopentelemetry.py +84 -16
  59. cribl_control_plane/models/inputprometheus.py +131 -37
  60. cribl_control_plane/models/inputprometheusrw.py +67 -17
  61. cribl_control_plane/models/inputrawudp.py +20 -1
  62. cribl_control_plane/models/inputs3.py +38 -1
  63. cribl_control_plane/models/inputs3inventory.py +47 -1
  64. cribl_control_plane/models/inputsecuritylake.py +47 -1
  65. cribl_control_plane/models/inputsnmp.py +29 -1
  66. cribl_control_plane/models/inputsplunk.py +76 -17
  67. cribl_control_plane/models/inputsplunkhec.py +66 -16
  68. cribl_control_plane/models/inputsplunksearch.py +56 -1
  69. cribl_control_plane/models/inputsqs.py +47 -1
  70. cribl_control_plane/models/inputsyslog.py +113 -32
  71. cribl_control_plane/models/inputsystemmetrics.py +110 -9
  72. cribl_control_plane/models/inputsystemstate.py +29 -1
  73. cribl_control_plane/models/inputtcp.py +77 -17
  74. cribl_control_plane/models/inputtcpjson.py +67 -17
  75. cribl_control_plane/models/inputwef.py +65 -1
  76. cribl_control_plane/models/inputwindowsmetrics.py +101 -9
  77. cribl_control_plane/models/inputwineventlogs.py +52 -1
  78. cribl_control_plane/models/inputwiz.py +38 -1
  79. cribl_control_plane/models/inputwizwebhook.py +58 -17
  80. cribl_control_plane/models/inputzscalerhec.py +66 -16
  81. cribl_control_plane/models/jobinfo.py +10 -4
  82. cribl_control_plane/models/jobstatus.py +34 -3
  83. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  84. cribl_control_plane/models/listconfiggroupbyproductop.py +11 -0
  85. cribl_control_plane/models/masterworkerentry.py +11 -1
  86. cribl_control_plane/models/nodeupgradestatus.py +38 -0
  87. cribl_control_plane/models/output.py +21 -21
  88. cribl_control_plane/models/outputazureblob.py +90 -1
  89. cribl_control_plane/models/outputazuredataexplorer.py +430 -93
  90. cribl_control_plane/models/outputazureeventhub.py +267 -22
  91. cribl_control_plane/models/outputazurelogs.py +105 -22
  92. cribl_control_plane/models/outputchronicle.py +105 -22
  93. cribl_control_plane/models/outputclickhouse.py +141 -22
  94. cribl_control_plane/models/outputcloudwatch.py +96 -22
  95. cribl_control_plane/models/outputconfluentcloud.py +292 -23
  96. cribl_control_plane/models/outputcriblhttp.py +123 -22
  97. cribl_control_plane/models/outputcribllake.py +76 -1
  98. cribl_control_plane/models/outputcribltcp.py +123 -22
  99. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +117 -23
  100. cribl_control_plane/models/outputdatabricks.py +76 -5
  101. cribl_control_plane/models/outputdatadog.py +132 -22
  102. cribl_control_plane/models/outputdataset.py +123 -22
  103. cribl_control_plane/models/outputdiskspool.py +11 -1
  104. cribl_control_plane/models/outputdls3.py +117 -1
  105. cribl_control_plane/models/outputdynatracehttp.py +141 -22
  106. cribl_control_plane/models/outputdynatraceotlp.py +141 -22
  107. cribl_control_plane/models/outputelastic.py +148 -22
  108. cribl_control_plane/models/outputelasticcloud.py +130 -22
  109. cribl_control_plane/models/outputexabeam.py +47 -1
  110. cribl_control_plane/models/outputfilesystem.py +72 -1
  111. cribl_control_plane/models/outputgooglechronicle.py +148 -23
  112. cribl_control_plane/models/outputgooglecloudlogging.py +115 -23
  113. cribl_control_plane/models/outputgooglecloudstorage.py +108 -1
  114. cribl_control_plane/models/outputgooglepubsub.py +96 -22
  115. cribl_control_plane/models/outputgrafanacloud.py +244 -43
  116. cribl_control_plane/models/outputgraphite.py +96 -22
  117. cribl_control_plane/models/outputhoneycomb.py +105 -22
  118. cribl_control_plane/models/outputhumiohec.py +114 -22
  119. cribl_control_plane/models/outputinfluxdb.py +114 -22
  120. cribl_control_plane/models/outputkafka.py +283 -20
  121. cribl_control_plane/models/outputkinesis.py +121 -22
  122. cribl_control_plane/models/outputloki.py +112 -20
  123. cribl_control_plane/models/outputminio.py +117 -1
  124. cribl_control_plane/models/outputmsk.py +175 -20
  125. cribl_control_plane/models/outputnewrelic.py +123 -22
  126. cribl_control_plane/models/outputnewrelicevents.py +115 -23
  127. cribl_control_plane/models/outputopentelemetry.py +159 -22
  128. cribl_control_plane/models/outputprometheus.py +105 -22
  129. cribl_control_plane/models/outputring.py +29 -1
  130. cribl_control_plane/models/outputs3.py +117 -1
  131. cribl_control_plane/models/outputsecuritylake.py +85 -1
  132. cribl_control_plane/models/outputsentinel.py +123 -22
  133. cribl_control_plane/models/outputsentineloneaisiem.py +124 -23
  134. cribl_control_plane/models/outputservicenow.py +150 -22
  135. cribl_control_plane/models/outputsignalfx.py +105 -22
  136. cribl_control_plane/models/outputsns.py +103 -20
  137. cribl_control_plane/models/outputsplunk.py +141 -22
  138. cribl_control_plane/models/outputsplunkhec.py +198 -22
  139. cribl_control_plane/models/outputsplunklb.py +170 -22
  140. cribl_control_plane/models/outputsqs.py +112 -20
  141. cribl_control_plane/models/outputstatsd.py +96 -22
  142. cribl_control_plane/models/outputstatsdext.py +96 -22
  143. cribl_control_plane/models/outputsumologic.py +105 -22
  144. cribl_control_plane/models/outputsyslog.py +238 -99
  145. cribl_control_plane/models/outputtcpjson.py +132 -22
  146. cribl_control_plane/models/outputwavefront.py +105 -22
  147. cribl_control_plane/models/outputwebhook.py +141 -22
  148. cribl_control_plane/models/outputxsiam.py +103 -20
  149. cribl_control_plane/models/resourcepolicy.py +11 -0
  150. cribl_control_plane/models/runnablejobcollection.py +68 -9
  151. cribl_control_plane/models/runnablejobexecutor.py +32 -9
  152. cribl_control_plane/models/runnablejobscheduledsearch.py +23 -9
  153. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +11 -0
  154. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +11 -0
  155. cribl_control_plane/sdk.py +2 -2
  156. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/METADATA +25 -7
  157. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/RECORD +158 -157
  158. {cribl_control_plane-0.3.0b3.dist-info → cribl_control_plane-0.3.0b12.dist-info}/WHEEL +0 -0
cribl_control_plane/models/outputnewrelic.py

@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
+from cribl_control_plane import models, utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -45,6 +46,15 @@ class OutputNewrelicMetadatum(BaseModel):
     value: str
     r"""JavaScript expression to compute field's value, enclosed in quotes or backticks. (Can evaluate to a constant.)"""
 
+    @field_serializer("name")
+    def serialize_name(self, value):
+        if isinstance(value, str):
+            try:
+                return models.FieldName(value)
+            except ValueError:
+                return value
+        return value
+
 
 class OutputNewrelicExtraHTTPHeaderTypedDict(TypedDict):
     value: str
@@ -140,6 +150,17 @@ class OutputNewrelicAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta
     SECRET = "secret"
 
 
+class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputNewrelicCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -158,17 +179,6 @@ class OutputNewrelicQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
     DROP = "drop"
 
 
-class OutputNewrelicMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputNewrelicPqControlsTypedDict(TypedDict):
     pass
 
@@ -237,6 +247,16 @@ class OutputNewrelicTypedDict(TypedDict):
     r"""Maximum total size of the batches waiting to be sent. If left blank, defaults to 5 times the max body size (if set). If 0, no limit is enforced."""
     description: NotRequired[str]
     custom_url: NotRequired[str]
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputNewrelicMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -247,8 +267,6 @@ class OutputNewrelicTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputNewrelicQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputNewrelicMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputNewrelicPqControlsTypedDict]
     api_key: NotRequired[str]
     r"""New Relic API key. Can be overridden using __newRelic_apiKey field."""
@@ -390,6 +408,34 @@ class OutputNewrelic(BaseModel):
 
     custom_url: Annotated[Optional[str], pydantic.Field(alias="customUrl")] = None
 
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputNewrelicMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputNewrelicMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -421,14 +467,6 @@ class OutputNewrelic(BaseModel):
     ] = OutputNewrelicQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputNewrelicMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputNewrelicPqControls], pydantic.Field(alias="pqControls")
     ] = None
@@ -438,3 +476,66 @@
 
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""
+
+    @field_serializer("region")
+    def serialize_region(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicRegion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("failed_request_logging_mode")
+    def serialize_failed_request_logging_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicFailedRequestLoggingMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("auth_type")
+    def serialize_auth_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicAuthenticationMethod(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
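
The pattern repeated across this release is the addition of pydantic `field_serializer` hooks that coerce plain strings back into the SDK's open enums when a model is dumped, while letting unrecognized values pass through untouched. The following standalone sketch shows that behavior in isolation; `Mode` and `Output` here are illustrative stand-ins, not classes from cribl_control_plane.

# Illustrative stand-in for the generated serializer pattern; Mode and Output
# are hypothetical names, not part of the package.
from enum import Enum
from typing import Optional, Union

from pydantic import BaseModel, field_serializer


class Mode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class Output(BaseModel):
    pq_mode: Optional[Union[Mode, str]] = Mode.ERROR

    @field_serializer("pq_mode")
    def serialize_pq_mode(self, value):
        # Coerce known strings back to the enum on dump; keep unknown strings as-is.
        if isinstance(value, str):
            try:
                return Mode(value)
            except ValueError:
                return value
        return value


print(Output(pq_mode="backpressure").model_dump())     # dumped as the Mode enum member
print(Output(pq_mode="some-future-mode").model_dump())  # unknown string passes through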
cribl_control_plane/models/outputnewrelicevents.py

@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
+from cribl_control_plane import models, utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -122,6 +123,17 @@ class OutputNewrelicEventsAuthenticationMethod(str, Enum, metaclass=utils.OpenEn
     SECRET = "secret"
 
 
+class OutputNewrelicEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
+
 class OutputNewrelicEventsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
 
@@ -140,17 +152,6 @@ class OutputNewrelicEventsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumM
     DROP = "drop"
 
 
-class OutputNewrelicEventsMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputNewrelicEventsPqControlsTypedDict(TypedDict):
     pass
 
@@ -219,6 +220,16 @@ class OutputNewrelicEventsTypedDict(TypedDict):
     r"""Enter API key directly, or select a stored secret"""
     description: NotRequired[str]
     custom_url: NotRequired[str]
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputNewrelicEventsMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -229,8 +240,6 @@ class OutputNewrelicEventsTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputNewrelicEventsQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputNewrelicEventsMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputNewrelicEventsPqControlsTypedDict]
     api_key: NotRequired[str]
     r"""New Relic API key. Can be overridden using __newRelic_apiKey field."""
@@ -364,6 +373,35 @@ class OutputNewrelicEvents(BaseModel):
 
     custom_url: Annotated[Optional[str], pydantic.Field(alias="customUrl")] = None
 
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputNewrelicEventsMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputNewrelicEventsMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
    ] = "1 MB"
@@ -395,15 +433,6 @@
     ] = OutputNewrelicEventsQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputNewrelicEventsMode],
-            PlainValidator(validate_open_enum(False)),
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputNewrelicEventsMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputNewrelicEventsPqControls], pydantic.Field(alias="pqControls")
     ] = None
@@ -413,3 +442,66 @@
 
     text_secret: Annotated[Optional[str], pydantic.Field(alias="textSecret")] = None
     r"""Select or create a stored text secret"""
+
+    @field_serializer("region")
+    def serialize_region(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicEventsRegion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("failed_request_logging_mode")
+    def serialize_failed_request_logging_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicEventsFailedRequestLoggingMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicEventsBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("auth_type")
+    def serialize_auth_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicEventsAuthenticationMethod(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicEventsMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicEventsCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputNewrelicEventsQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
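
Beyond the serializers, each Output* model gains a block of persistent-queue (PQ) tuning fields with camelCase wire aliases and the defaults visible above (pqStrictOrdering=True, pqRatePerSec=0, pqMaxBufferSize=42, pqMaxBackpressureSec=30, pqMode="error"). A minimal sketch of that field/alias/default pattern, using a hypothetical standalone model rather than the generated classes:

# Minimal sketch of the new PQ settings block; PqSettings is a hypothetical
# standalone model mirroring the aliases and defaults shown in the diff.
from typing import Optional

from pydantic import BaseModel, ConfigDict, Field


class PqSettings(BaseModel):
    model_config = ConfigDict(populate_by_name=True)

    # FIFO processing of queued events; disable to forward new events first.
    pq_strict_ordering: Optional[bool] = Field(default=True, alias="pqStrictOrdering")
    # Events/second throttle while draining the queue; 0 disables throttling.
    pq_rate_per_sec: Optional[float] = Field(default=0, alias="pqRatePerSec")
    # Maximum events held in memory before spilling to disk.
    pq_max_buffer_size: Optional[float] = Field(default=42, alias="pqMaxBufferSize")
    # Seconds to wait for backpressure to clear before engaging the queue.
    pq_max_backpressure_sec: Optional[float] = Field(default=30, alias="pqMaxBackpressureSec")


# Accepts either the Python field name or the wire alias; dumps back to camelCase.
settings = PqSettings(pqRatePerSec=100, pq_strict_ordering=False)
print(settings.model_dump(by_alias=True))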
cribl_control_plane/models/outputopentelemetry.py

@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from cribl_control_plane import utils
+from cribl_control_plane import models, utils
 from cribl_control_plane.types import BaseModel
 from cribl_control_plane.utils import validate_open_enum
 from enum import Enum
 import pydantic
+from pydantic import field_serializer
 from pydantic.functional_validators import PlainValidator
 from typing import List, Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -273,6 +274,35 @@ class OutputOpenTelemetryTLSSettingsClientSide(BaseModel):
         pydantic.Field(alias="maxVersion"),
     ] = None
 
+    @field_serializer("min_version")
+    def serialize_min_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryMinimumTLSVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("max_version")
+    def serialize_max_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryMaximumTLSVersion(value)
+            except ValueError:
+                return value
+        return value
+
+
+class OutputOpenTelemetryMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    # Error
+    ERROR = "error"
+    # Backpressure
+    ALWAYS = "always"
+    # Always On
+    BACKPRESSURE = "backpressure"
+
 
 class OutputOpenTelemetryPqCompressCompression(str, Enum, metaclass=utils.OpenEnumMeta):
     r"""Codec to use to compress the persisted data"""
@@ -292,17 +322,6 @@ class OutputOpenTelemetryQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMe
     DROP = "drop"
 
 
-class OutputOpenTelemetryMode(str, Enum, metaclass=utils.OpenEnumMeta):
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-    # Error
-    ERROR = "error"
-    # Backpressure
-    BACKPRESSURE = "backpressure"
-    # Always On
-    ALWAYS = "always"
-
-
 class OutputOpenTelemetryPqControlsTypedDict(TypedDict):
     pass
 
@@ -409,6 +428,16 @@ class OutputOpenTelemetryTypedDict(TypedDict):
     response_honor_retry_after_header: NotRequired[bool]
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
     tls: NotRequired[OutputOpenTelemetryTLSSettingsClientSideTypedDict]
+    pq_strict_ordering: NotRequired[bool]
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+    pq_rate_per_sec: NotRequired[float]
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+    pq_mode: NotRequired[OutputOpenTelemetryMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_max_buffer_size: NotRequired[float]
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+    pq_max_backpressure_sec: NotRequired[float]
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
     pq_max_file_size: NotRequired[str]
     r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
     pq_max_size: NotRequired[str]
@@ -419,8 +448,6 @@ class OutputOpenTelemetryTypedDict(TypedDict):
     r"""Codec to use to compress the persisted data"""
     pq_on_backpressure: NotRequired[OutputOpenTelemetryQueueFullBehavior]
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
-    pq_mode: NotRequired[OutputOpenTelemetryMode]
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
     pq_controls: NotRequired[OutputOpenTelemetryPqControlsTypedDict]
 
 
@@ -647,6 +674,34 @@ class OutputOpenTelemetry(BaseModel):
 
     tls: Optional[OutputOpenTelemetryTLSSettingsClientSide] = None
 
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputOpenTelemetryMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputOpenTelemetryMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -678,14 +733,96 @@
     ] = OutputOpenTelemetryQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Annotated[
-            Optional[OutputOpenTelemetryMode], PlainValidator(validate_open_enum(False))
-        ],
-        pydantic.Field(alias="pqMode"),
-    ] = OutputOpenTelemetryMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputOpenTelemetryPqControls], pydantic.Field(alias="pqControls")
     ] = None
+
+    @field_serializer("protocol")
+    def serialize_protocol(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryProtocol(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("otlp_version")
+    def serialize_otlp_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryOTLPVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("compress")
+    def serialize_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("http_compress")
+    def serialize_http_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryHTTPCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("auth_type")
+    def serialize_auth_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryAuthenticationType(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("failed_request_logging_mode")
+    def serialize_failed_request_logging_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryFailedRequestLoggingMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryPqCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputOpenTelemetryQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
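
On the read side, these fields keep the `PlainValidator(validate_open_enum(False))` annotation, so values the enum does not know yet are accepted during validation rather than rejected; the new serializers are the matching write-side half. A rough sketch of that open-enum idea follows; `open_enum` below is a hypothetical stand-in, not the package's `validate_open_enum`.

# Rough sketch of open-enum validation; open_enum is a hypothetical stand-in
# for cribl_control_plane.utils.validate_open_enum.
from enum import Enum
from typing import Optional, Union

from pydantic import BaseModel
from pydantic.functional_validators import PlainValidator
from typing_extensions import Annotated


class Mode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


def open_enum(enum_cls):
    def validate(value):
        try:
            return enum_cls(value)  # known value: return the enum member
        except ValueError:
            return value  # unknown value (e.g. from a newer API): keep the raw string
    return validate


class Output(BaseModel):
    pq_mode: Annotated[Optional[Union[Mode, str]], PlainValidator(open_enum(Mode))] = Mode.ERROR


print(Output(pq_mode="always").pq_mode)         # Mode.ALWAYS
print(Output(pq_mode="not-yet-known").pq_mode)  # 'not-yet-known'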