cribl-control-plane 0.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cribl-control-plane might be problematic.

Files changed (197)
  1. cribl_control_plane/__init__.py +17 -0
  2. cribl_control_plane/_hooks/__init__.py +5 -0
  3. cribl_control_plane/_hooks/clientcredentials.py +211 -0
  4. cribl_control_plane/_hooks/registration.py +13 -0
  5. cribl_control_plane/_hooks/sdkhooks.py +81 -0
  6. cribl_control_plane/_hooks/types.py +112 -0
  7. cribl_control_plane/_version.py +15 -0
  8. cribl_control_plane/auth_sdk.py +184 -0
  9. cribl_control_plane/basesdk.py +358 -0
  10. cribl_control_plane/errors/__init__.py +60 -0
  11. cribl_control_plane/errors/apierror.py +38 -0
  12. cribl_control_plane/errors/criblcontrolplaneerror.py +26 -0
  13. cribl_control_plane/errors/error.py +24 -0
  14. cribl_control_plane/errors/healthstatus_error.py +38 -0
  15. cribl_control_plane/errors/no_response_error.py +13 -0
  16. cribl_control_plane/errors/responsevalidationerror.py +25 -0
  17. cribl_control_plane/health.py +166 -0
  18. cribl_control_plane/httpclient.py +126 -0
  19. cribl_control_plane/models/__init__.py +7305 -0
  20. cribl_control_plane/models/addhectokenrequest.py +34 -0
  21. cribl_control_plane/models/authtoken.py +13 -0
  22. cribl_control_plane/models/createinputhectokenbyidop.py +45 -0
  23. cribl_control_plane/models/createinputop.py +24 -0
  24. cribl_control_plane/models/createoutputop.py +24 -0
  25. cribl_control_plane/models/createoutputtestbyidop.py +46 -0
  26. cribl_control_plane/models/criblevent.py +14 -0
  27. cribl_control_plane/models/deleteinputbyidop.py +37 -0
  28. cribl_control_plane/models/deleteoutputbyidop.py +37 -0
  29. cribl_control_plane/models/deleteoutputpqbyidop.py +36 -0
  30. cribl_control_plane/models/getinputbyidop.py +37 -0
  31. cribl_control_plane/models/getoutputbyidop.py +37 -0
  32. cribl_control_plane/models/getoutputpqbyidop.py +36 -0
  33. cribl_control_plane/models/getoutputsamplesbyidop.py +37 -0
  34. cribl_control_plane/models/healthstatus.py +36 -0
  35. cribl_control_plane/models/input.py +199 -0
  36. cribl_control_plane/models/inputappscope.py +448 -0
  37. cribl_control_plane/models/inputazureblob.py +308 -0
  38. cribl_control_plane/models/inputcollection.py +208 -0
  39. cribl_control_plane/models/inputconfluentcloud.py +585 -0
  40. cribl_control_plane/models/inputcribl.py +165 -0
  41. cribl_control_plane/models/inputcriblhttp.py +341 -0
  42. cribl_control_plane/models/inputcribllakehttp.py +342 -0
  43. cribl_control_plane/models/inputcriblmetrics.py +175 -0
  44. cribl_control_plane/models/inputcribltcp.py +299 -0
  45. cribl_control_plane/models/inputcrowdstrike.py +410 -0
  46. cribl_control_plane/models/inputdatadogagent.py +364 -0
  47. cribl_control_plane/models/inputdatagen.py +180 -0
  48. cribl_control_plane/models/inputedgeprometheus.py +551 -0
  49. cribl_control_plane/models/inputelastic.py +494 -0
  50. cribl_control_plane/models/inputeventhub.py +360 -0
  51. cribl_control_plane/models/inputexec.py +213 -0
  52. cribl_control_plane/models/inputfile.py +259 -0
  53. cribl_control_plane/models/inputfirehose.py +341 -0
  54. cribl_control_plane/models/inputgooglepubsub.py +247 -0
  55. cribl_control_plane/models/inputgrafana_union.py +1247 -0
  56. cribl_control_plane/models/inputhttp.py +403 -0
  57. cribl_control_plane/models/inputhttpraw.py +407 -0
  58. cribl_control_plane/models/inputjournalfiles.py +208 -0
  59. cribl_control_plane/models/inputkafka.py +581 -0
  60. cribl_control_plane/models/inputkinesis.py +363 -0
  61. cribl_control_plane/models/inputkubeevents.py +182 -0
  62. cribl_control_plane/models/inputkubelogs.py +256 -0
  63. cribl_control_plane/models/inputkubemetrics.py +233 -0
  64. cribl_control_plane/models/inputloki.py +468 -0
  65. cribl_control_plane/models/inputmetrics.py +290 -0
  66. cribl_control_plane/models/inputmodeldriventelemetry.py +274 -0
  67. cribl_control_plane/models/inputmsk.py +654 -0
  68. cribl_control_plane/models/inputnetflow.py +224 -0
  69. cribl_control_plane/models/inputoffice365mgmt.py +384 -0
  70. cribl_control_plane/models/inputoffice365msgtrace.py +449 -0
  71. cribl_control_plane/models/inputoffice365service.py +377 -0
  72. cribl_control_plane/models/inputopentelemetry.py +516 -0
  73. cribl_control_plane/models/inputprometheus.py +464 -0
  74. cribl_control_plane/models/inputprometheusrw.py +470 -0
  75. cribl_control_plane/models/inputrawudp.py +207 -0
  76. cribl_control_plane/models/inputs3.py +416 -0
  77. cribl_control_plane/models/inputs3inventory.py +440 -0
  78. cribl_control_plane/models/inputsecuritylake.py +425 -0
  79. cribl_control_plane/models/inputsnmp.py +274 -0
  80. cribl_control_plane/models/inputsplunk.py +387 -0
  81. cribl_control_plane/models/inputsplunkhec.py +478 -0
  82. cribl_control_plane/models/inputsplunksearch.py +537 -0
  83. cribl_control_plane/models/inputsqs.py +320 -0
  84. cribl_control_plane/models/inputsyslog_union.py +759 -0
  85. cribl_control_plane/models/inputsystemmetrics.py +533 -0
  86. cribl_control_plane/models/inputsystemstate.py +417 -0
  87. cribl_control_plane/models/inputtcp.py +359 -0
  88. cribl_control_plane/models/inputtcpjson.py +334 -0
  89. cribl_control_plane/models/inputwef.py +498 -0
  90. cribl_control_plane/models/inputwindowsmetrics.py +457 -0
  91. cribl_control_plane/models/inputwineventlogs.py +222 -0
  92. cribl_control_plane/models/inputwiz.py +334 -0
  93. cribl_control_plane/models/inputzscalerhec.py +439 -0
  94. cribl_control_plane/models/listinputop.py +24 -0
  95. cribl_control_plane/models/listoutputop.py +24 -0
  96. cribl_control_plane/models/logininfo.py +16 -0
  97. cribl_control_plane/models/output.py +229 -0
  98. cribl_control_plane/models/outputazureblob.py +471 -0
  99. cribl_control_plane/models/outputazuredataexplorer.py +660 -0
  100. cribl_control_plane/models/outputazureeventhub.py +321 -0
  101. cribl_control_plane/models/outputazurelogs.py +386 -0
  102. cribl_control_plane/models/outputclickhouse.py +650 -0
  103. cribl_control_plane/models/outputcloudwatch.py +273 -0
  104. cribl_control_plane/models/outputconfluentcloud.py +591 -0
  105. cribl_control_plane/models/outputcriblhttp.py +494 -0
  106. cribl_control_plane/models/outputcribllake.py +396 -0
  107. cribl_control_plane/models/outputcribltcp.py +387 -0
  108. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +410 -0
  109. cribl_control_plane/models/outputdatadog.py +472 -0
  110. cribl_control_plane/models/outputdataset.py +437 -0
  111. cribl_control_plane/models/outputdefault.py +55 -0
  112. cribl_control_plane/models/outputdevnull.py +50 -0
  113. cribl_control_plane/models/outputdiskspool.py +89 -0
  114. cribl_control_plane/models/outputdls3.py +560 -0
  115. cribl_control_plane/models/outputdynatracehttp.py +454 -0
  116. cribl_control_plane/models/outputdynatraceotlp.py +486 -0
  117. cribl_control_plane/models/outputelastic.py +494 -0
  118. cribl_control_plane/models/outputelasticcloud.py +407 -0
  119. cribl_control_plane/models/outputexabeam.py +297 -0
  120. cribl_control_plane/models/outputfilesystem.py +357 -0
  121. cribl_control_plane/models/outputgooglechronicle.py +486 -0
  122. cribl_control_plane/models/outputgooglecloudlogging.py +557 -0
  123. cribl_control_plane/models/outputgooglecloudstorage.py +499 -0
  124. cribl_control_plane/models/outputgooglepubsub.py +274 -0
  125. cribl_control_plane/models/outputgrafanacloud_union.py +1024 -0
  126. cribl_control_plane/models/outputgraphite.py +225 -0
  127. cribl_control_plane/models/outputhoneycomb.py +369 -0
  128. cribl_control_plane/models/outputhumiohec.py +389 -0
  129. cribl_control_plane/models/outputinfluxdb.py +523 -0
  130. cribl_control_plane/models/outputkafka.py +581 -0
  131. cribl_control_plane/models/outputkinesis.py +312 -0
  132. cribl_control_plane/models/outputloki.py +425 -0
  133. cribl_control_plane/models/outputminio.py +512 -0
  134. cribl_control_plane/models/outputmsk.py +654 -0
  135. cribl_control_plane/models/outputnetflow.py +80 -0
  136. cribl_control_plane/models/outputnewrelic.py +424 -0
  137. cribl_control_plane/models/outputnewrelicevents.py +401 -0
  138. cribl_control_plane/models/outputopentelemetry.py +669 -0
  139. cribl_control_plane/models/outputprometheus.py +485 -0
  140. cribl_control_plane/models/outputring.py +121 -0
  141. cribl_control_plane/models/outputrouter.py +83 -0
  142. cribl_control_plane/models/outputs3.py +556 -0
  143. cribl_control_plane/models/outputsamplesresponse.py +14 -0
  144. cribl_control_plane/models/outputsecuritylake.py +505 -0
  145. cribl_control_plane/models/outputsentinel.py +488 -0
  146. cribl_control_plane/models/outputsentineloneaisiem.py +505 -0
  147. cribl_control_plane/models/outputservicenow.py +543 -0
  148. cribl_control_plane/models/outputsignalfx.py +369 -0
  149. cribl_control_plane/models/outputsnmp.py +80 -0
  150. cribl_control_plane/models/outputsns.py +274 -0
  151. cribl_control_plane/models/outputsplunk.py +383 -0
  152. cribl_control_plane/models/outputsplunkhec.py +434 -0
  153. cribl_control_plane/models/outputsplunklb.py +558 -0
  154. cribl_control_plane/models/outputsqs.py +328 -0
  155. cribl_control_plane/models/outputstatsd.py +224 -0
  156. cribl_control_plane/models/outputstatsdext.py +225 -0
  157. cribl_control_plane/models/outputsumologic.py +378 -0
  158. cribl_control_plane/models/outputsyslog.py +415 -0
  159. cribl_control_plane/models/outputtcpjson.py +413 -0
  160. cribl_control_plane/models/outputtestrequest.py +15 -0
  161. cribl_control_plane/models/outputtestresponse.py +29 -0
  162. cribl_control_plane/models/outputwavefront.py +369 -0
  163. cribl_control_plane/models/outputwebhook.py +689 -0
  164. cribl_control_plane/models/outputxsiam.py +415 -0
  165. cribl_control_plane/models/schemeclientoauth.py +24 -0
  166. cribl_control_plane/models/security.py +36 -0
  167. cribl_control_plane/models/updatehectokenrequest.py +31 -0
  168. cribl_control_plane/models/updateinputbyidop.py +44 -0
  169. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +52 -0
  170. cribl_control_plane/models/updateoutputbyidop.py +44 -0
  171. cribl_control_plane/outputs.py +1615 -0
  172. cribl_control_plane/py.typed +1 -0
  173. cribl_control_plane/sdk.py +164 -0
  174. cribl_control_plane/sdkconfiguration.py +36 -0
  175. cribl_control_plane/sources.py +1355 -0
  176. cribl_control_plane/types/__init__.py +21 -0
  177. cribl_control_plane/types/basemodel.py +39 -0
  178. cribl_control_plane/utils/__init__.py +187 -0
  179. cribl_control_plane/utils/annotations.py +55 -0
  180. cribl_control_plane/utils/datetimes.py +23 -0
  181. cribl_control_plane/utils/enums.py +74 -0
  182. cribl_control_plane/utils/eventstreaming.py +238 -0
  183. cribl_control_plane/utils/forms.py +223 -0
  184. cribl_control_plane/utils/headers.py +136 -0
  185. cribl_control_plane/utils/logger.py +27 -0
  186. cribl_control_plane/utils/metadata.py +118 -0
  187. cribl_control_plane/utils/queryparams.py +205 -0
  188. cribl_control_plane/utils/requestbodies.py +66 -0
  189. cribl_control_plane/utils/retries.py +217 -0
  190. cribl_control_plane/utils/security.py +207 -0
  191. cribl_control_plane/utils/serializers.py +249 -0
  192. cribl_control_plane/utils/unmarshal_json_response.py +24 -0
  193. cribl_control_plane/utils/url.py +155 -0
  194. cribl_control_plane/utils/values.py +137 -0
  195. cribl_control_plane-0.0.13.dist-info/METADATA +489 -0
  196. cribl_control_plane-0.0.13.dist-info/RECORD +197 -0
  197. cribl_control_plane-0.0.13.dist-info/WHEEL +4 -0
cribl_control_plane/models/outputsqs.py
@@ -0,0 +1,328 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class OutputSqsType(str, Enum, metaclass=utils.OpenEnumMeta):
+    SQS = "sqs"
+
+
+class OutputSqsQueueType(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""The queue type used (or created). Defaults to Standard."""
+
+    STANDARD = "standard"
+    FIFO = "fifo"
+
+
+class OutputSqsAuthenticationMethod(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""AWS authentication method. Choose Auto to use IAM roles."""
+
+    AUTO = "auto"
+    MANUAL = "manual"
+    SECRET = "secret"
+
+
+class OutputSqsSignatureVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Signature version to use for signing SQS requests"""
+
+    V2 = "v2"
+    V4 = "v4"
+
+
+class OutputSqsBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    BLOCK = "block"
+    DROP = "drop"
+    QUEUE = "queue"
+
+
+class OutputSqsCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class OutputSqsQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    BLOCK = "block"
+    DROP = "drop"
+
+
+class OutputSqsMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
+class OutputSqsPqControlsTypedDict(TypedDict):
+    pass
+
+
+class OutputSqsPqControls(BaseModel):
+    pass
+
+
+class OutputSqsTypedDict(TypedDict):
+    queue_name: str
+    r"""The name, URL, or ARN of the SQS queue to send events to. When a non-AWS URL is specified, format must be: '{url}/myQueueName'. Example: 'https://host:port/myQueueName'. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `https://host:port/myQueue-${C.vars.myVar}`."""
+    id: NotRequired[str]
+    r"""Unique ID for this output"""
+    type: NotRequired[OutputSqsType]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data before sending out to this output"""
+    system_fields: NotRequired[List[str]]
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    queue_type: NotRequired[OutputSqsQueueType]
+    r"""The queue type used (or created). Defaults to Standard."""
+    aws_account_id: NotRequired[str]
+    r"""SQS queue owner's AWS account ID. Leave empty if SQS queue is in same AWS account."""
+    message_group_id: NotRequired[str]
+    r"""This parameter applies only to FIFO queues. The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner. Use event field __messageGroupId to override this value."""
+    create_queue: NotRequired[bool]
+    r"""Create queue if it does not exist."""
+    aws_authentication_method: NotRequired[OutputSqsAuthenticationMethod]
+    r"""AWS authentication method. Choose Auto to use IAM roles."""
+    aws_secret_key: NotRequired[str]
+    region: NotRequired[str]
+    r"""AWS Region where the SQS queue is located. Required, unless the Queue entry is a URL or ARN that includes a Region."""
+    endpoint: NotRequired[str]
+    r"""SQS service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to SQS-compatible endpoint."""
+    signature_version: NotRequired[OutputSqsSignatureVersion]
+    r"""Signature version to use for signing SQS requests"""
+    reuse_connections: NotRequired[bool]
+    r"""Reuse connections between requests, which can improve performance"""
+    reject_unauthorized: NotRequired[bool]
+    r"""Reject certificates that cannot be verified against a valid CA, such as self-signed certificates"""
+    enable_assume_role: NotRequired[bool]
+    r"""Use Assume Role credentials to access SQS"""
+    assume_role_arn: NotRequired[str]
+    r"""Amazon Resource Name (ARN) of the role to assume"""
+    assume_role_external_id: NotRequired[str]
+    r"""External ID to use when assuming role"""
+    duration_seconds: NotRequired[float]
+    r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""
+    max_queue_size: NotRequired[float]
+    r"""Maximum number of queued batches before blocking."""
+    max_record_size_kb: NotRequired[float]
+    r"""Maximum size (KB) of batches to send. Per the SQS spec, the max allowed value is 256 KB."""
+    flush_period_sec: NotRequired[float]
+    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""
+    max_in_progress: NotRequired[float]
+    r"""The maximum number of in-progress API requests before backpressure is applied."""
+    on_backpressure: NotRequired[OutputSqsBackpressureBehavior]
+    r"""How to handle events when all receivers are exerting backpressure"""
+    description: NotRequired[str]
+    aws_api_key: NotRequired[str]
+    aws_secret: NotRequired[str]
+    r"""Select or create a stored secret that references your access key and secret key"""
+    pq_max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+    pq_max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    pq_path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+    pq_compress: NotRequired[OutputSqsCompression]
+    r"""Codec to use to compress the persisted data"""
+    pq_on_backpressure: NotRequired[OutputSqsQueueFullBehavior]
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputSqsMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_controls: NotRequired[OutputSqsPqControlsTypedDict]
+
+
+class OutputSqs(BaseModel):
+    queue_name: Annotated[str, pydantic.Field(alias="queueName")]
+    r"""The name, URL, or ARN of the SQS queue to send events to. When a non-AWS URL is specified, format must be: '{url}/myQueueName'. Example: 'https://host:port/myQueueName'. Must be a JavaScript expression (which can evaluate to a constant value), enclosed in quotes or backticks. Can be evaluated only at init time. Example referencing a Global Variable: `https://host:port/myQueue-${C.vars.myVar}`."""
+
+    id: Optional[str] = None
+    r"""Unique ID for this output"""
+
+    type: Annotated[
+        Optional[OutputSqsType], PlainValidator(validate_open_enum(False))
+    ] = None
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data before sending out to this output"""
+
+    system_fields: Annotated[
+        Optional[List[str]], pydantic.Field(alias="systemFields")
+    ] = None
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    queue_type: Annotated[
+        Annotated[
+            Optional[OutputSqsQueueType], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="queueType"),
+    ] = OutputSqsQueueType.STANDARD
+    r"""The queue type used (or created). Defaults to Standard."""
+
+    aws_account_id: Annotated[Optional[str], pydantic.Field(alias="awsAccountId")] = (
+        None
+    )
+    r"""SQS queue owner's AWS account ID. Leave empty if SQS queue is in same AWS account."""
+
+    message_group_id: Annotated[
+        Optional[str], pydantic.Field(alias="messageGroupId")
+    ] = "cribl"
+    r"""This parameter applies only to FIFO queues. The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner. Use event field __messageGroupId to override this value."""
+
+    create_queue: Annotated[Optional[bool], pydantic.Field(alias="createQueue")] = True
+    r"""Create queue if it does not exist."""
+
+    aws_authentication_method: Annotated[
+        Annotated[
+            Optional[OutputSqsAuthenticationMethod],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="awsAuthenticationMethod"),
+    ] = OutputSqsAuthenticationMethod.AUTO
+    r"""AWS authentication method. Choose Auto to use IAM roles."""
+
+    aws_secret_key: Annotated[Optional[str], pydantic.Field(alias="awsSecretKey")] = (
+        None
+    )
+
+    region: Optional[str] = None
+    r"""AWS Region where the SQS queue is located. Required, unless the Queue entry is a URL or ARN that includes a Region."""
+
+    endpoint: Optional[str] = None
+    r"""SQS service endpoint. If empty, defaults to the AWS Region-specific endpoint. Otherwise, it must point to SQS-compatible endpoint."""
+
+    signature_version: Annotated[
+        Annotated[
+            Optional[OutputSqsSignatureVersion],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="signatureVersion"),
+    ] = OutputSqsSignatureVersion.V4
+    r"""Signature version to use for signing SQS requests"""
+
+    reuse_connections: Annotated[
+        Optional[bool], pydantic.Field(alias="reuseConnections")
+    ] = True
+    r"""Reuse connections between requests, which can improve performance"""
+
+    reject_unauthorized: Annotated[
+        Optional[bool], pydantic.Field(alias="rejectUnauthorized")
+    ] = True
+    r"""Reject certificates that cannot be verified against a valid CA, such as self-signed certificates"""
+
+    enable_assume_role: Annotated[
+        Optional[bool], pydantic.Field(alias="enableAssumeRole")
+    ] = False
+    r"""Use Assume Role credentials to access SQS"""
+
+    assume_role_arn: Annotated[Optional[str], pydantic.Field(alias="assumeRoleArn")] = (
+        None
+    )
+    r"""Amazon Resource Name (ARN) of the role to assume"""
+
+    assume_role_external_id: Annotated[
+        Optional[str], pydantic.Field(alias="assumeRoleExternalId")
+    ] = None
+    r"""External ID to use when assuming role"""
+
+    duration_seconds: Annotated[
+        Optional[float], pydantic.Field(alias="durationSeconds")
+    ] = 3600
+    r"""Duration of the assumed role's session, in seconds. Minimum is 900 (15 minutes), default is 3600 (1 hour), and maximum is 43200 (12 hours)."""
+
+    max_queue_size: Annotated[Optional[float], pydantic.Field(alias="maxQueueSize")] = (
+        100
+    )
+    r"""Maximum number of queued batches before blocking."""
+
+    max_record_size_kb: Annotated[
+        Optional[float], pydantic.Field(alias="maxRecordSizeKB")
+    ] = 256
+    r"""Maximum size (KB) of batches to send. Per the SQS spec, the max allowed value is 256 KB."""
+
+    flush_period_sec: Annotated[
+        Optional[float], pydantic.Field(alias="flushPeriodSec")
+    ] = 1
+    r"""Maximum time between requests. Small values could cause the payload size to be smaller than the configured Max record size."""
+
+    max_in_progress: Annotated[
+        Optional[float], pydantic.Field(alias="maxInProgress")
+    ] = 10
+    r"""The maximum number of in-progress API requests before backpressure is applied."""
+
+    on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputSqsBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputSqsBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    description: Optional[str] = None
+
+    aws_api_key: Annotated[Optional[str], pydantic.Field(alias="awsApiKey")] = None
+
+    aws_secret: Annotated[Optional[str], pydantic.Field(alias="awsSecret")] = None
+    r"""Select or create a stored secret that references your access key and secret key"""
+
+    pq_max_file_size: Annotated[
+        Optional[str], pydantic.Field(alias="pqMaxFileSize")
+    ] = "1 MB"
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+
+    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
+        "$CRIBL_HOME/state/queues"
+    )
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+
+    pq_compress: Annotated[
+        Annotated[
+            Optional[OutputSqsCompression], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqCompress"),
+    ] = OutputSqsCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+    pq_on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputSqsQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
+    ] = OutputSqsQueueFullBehavior.BLOCK
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    pq_mode: Annotated[
+        Annotated[Optional[OutputSqsMode], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputSqsMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_controls: Annotated[
+        Optional[OutputSqsPqControls], pydantic.Field(alias="pqControls")
+    ] = None
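
For orientation, here is a minimal, hypothetical sketch of constructing the OutputSqs model above. It assumes the models package re-exports these classes and that the SDK's BaseModel accepts population by Python field name as well as by camelCase alias (typical of Speakeasy-generated pydantic models); the queue URL and output ID are placeholders. Note that queue_name is documented as a JavaScript expression, so the URL is itself quoted inside the Python string.

from cribl_control_plane.models import OutputSqs, OutputSqsQueueType

sqs_out = OutputSqs(
    # JS expression per the field docstring, hence the inner single quotes
    queue_name="'https://sqs.us-east-1.amazonaws.com/123456789012/myQueue'",
    id="my-sqs-output",  # placeholder ID
    queue_type=OutputSqsQueueType.FIFO,
    message_group_id="cribl",  # FIFO-only setting
)

# model_dump(by_alias=True) re-emits the camelCase wire names
# (queueName, queueType, messageGroupId, ...) declared via pydantic.Field.
print(sqs_out.model_dump(by_alias=True, exclude_none=True))

Because the enums use utils.OpenEnumMeta with PlainValidator(validate_open_enum(False)), values outside the declared members are apparently tolerated at validation time rather than rejected outright (an "open" enum), which keeps older SDKs compatible with newer server-side values.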
cribl_control_plane/models/outputstatsd.py
@@ -0,0 +1,224 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from cribl_control_plane import utils
+from cribl_control_plane.types import BaseModel
+from cribl_control_plane.utils import validate_open_enum
+from enum import Enum
+import pydantic
+from pydantic.functional_validators import PlainValidator
+from typing import List, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class OutputStatsdType(str, Enum, metaclass=utils.OpenEnumMeta):
+    STATSD = "statsd"
+
+
+class OutputStatsdDestinationProtocol(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Protocol to use when communicating with the destination."""
+
+    UDP = "udp"
+    TCP = "tcp"
+
+
+class OutputStatsdBackpressureBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    BLOCK = "block"
+    DROP = "drop"
+    QUEUE = "queue"
+
+
+class OutputStatsdCompression(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""Codec to use to compress the persisted data"""
+
+    NONE = "none"
+    GZIP = "gzip"
+
+
+class OutputStatsdQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    BLOCK = "block"
+    DROP = "drop"
+
+
+class OutputStatsdMode(str, Enum, metaclass=utils.OpenEnumMeta):
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    ERROR = "error"
+    BACKPRESSURE = "backpressure"
+    ALWAYS = "always"
+
+
+class OutputStatsdPqControlsTypedDict(TypedDict):
+    pass
+
+
+class OutputStatsdPqControls(BaseModel):
+    pass
+
+
+class OutputStatsdTypedDict(TypedDict):
+    host: str
+    r"""The hostname of the destination."""
+    id: NotRequired[str]
+    r"""Unique ID for this output"""
+    type: NotRequired[OutputStatsdType]
+    pipeline: NotRequired[str]
+    r"""Pipeline to process data before sending out to this output"""
+    system_fields: NotRequired[List[str]]
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+    environment: NotRequired[str]
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+    streamtags: NotRequired[List[str]]
+    r"""Tags for filtering and grouping in @{product}"""
+    protocol: NotRequired[OutputStatsdDestinationProtocol]
+    r"""Protocol to use when communicating with the destination."""
+    port: NotRequired[float]
+    r"""Destination port."""
+    mtu: NotRequired[float]
+    r"""When protocol is UDP, specifies the maximum size of packets sent to the destination. Also known as the MTU for the network path to the destination system."""
+    flush_period_sec: NotRequired[float]
+    r"""When protocol is TCP, specifies how often buffers should be flushed, resulting in records sent to the destination."""
+    dns_resolve_period_sec: NotRequired[float]
+    r"""How often to resolve the destination hostname to an IP address. Ignored if the destination is an IP address. A value of 0 means every batch sent will incur a DNS lookup."""
+    description: NotRequired[str]
+    throttle_rate_per_sec: NotRequired[str]
+    r"""Rate (in bytes per second) to throttle while writing to an output. Accepts values with multiple-byte units, such as KB, MB, and GB. (Example: 42 MB) Default value of 0 specifies no throttling."""
+    connection_timeout: NotRequired[float]
+    r"""Amount of time (milliseconds) to wait for the connection to establish before retrying"""
+    write_timeout: NotRequired[float]
+    r"""Amount of time (milliseconds) to wait for a write to complete before assuming connection is dead"""
+    on_backpressure: NotRequired[OutputStatsdBackpressureBehavior]
+    r"""How to handle events when all receivers are exerting backpressure"""
+    pq_max_file_size: NotRequired[str]
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+    pq_max_size: NotRequired[str]
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+    pq_path: NotRequired[str]
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+    pq_compress: NotRequired[OutputStatsdCompression]
+    r"""Codec to use to compress the persisted data"""
+    pq_on_backpressure: NotRequired[OutputStatsdQueueFullBehavior]
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+    pq_mode: NotRequired[OutputStatsdMode]
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+    pq_controls: NotRequired[OutputStatsdPqControlsTypedDict]
+
+
+class OutputStatsd(BaseModel):
+    host: str
+    r"""The hostname of the destination."""
+
+    id: Optional[str] = None
+    r"""Unique ID for this output"""
+
+    type: Annotated[
+        Optional[OutputStatsdType], PlainValidator(validate_open_enum(False))
+    ] = None
+
+    pipeline: Optional[str] = None
+    r"""Pipeline to process data before sending out to this output"""
+
+    system_fields: Annotated[
+        Optional[List[str]], pydantic.Field(alias="systemFields")
+    ] = None
+    r"""Fields to automatically add to events, such as cribl_pipe. Supports wildcards."""
+
+    environment: Optional[str] = None
+    r"""Optionally, enable this config only on a specified Git branch. If empty, will be enabled everywhere."""
+
+    streamtags: Optional[List[str]] = None
+    r"""Tags for filtering and grouping in @{product}"""
+
+    protocol: Annotated[
+        Optional[OutputStatsdDestinationProtocol],
+        PlainValidator(validate_open_enum(False)),
+    ] = OutputStatsdDestinationProtocol.UDP
+    r"""Protocol to use when communicating with the destination."""
+
+    port: Optional[float] = 8125
+    r"""Destination port."""
+
+    mtu: Optional[float] = 512
+    r"""When protocol is UDP, specifies the maximum size of packets sent to the destination. Also known as the MTU for the network path to the destination system."""
+
+    flush_period_sec: Annotated[
+        Optional[float], pydantic.Field(alias="flushPeriodSec")
+    ] = 1
+    r"""When protocol is TCP, specifies how often buffers should be flushed, resulting in records sent to the destination."""
+
+    dns_resolve_period_sec: Annotated[
+        Optional[float], pydantic.Field(alias="dnsResolvePeriodSec")
+    ] = 0
+    r"""How often to resolve the destination hostname to an IP address. Ignored if the destination is an IP address. A value of 0 means every batch sent will incur a DNS lookup."""
+
+    description: Optional[str] = None
+
+    throttle_rate_per_sec: Annotated[
+        Optional[str], pydantic.Field(alias="throttleRatePerSec")
+    ] = "0"
+    r"""Rate (in bytes per second) to throttle while writing to an output. Accepts values with multiple-byte units, such as KB, MB, and GB. (Example: 42 MB) Default value of 0 specifies no throttling."""
+
+    connection_timeout: Annotated[
+        Optional[float], pydantic.Field(alias="connectionTimeout")
+    ] = 10000
+    r"""Amount of time (milliseconds) to wait for the connection to establish before retrying"""
+
+    write_timeout: Annotated[Optional[float], pydantic.Field(alias="writeTimeout")] = (
+        60000
+    )
+    r"""Amount of time (milliseconds) to wait for a write to complete before assuming connection is dead"""
+
+    on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputStatsdBackpressureBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="onBackpressure"),
+    ] = OutputStatsdBackpressureBehavior.BLOCK
+    r"""How to handle events when all receivers are exerting backpressure"""
+
+    pq_max_file_size: Annotated[
+        Optional[str], pydantic.Field(alias="pqMaxFileSize")
+    ] = "1 MB"
+    r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
+
+    pq_max_size: Annotated[Optional[str], pydantic.Field(alias="pqMaxSize")] = "5GB"
+    r"""The maximum disk space that the queue can consume (as an average per Worker Process) before queueing stops. Enter a numeral with units of KB, MB, etc."""
+
+    pq_path: Annotated[Optional[str], pydantic.Field(alias="pqPath")] = (
+        "$CRIBL_HOME/state/queues"
+    )
+    r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
+
+    pq_compress: Annotated[
+        Annotated[
+            Optional[OutputStatsdCompression], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqCompress"),
+    ] = OutputStatsdCompression.NONE
+    r"""Codec to use to compress the persisted data"""
+
+    pq_on_backpressure: Annotated[
+        Annotated[
+            Optional[OutputStatsdQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqOnBackpressure"),
+    ] = OutputStatsdQueueFullBehavior.BLOCK
+    r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputStatsdMode], PlainValidator(validate_open_enum(False))
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputStatsdMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_controls: Annotated[
+        Optional[OutputStatsdPqControls], pydantic.Field(alias="pqControls")
+    ] = None
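
Similarly, a minimal sketch for the statsd destination, under the same assumptions about model re-exports and population by field name; the host and output ID are placeholders. Fields left unset fall back to the generated defaults visible above (UDP, port 8125, MTU 512, and so on).

from cribl_control_plane.models import (
    OutputStatsd,
    OutputStatsdDestinationProtocol,
)

statsd_out = OutputStatsd(
    host="metrics.example.com",  # placeholder destination host
    id="my-statsd-output",  # placeholder ID
    protocol=OutputStatsdDestinationProtocol.TCP,
    flush_period_sec=5,  # applies only when protocol is TCP
)

# Emit the camelCase wire names (flushPeriodSec, onBackpressure, ...).
print(statsd_out.model_dump(by_alias=True, exclude_none=True))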