cribl-control-plane 0.0.21__py3-none-any.whl → 0.4.0b23__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
Files changed (436)
  1. cribl_control_plane/_hooks/clientcredentials.py +113 -48
  2. cribl_control_plane/_version.py +4 -4
  3. cribl_control_plane/acl.py +225 -0
  4. cribl_control_plane/auth_sdk.py +12 -176
  5. cribl_control_plane/basesdk.py +17 -1
  6. cribl_control_plane/branches.py +351 -0
  7. cribl_control_plane/commits.py +1403 -0
  8. cribl_control_plane/commits_files.py +391 -0
  9. cribl_control_plane/configs_versions.py +201 -0
  10. cribl_control_plane/cribl.py +495 -0
  11. cribl_control_plane/destinations.py +146 -805
  12. cribl_control_plane/destinations_pq.py +379 -0
  13. cribl_control_plane/errors/__init__.py +26 -10
  14. cribl_control_plane/errors/apierror.py +2 -0
  15. cribl_control_plane/errors/criblcontrolplaneerror.py +11 -7
  16. cribl_control_plane/errors/error.py +4 -2
  17. cribl_control_plane/errors/healthserverstatus_error.py +41 -0
  18. cribl_control_plane/errors/no_response_error.py +5 -1
  19. cribl_control_plane/errors/responsevalidationerror.py +2 -0
  20. cribl_control_plane/functions.py +367 -0
  21. cribl_control_plane/groups_configs.py +22 -0
  22. cribl_control_plane/groups_sdk.py +333 -578
  23. cribl_control_plane/health.py +38 -18
  24. cribl_control_plane/hectokens.py +503 -0
  25. cribl_control_plane/httpclient.py +0 -1
  26. cribl_control_plane/{lake.py → lakedatasets.py} +207 -115
  27. cribl_control_plane/models/__init__.py +3644 -5986
  28. cribl_control_plane/models/addhectokenrequest.py +7 -1
  29. cribl_control_plane/models/authtoken.py +5 -1
  30. cribl_control_plane/models/backupssettings_union.py +37 -0
  31. cribl_control_plane/models/{lookupversions.py → branchinfo.py} +4 -4
  32. cribl_control_plane/models/cacheconnection.py +30 -2
  33. cribl_control_plane/models/cacheconnectionbackfillstatus.py +2 -1
  34. cribl_control_plane/models/cloudprovider.py +2 -1
  35. cribl_control_plane/models/collectorazureblob.py +130 -0
  36. cribl_control_plane/models/collectorconf.py +56 -0
  37. cribl_control_plane/models/collectorcribllake.py +27 -0
  38. cribl_control_plane/models/collectordatabase.py +92 -0
  39. cribl_control_plane/models/collectorfilesystem.py +66 -0
  40. cribl_control_plane/models/collectorgooglecloudstorage.py +131 -0
  41. cribl_control_plane/models/collectorhealthcheck.py +269 -0
  42. cribl_control_plane/models/collectorrest.py +340 -0
  43. cribl_control_plane/models/collectors3.py +239 -0
  44. cribl_control_plane/models/collectorscript.py +59 -0
  45. cribl_control_plane/models/collectorsplunk.py +253 -0
  46. cribl_control_plane/models/configgroup.py +67 -11
  47. cribl_control_plane/models/configgroupcloud.py +17 -3
  48. cribl_control_plane/models/countedbranchinfo.py +20 -0
  49. cribl_control_plane/models/countedconfiggroup.py +20 -0
  50. cribl_control_plane/models/countedcribllakedataset.py +20 -0
  51. cribl_control_plane/models/counteddistributedsummary.py +20 -0
  52. cribl_control_plane/models/countedfunctionresponse.py +20 -0
  53. cribl_control_plane/models/countedgitcommitsummary.py +20 -0
  54. cribl_control_plane/models/countedgitcountresult.py +20 -0
  55. cribl_control_plane/models/countedgitdiffresult.py +20 -0
  56. cribl_control_plane/models/countedgitfilesresponse.py +20 -0
  57. cribl_control_plane/models/{getversioninfoop.py → countedgitinfo.py} +2 -6
  58. cribl_control_plane/models/countedgitlogresult.py +20 -0
  59. cribl_control_plane/models/countedgitrevertresult.py +20 -0
  60. cribl_control_plane/models/countedgitshowresult.py +20 -0
  61. cribl_control_plane/models/countedgitstatusresult.py +20 -0
  62. cribl_control_plane/models/{listinputop.py → countedinput.py} +2 -6
  63. cribl_control_plane/models/countedinputsplunkhec.py +20 -0
  64. cribl_control_plane/models/countedjobinfo.py +20 -0
  65. cribl_control_plane/models/countedmasterworkerentry.py +20 -0
  66. cribl_control_plane/models/countednumber.py +19 -0
  67. cribl_control_plane/models/{getversionbranchop.py → countedobject.py} +2 -6
  68. cribl_control_plane/models/{listoutputop.py → countedoutput.py} +2 -6
  69. cribl_control_plane/models/countedoutputsamplesresponse.py +20 -0
  70. cribl_control_plane/models/countedoutputtestresponse.py +20 -0
  71. cribl_control_plane/models/countedpackinfo.py +20 -0
  72. cribl_control_plane/models/{createpacksop.py → countedpackinstallinfo.py} +2 -6
  73. cribl_control_plane/models/{listpipelineop.py → countedpipeline.py} +2 -6
  74. cribl_control_plane/models/{listroutesop.py → countedroutes.py} +2 -6
  75. cribl_control_plane/models/countedstring.py +19 -0
  76. cribl_control_plane/models/countedsystemsettingsconf.py +20 -0
  77. cribl_control_plane/models/countedteamaccesscontrollist.py +20 -0
  78. cribl_control_plane/models/counteduseraccesscontrollist.py +20 -0
  79. cribl_control_plane/models/createauthloginop.py +18 -0
  80. cribl_control_plane/models/createconfiggroupbyproductop.py +46 -0
  81. cribl_control_plane/models/createcribllakedatasetbylakeidop.py +3 -21
  82. cribl_control_plane/models/createinputhectokenbyidop.py +3 -21
  83. cribl_control_plane/models/createoutputtestbyidop.py +3 -22
  84. cribl_control_plane/models/createroutesappendbyidop.py +4 -21
  85. cribl_control_plane/models/createversioncommitop.py +27 -20
  86. cribl_control_plane/models/createversionrevertop.py +7 -23
  87. cribl_control_plane/models/createversionundoop.py +7 -22
  88. cribl_control_plane/models/criblevent.py +15 -0
  89. cribl_control_plane/models/cribllakedataset.py +23 -3
  90. cribl_control_plane/models/cribllakedatasetupdate.py +95 -0
  91. cribl_control_plane/models/currentbranchresult.py +13 -0
  92. cribl_control_plane/models/datasetmetadata.py +18 -2
  93. cribl_control_plane/models/deleteconfiggroupbyproductandidop.py +43 -0
  94. cribl_control_plane/models/deletecribllakedatasetbylakeidandidop.py +5 -24
  95. cribl_control_plane/models/deleteinputbyidop.py +3 -22
  96. cribl_control_plane/models/deleteoutputbyidop.py +3 -22
  97. cribl_control_plane/models/deleteoutputpqbyidop.py +3 -21
  98. cribl_control_plane/models/deletepacksbyidop.py +3 -22
  99. cribl_control_plane/models/deletepipelinebyidop.py +3 -22
  100. cribl_control_plane/models/difffiles.py +130 -0
  101. cribl_control_plane/models/diffline.py +26 -0
  102. cribl_control_plane/models/difflinecontext.py +28 -0
  103. cribl_control_plane/models/difflinedelete.py +25 -0
  104. cribl_control_plane/models/difflineinsert.py +25 -0
  105. cribl_control_plane/models/distributedsummary.py +6 -0
  106. cribl_control_plane/models/functionaggregatemetrics.py +206 -0
  107. cribl_control_plane/models/functionaggregation.py +172 -0
  108. cribl_control_plane/models/functionautotimestamp.py +173 -0
  109. cribl_control_plane/models/functioncef.py +111 -0
  110. cribl_control_plane/models/functionchain.py +75 -0
  111. cribl_control_plane/models/functionclone.py +75 -0
  112. cribl_control_plane/models/functioncode.py +96 -0
  113. cribl_control_plane/models/functioncomment.py +75 -0
  114. cribl_control_plane/models/functiondistinct.py +99 -0
  115. cribl_control_plane/models/functiondnslookup.py +250 -0
  116. cribl_control_plane/models/functiondrop.py +73 -0
  117. cribl_control_plane/models/functiondropdimensions.py +87 -0
  118. cribl_control_plane/models/functiondynamicsampling.py +121 -0
  119. cribl_control_plane/models/functioneval.py +103 -0
  120. cribl_control_plane/models/functioneventbreaker.py +103 -0
  121. cribl_control_plane/models/functioneventstats.py +92 -0
  122. cribl_control_plane/models/functionexternaldata.py +73 -0
  123. cribl_control_plane/models/functionflatten.py +90 -0
  124. cribl_control_plane/models/functionfoldkeys.py +89 -0
  125. cribl_control_plane/models/functiongenstats.py +73 -0
  126. cribl_control_plane/models/functiongeoip.py +120 -0
  127. cribl_control_plane/models/functiongrok.py +95 -0
  128. cribl_control_plane/models/functionhandlebar.py +112 -0
  129. cribl_control_plane/models/functionjoin.py +112 -0
  130. cribl_control_plane/models/functionjsonunroll.py +80 -0
  131. cribl_control_plane/models/functionlakeexport.py +102 -0
  132. cribl_control_plane/models/functionlimit.py +75 -0
  133. cribl_control_plane/models/functionlocalsearchdatatypeparser.py +76 -0
  134. cribl_control_plane/models/functionlocalsearchrulesetrunner.py +97 -0
  135. cribl_control_plane/models/functionlookup.py +148 -0
  136. cribl_control_plane/models/functionmask.py +121 -0
  137. cribl_control_plane/models/functionmvexpand.py +128 -0
  138. cribl_control_plane/models/functionmvpull.py +99 -0
  139. cribl_control_plane/models/functionnotificationpolicies.py +186 -0
  140. cribl_control_plane/models/functionnotifications.py +85 -0
  141. cribl_control_plane/models/functionnotify.py +196 -0
  142. cribl_control_plane/models/functionnumerify.py +119 -0
  143. cribl_control_plane/models/functionotlplogs.py +82 -0
  144. cribl_control_plane/models/functionotlpmetrics.py +118 -0
  145. cribl_control_plane/models/functionotlptraces.py +111 -0
  146. cribl_control_plane/models/functionpack.py +80 -0
  147. cribl_control_plane/models/functionpivot.py +85 -0
  148. cribl_control_plane/models/functionpublishmetrics.py +153 -0
  149. cribl_control_plane/models/functionredis.py +173 -0
  150. cribl_control_plane/models/functionregexextract.py +112 -0
  151. cribl_control_plane/models/functionregexfilter.py +95 -0
  152. cribl_control_plane/models/functionrename.py +107 -0
  153. cribl_control_plane/models/functionresponse.py +242 -0
  154. cribl_control_plane/models/functionrollupmetrics.py +114 -0
  155. cribl_control_plane/models/functionsampling.py +90 -0
  156. cribl_control_plane/models/functionsend.py +141 -0
  157. cribl_control_plane/models/functionsensitivedatascanner.py +128 -0
  158. cribl_control_plane/models/functionserde.py +161 -0
  159. cribl_control_plane/models/functionserialize.py +134 -0
  160. cribl_control_plane/models/functionsidlookup.py +93 -0
  161. cribl_control_plane/models/functionsnmptrapserialize.py +144 -0
  162. cribl_control_plane/models/functionsort.py +97 -0
  163. cribl_control_plane/models/functionstore.py +132 -0
  164. cribl_control_plane/models/functionsuppress.py +115 -0
  165. cribl_control_plane/models/functiontee.py +90 -0
  166. cribl_control_plane/models/functiontrimtimestamp.py +75 -0
  167. cribl_control_plane/models/functionunion.py +80 -0
  168. cribl_control_plane/models/functionunroll.py +80 -0
  169. cribl_control_plane/models/functionwindow.py +96 -0
  170. cribl_control_plane/models/functionxmlunroll.py +92 -0
  171. cribl_control_plane/models/getconfiggroupaclbyproductandidop.py +63 -0
  172. cribl_control_plane/models/getconfiggroupaclteamsbyproductandidop.py +63 -0
  173. cribl_control_plane/models/getconfiggroupbyproductandidop.py +53 -0
  174. cribl_control_plane/models/getconfiggroupconfigversionbyproductandidop.py +43 -0
  175. cribl_control_plane/models/getcribllakedatasetbylakeidandidop.py +5 -24
  176. cribl_control_plane/models/getcribllakedatasetbylakeidop.py +56 -16
  177. cribl_control_plane/models/getfunctionsbyidop.py +18 -0
  178. cribl_control_plane/models/getinputbyidop.py +3 -22
  179. cribl_control_plane/models/getmasterworkerentryop.py +22 -0
  180. cribl_control_plane/models/getoutputbyidop.py +3 -22
  181. cribl_control_plane/models/getoutputpqbyidop.py +3 -21
  182. cribl_control_plane/models/getoutputsamplesbyidop.py +3 -22
  183. cribl_control_plane/models/getpacksbyidop.py +18 -0
  184. cribl_control_plane/models/getpacksop.py +3 -21
  185. cribl_control_plane/models/getpipelinebyidop.py +3 -22
  186. cribl_control_plane/models/getroutesbyidop.py +3 -22
  187. cribl_control_plane/models/getsummaryop.py +23 -32
  188. cribl_control_plane/models/getversioncountop.py +10 -27
  189. cribl_control_plane/models/getversiondiffop.py +12 -28
  190. cribl_control_plane/models/getversionfilesop.py +10 -28
  191. cribl_control_plane/models/getversionop.py +30 -0
  192. cribl_control_plane/models/getversionshowop.py +12 -28
  193. cribl_control_plane/models/getversionstatusop.py +7 -23
  194. cribl_control_plane/models/gitcommitsummary.py +3 -3
  195. cribl_control_plane/models/{routecloneconf.py → gitcountresult.py} +4 -4
  196. cribl_control_plane/models/gitdiffresult.py +16 -0
  197. cribl_control_plane/models/gitfilesresponse.py +7 -5
  198. cribl_control_plane/models/gitinfo.py +14 -3
  199. cribl_control_plane/models/gitlogresult.py +33 -0
  200. cribl_control_plane/models/gitrevertparams.py +3 -3
  201. cribl_control_plane/models/gitrevertresult.py +5 -5
  202. cribl_control_plane/models/gitshowresult.py +19 -0
  203. cribl_control_plane/models/gitstatusresult.py +0 -3
  204. cribl_control_plane/models/groupcreaterequest.py +172 -0
  205. cribl_control_plane/models/hbcriblinfo.py +42 -7
  206. cribl_control_plane/models/healthserverstatus.py +55 -0
  207. cribl_control_plane/models/heartbeatmetadata.py +6 -11
  208. cribl_control_plane/models/input.py +89 -81
  209. cribl_control_plane/models/inputappscope.py +131 -35
  210. cribl_control_plane/models/inputazureblob.py +62 -6
  211. cribl_control_plane/models/inputcloudflarehec.py +518 -0
  212. cribl_control_plane/models/inputcollection.py +49 -6
  213. cribl_control_plane/models/inputconfluentcloud.py +262 -22
  214. cribl_control_plane/models/inputcribl.py +52 -9
  215. cribl_control_plane/models/inputcriblhttp.py +124 -33
  216. cribl_control_plane/models/inputcribllakehttp.py +199 -29
  217. cribl_control_plane/models/inputcriblmetrics.py +53 -9
  218. cribl_control_plane/models/inputcribltcp.py +125 -27
  219. cribl_control_plane/models/inputcrowdstrike.py +99 -10
  220. cribl_control_plane/models/inputdatadogagent.py +101 -27
  221. cribl_control_plane/models/inputdatagen.py +47 -4
  222. cribl_control_plane/models/inputedgeprometheus.py +215 -58
  223. cribl_control_plane/models/inputelastic.py +170 -39
  224. cribl_control_plane/models/inputeventhub.py +212 -9
  225. cribl_control_plane/models/inputexec.py +59 -6
  226. cribl_control_plane/models/inputfile.py +83 -15
  227. cribl_control_plane/models/inputfirehose.py +100 -27
  228. cribl_control_plane/models/inputgooglepubsub.py +83 -15
  229. cribl_control_plane/models/{inputgrafana_union.py → inputgrafana.py} +261 -67
  230. cribl_control_plane/models/inputhttp.py +100 -27
  231. cribl_control_plane/models/inputhttpraw.py +100 -27
  232. cribl_control_plane/models/inputjournalfiles.py +51 -7
  233. cribl_control_plane/models/inputkafka.py +257 -19
  234. cribl_control_plane/models/inputkinesis.py +133 -17
  235. cribl_control_plane/models/inputkubeevents.py +52 -9
  236. cribl_control_plane/models/inputkubelogs.py +66 -13
  237. cribl_control_plane/models/inputkubemetrics.py +66 -13
  238. cribl_control_plane/models/inputloki.py +116 -30
  239. cribl_control_plane/models/inputmetrics.py +97 -24
  240. cribl_control_plane/models/inputmodeldriventelemetry.py +110 -29
  241. cribl_control_plane/models/inputmsk.py +148 -21
  242. cribl_control_plane/models/inputnetflow.py +50 -7
  243. cribl_control_plane/models/inputoffice365mgmt.py +115 -17
  244. cribl_control_plane/models/inputoffice365msgtrace.py +117 -19
  245. cribl_control_plane/models/inputoffice365service.py +117 -19
  246. cribl_control_plane/models/inputopentelemetry.py +146 -35
  247. cribl_control_plane/models/inputprometheus.py +196 -47
  248. cribl_control_plane/models/inputprometheusrw.py +117 -30
  249. cribl_control_plane/models/inputrawudp.py +50 -7
  250. cribl_control_plane/models/inputs3.py +85 -8
  251. cribl_control_plane/models/inputs3inventory.py +99 -10
  252. cribl_control_plane/models/inputsecuritylake.py +100 -10
  253. cribl_control_plane/models/inputsnmp.py +115 -24
  254. cribl_control_plane/models/inputsplunk.py +133 -31
  255. cribl_control_plane/models/inputsplunkhec.py +122 -32
  256. cribl_control_plane/models/inputsplunksearch.py +115 -18
  257. cribl_control_plane/models/inputsqs.py +102 -19
  258. cribl_control_plane/models/{inputsyslog_union.py → inputsyslog.py} +193 -51
  259. cribl_control_plane/models/inputsystemmetrics.py +207 -37
  260. cribl_control_plane/models/inputsystemstate.py +66 -13
  261. cribl_control_plane/models/inputtcp.py +125 -29
  262. cribl_control_plane/models/inputtcpjson.py +115 -29
  263. cribl_control_plane/models/inputwef.py +151 -22
  264. cribl_control_plane/models/inputwindowsmetrics.py +191 -38
  265. cribl_control_plane/models/inputwineventlogs.py +93 -11
  266. cribl_control_plane/models/inputwiz.py +176 -11
  267. cribl_control_plane/models/inputwizwebhook.py +466 -0
  268. cribl_control_plane/models/inputzscalerhec.py +122 -32
  269. cribl_control_plane/models/jobinfo.py +34 -0
  270. cribl_control_plane/models/jobstatus.py +48 -0
  271. cribl_control_plane/models/lakedatasetmetrics.py +17 -0
  272. cribl_control_plane/models/lakehouseconnectiontype.py +2 -1
  273. cribl_control_plane/models/listconfiggroupbyproductop.py +46 -0
  274. cribl_control_plane/models/listmasterworkerentryop.py +64 -0
  275. cribl_control_plane/models/logininfo.py +3 -3
  276. cribl_control_plane/models/masterworkerentry.py +20 -13
  277. cribl_control_plane/models/nodeactiveupgradestatus.py +2 -1
  278. cribl_control_plane/models/nodefailedupgradestatus.py +2 -1
  279. cribl_control_plane/models/nodeprovidedinfo.py +13 -11
  280. cribl_control_plane/models/nodeskippedupgradestatus.py +2 -1
  281. cribl_control_plane/models/nodeupgradestate.py +2 -1
  282. cribl_control_plane/models/nodeupgradestatus.py +51 -5
  283. cribl_control_plane/models/outpostnodeinfo.py +16 -0
  284. cribl_control_plane/models/output.py +103 -89
  285. cribl_control_plane/models/outputazureblob.py +174 -21
  286. cribl_control_plane/models/outputazuredataexplorer.py +517 -93
  287. cribl_control_plane/models/outputazureeventhub.py +318 -34
  288. cribl_control_plane/models/outputazurelogs.py +145 -26
  289. cribl_control_plane/models/outputchronicle.py +532 -0
  290. cribl_control_plane/models/outputclickhouse.py +208 -37
  291. cribl_control_plane/models/outputcloudflarer2.py +632 -0
  292. cribl_control_plane/models/outputcloudwatch.py +132 -26
  293. cribl_control_plane/models/outputconfluentcloud.py +387 -46
  294. cribl_control_plane/models/outputcriblhttp.py +203 -36
  295. cribl_control_plane/models/outputcribllake.py +161 -21
  296. cribl_control_plane/models/outputcribltcp.py +199 -34
  297. cribl_control_plane/models/outputcrowdstrikenextgensiem.py +176 -32
  298. cribl_control_plane/models/outputdatabricks.py +501 -0
  299. cribl_control_plane/models/outputdatadog.py +204 -36
  300. cribl_control_plane/models/outputdataset.py +186 -34
  301. cribl_control_plane/models/outputdevnull.py +5 -5
  302. cribl_control_plane/models/outputdiskspool.py +22 -7
  303. cribl_control_plane/models/outputdls3.py +238 -29
  304. cribl_control_plane/models/outputdynatracehttp.py +211 -37
  305. cribl_control_plane/models/outputdynatraceotlp.py +213 -39
  306. cribl_control_plane/models/outputelastic.py +199 -30
  307. cribl_control_plane/models/outputelasticcloud.py +174 -29
  308. cribl_control_plane/models/outputexabeam.py +99 -13
  309. cribl_control_plane/models/outputfilesystem.py +139 -14
  310. cribl_control_plane/models/outputgooglechronicle.py +216 -35
  311. cribl_control_plane/models/outputgooglecloudlogging.py +177 -34
  312. cribl_control_plane/models/outputgooglecloudstorage.py +220 -29
  313. cribl_control_plane/models/outputgooglepubsub.py +138 -51
  314. cribl_control_plane/models/outputgrafanacloud.py +386 -70
  315. cribl_control_plane/models/outputgraphite.py +131 -28
  316. cribl_control_plane/models/outputhoneycomb.py +145 -26
  317. cribl_control_plane/models/outputhumiohec.py +165 -31
  318. cribl_control_plane/models/outputinfluxdb.py +165 -28
  319. cribl_control_plane/models/outputkafka.py +378 -41
  320. cribl_control_plane/models/outputkinesis.py +168 -30
  321. cribl_control_plane/models/outputloki.py +171 -27
  322. cribl_control_plane/models/outputmicrosoftfabric.py +540 -0
  323. cribl_control_plane/models/outputminio.py +228 -28
  324. cribl_control_plane/models/outputmsk.py +270 -43
  325. cribl_control_plane/models/outputnewrelic.py +176 -34
  326. cribl_control_plane/models/outputnewrelicevents.py +166 -31
  327. cribl_control_plane/models/outputopentelemetry.py +240 -40
  328. cribl_control_plane/models/outputprometheus.py +145 -26
  329. cribl_control_plane/models/outputring.py +54 -13
  330. cribl_control_plane/models/outputs3.py +238 -31
  331. cribl_control_plane/models/outputsecuritylake.py +182 -21
  332. cribl_control_plane/models/outputsentinel.py +175 -32
  333. cribl_control_plane/models/outputsentineloneaisiem.py +184 -38
  334. cribl_control_plane/models/outputservicenow.py +226 -41
  335. cribl_control_plane/models/outputsignalfx.py +145 -26
  336. cribl_control_plane/models/outputsns.py +146 -28
  337. cribl_control_plane/models/outputsplunk.py +209 -39
  338. cribl_control_plane/models/outputsplunkhec.py +243 -31
  339. cribl_control_plane/models/outputsplunklb.py +266 -46
  340. cribl_control_plane/models/outputsqs.py +166 -36
  341. cribl_control_plane/models/outputstatsd.py +130 -28
  342. cribl_control_plane/models/outputstatsdext.py +131 -28
  343. cribl_control_plane/models/outputsumologic.py +146 -25
  344. cribl_control_plane/models/outputsyslog.py +323 -51
  345. cribl_control_plane/models/outputtcpjson.py +191 -37
  346. cribl_control_plane/models/outputwavefront.py +145 -26
  347. cribl_control_plane/models/outputwebhook.py +216 -38
  348. cribl_control_plane/models/outputxsiam.py +148 -31
  349. cribl_control_plane/models/packinfo.py +11 -8
  350. cribl_control_plane/models/packinstallinfo.py +14 -11
  351. cribl_control_plane/models/packrequestbody_union.py +140 -0
  352. cribl_control_plane/models/packupgraderequest.py +26 -0
  353. cribl_control_plane/models/piisettings_union.py +31 -0
  354. cribl_control_plane/models/productscore.py +10 -0
  355. cribl_control_plane/models/rbacresource.py +2 -1
  356. cribl_control_plane/models/resourcepolicy.py +15 -2
  357. cribl_control_plane/models/rollbacksettings_union.py +44 -0
  358. cribl_control_plane/models/routeconf.py +3 -4
  359. cribl_control_plane/models/routes.py +0 -24
  360. cribl_control_plane/models/runnablejob.py +27 -0
  361. cribl_control_plane/models/runnablejobcollection.py +628 -0
  362. cribl_control_plane/models/runnablejobexecutor.py +360 -0
  363. cribl_control_plane/models/runnablejobscheduledsearch.py +279 -0
  364. cribl_control_plane/models/schemeclientoauth.py +5 -0
  365. cribl_control_plane/models/snisettings_union.py +31 -0
  366. cribl_control_plane/models/systemsettingsconf.py +291 -0
  367. cribl_control_plane/models/tlssettings_union.py +43 -0
  368. cribl_control_plane/models/updateconfiggroupbyproductandidop.py +56 -0
  369. cribl_control_plane/models/updateconfiggroupdeploybyproductandidop.py +53 -0
  370. cribl_control_plane/models/updatecribllakedatasetbylakeidandidop.py +14 -29
  371. cribl_control_plane/models/updatehectokenrequest.py +7 -1
  372. cribl_control_plane/models/updateinputbyidop.py +5 -23
  373. cribl_control_plane/models/updateinputhectokenbyidandtokenop.py +5 -23
  374. cribl_control_plane/models/updateoutputbyidop.py +5 -23
  375. cribl_control_plane/models/updatepacksbyidop.py +12 -50
  376. cribl_control_plane/models/updatepacksop.py +12 -24
  377. cribl_control_plane/models/updatepipelinebyidop.py +5 -23
  378. cribl_control_plane/models/updateroutesbyidop.py +8 -27
  379. cribl_control_plane/models/upgradegroupsettings_union.py +43 -0
  380. cribl_control_plane/models/upgradepackageurls.py +20 -0
  381. cribl_control_plane/models/upgradesettings.py +38 -0
  382. cribl_control_plane/models/uploadpackresponse.py +13 -0
  383. cribl_control_plane/models/{appmode.py → workertypes.py} +2 -5
  384. cribl_control_plane/{workers_sdk.py → nodes.py} +102 -234
  385. cribl_control_plane/packs.py +385 -184
  386. cribl_control_plane/pipelines.py +116 -66
  387. cribl_control_plane/routes_sdk.py +102 -70
  388. cribl_control_plane/samples.py +407 -0
  389. cribl_control_plane/sdk.py +35 -25
  390. cribl_control_plane/settings.py +20 -0
  391. cribl_control_plane/sources.py +143 -545
  392. cribl_control_plane/statuses.py +195 -0
  393. cribl_control_plane/summaries.py +195 -0
  394. cribl_control_plane/system_sdk.py +20 -0
  395. cribl_control_plane/teams.py +36 -28
  396. cribl_control_plane/tokens.py +210 -0
  397. cribl_control_plane/utils/__init__.py +18 -5
  398. cribl_control_plane/utils/annotations.py +32 -8
  399. cribl_control_plane/utils/eventstreaming.py +10 -0
  400. cribl_control_plane/utils/forms.py +21 -10
  401. cribl_control_plane/utils/queryparams.py +14 -2
  402. cribl_control_plane/utils/retries.py +69 -5
  403. cribl_control_plane/utils/security.py +5 -0
  404. cribl_control_plane/utils/unmarshal_json_response.py +15 -1
  405. cribl_control_plane/versions.py +31 -0
  406. cribl_control_plane/{distributed.py → versions_configs.py} +29 -35
  407. cribl_control_plane-0.4.0b23.dist-info/METADATA +855 -0
  408. cribl_control_plane-0.4.0b23.dist-info/RECORD +450 -0
  409. {cribl_control_plane-0.0.21.dist-info → cribl_control_plane-0.4.0b23.dist-info}/WHEEL +1 -1
  410. cribl_control_plane-0.4.0b23.dist-info/licenses/LICENSE +201 -0
  411. cribl_control_plane/errors/healthstatus_error.py +0 -32
  412. cribl_control_plane/models/createinputop.py +0 -18238
  413. cribl_control_plane/models/createoutputop.py +0 -18437
  414. cribl_control_plane/models/createpipelineop.py +0 -24
  415. cribl_control_plane/models/createproductsgroupsbyproductop.py +0 -54
  416. cribl_control_plane/models/createversionpushop.py +0 -23
  417. cribl_control_plane/models/createversionsyncop.py +0 -23
  418. cribl_control_plane/models/deletegroupsbyidop.py +0 -37
  419. cribl_control_plane/models/getgroupsaclbyidop.py +0 -63
  420. cribl_control_plane/models/getgroupsbyidop.py +0 -49
  421. cribl_control_plane/models/getgroupsconfigversionbyidop.py +0 -36
  422. cribl_control_plane/models/getproductsgroupsaclteamsbyproductandidop.py +0 -78
  423. cribl_control_plane/models/getproductsgroupsbyproductop.py +0 -58
  424. cribl_control_plane/models/getsummaryworkersop.py +0 -39
  425. cribl_control_plane/models/getversioncurrentbranchop.py +0 -23
  426. cribl_control_plane/models/getworkersop.py +0 -82
  427. cribl_control_plane/models/healthstatus.py +0 -33
  428. cribl_control_plane/models/packrequestbody.py +0 -75
  429. cribl_control_plane/models/restartresponse.py +0 -26
  430. cribl_control_plane/models/routesroute_input.py +0 -67
  431. cribl_control_plane/models/updategroupsbyidop.py +0 -48
  432. cribl_control_plane/models/updategroupsdeploybyidop.py +0 -46
  433. cribl_control_plane/models/updateworkersrestartop.py +0 -24
  434. cribl_control_plane/versioning.py +0 -2309
  435. cribl_control_plane-0.0.21.dist-info/METADATA +0 -561
  436. cribl_control_plane-0.0.21.dist-info/RECORD +0 -301
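Beyond the added modules, the list records several renames at the package root (for example lake.py → lakedatasets.py, workers_sdk.py → nodes.py, and distributed.py → versions_configs.py), so imports against the old paths will break on upgrade. A minimal migration sketch follows; only the module paths are taken from the list above, and the assumption that each new module keeps the old one's public role is ours, not the diff's.

import importlib

# Old 0.0.21 module path -> new 0.4.0b23 module path (from the file list above).
renames = {
    "cribl_control_plane.lake": "cribl_control_plane.lakedatasets",
    "cribl_control_plane.workers_sdk": "cribl_control_plane.nodes",
    "cribl_control_plane.distributed": "cribl_control_plane.versions_configs",
}

for old_path, new_path in renames.items():
    # Importing the new path is the fix; the old path no longer exists in 0.4.0b23.
    module = importlib.import_module(new_path)
    print(f"{old_path} -> {module.__name__}")

The remainder of this page shows the diff for cribl_control_plane/models/outputazuredataexplorer.py.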
@@ -1,9 +1,13 @@
  """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

  from __future__ import annotations
+ from cribl_control_plane import models, utils
  from cribl_control_plane.types import BaseModel
+ from cribl_control_plane.utils import validate_open_enum
  from enum import Enum
  import pydantic
+ from pydantic import field_serializer
+ from pydantic.functional_validators import PlainValidator
  from typing import List, Optional
  from typing_extensions import Annotated, NotRequired, TypedDict

@@ -12,12 +16,16 @@ class OutputAzureDataExplorerType(str, Enum):
      AZURE_DATA_EXPLORER = "azure_data_explorer"


- class IngestionMode(str, Enum):
+ class IngestionMode(str, Enum, metaclass=utils.OpenEnumMeta):
+     # Batching
      BATCHING = "batching"
+     # Streaming
      STREAMING = "streaming"


- class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
+ class OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint(
+     str, Enum, metaclass=utils.OpenEnumMeta
+ ):
      r"""Endpoint used to acquire authentication tokens from Azure"""

      HTTPS_LOGIN_MICROSOFTONLINE_COM = "https://login.microsoftonline.com"
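IngestionMode above is the first of many enums in this file that gain metaclass=utils.OpenEnumMeta; paired with the PlainValidator(validate_open_enum(False)) annotations further down, each enum becomes "open": a wire value outside the declared members passes validation as a plain string instead of raising. A minimal self-contained sketch of that idea, not the SDK's actual implementation:

from enum import Enum, EnumMeta

class OpenEnumMetaSketch(EnumMeta):
    def __call__(cls, value, *args, **kwargs):
        try:
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            return value  # unknown values pass through as plain strings

class IngestionModeSketch(str, Enum, metaclass=OpenEnumMetaSketch):
    BATCHING = "batching"
    STREAMING = "streaming"

assert IngestionModeSketch("streaming") is IngestionModeSketch.STREAMING
assert IngestionModeSketch("bulk") == "bulk"  # no ValueError on unknown input

For a generated SDK this matters because a newer server can introduce enum values without breaking validation in older clients.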
@@ -25,11 +33,16 @@ class MicrosoftEntraIDAuthenticationEndpoint(str, Enum):
      HTTPS_LOGIN_PARTNER_MICROSOFTONLINE_CN = "https://login.partner.microsoftonline.cn"


- class OutputAzureDataExplorerAuthenticationMethod(str, Enum):
+ class OutputAzureDataExplorerAuthenticationMethod(
+     str, Enum, metaclass=utils.OpenEnumMeta
+ ):
      r"""The type of OAuth 2.0 client credentials grant flow to use"""

+     # Client secret
      CLIENT_SECRET = "clientSecret"
+     # Client secret (text secret)
      CLIENT_TEXT_SECRET = "clientTextSecret"
+     # Certificate
      CERTIFICATE = "certificate"


@@ -45,31 +58,96 @@ class OutputAzureDataExplorerCertificate(BaseModel):
      r"""The certificate you registered as credentials for your app in the Azure portal"""


- class OutputAzureDataExplorerBackpressureBehavior(str, Enum):
-     r"""How to handle events when all receivers are exerting backpressure"""
-
-     BLOCK = "block"
-     DROP = "drop"
-     QUEUE = "queue"
-
-
- class OutputAzureDataExplorerDataFormat(str, Enum):
+ class OutputAzureDataExplorerDataFormat(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Format of the output data"""

+     # JSON
      JSON = "json"
+     # Raw
      RAW = "raw"
+     # Parquet
      PARQUET = "parquet"


- class OutputAzureDataExplorerDiskSpaceProtection(str, Enum):
+ class OutputAzureDataExplorerCompressCompression(
+     str, Enum, metaclass=utils.OpenEnumMeta
+ ):
+     r"""Data compression format to apply to HTTP content before it is delivered"""
+
+     NONE = "none"
+     GZIP = "gzip"
+
+
+ class OutputAzureDataExplorerCompressionLevel(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Compression level to apply before moving files to final destination"""
+
+     # Best Speed
+     BEST_SPEED = "best_speed"
+     # Normal
+     NORMAL = "normal"
+     # Best Compression
+     BEST_COMPRESSION = "best_compression"
+
+
+ class OutputAzureDataExplorerParquetVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Determines which data types are supported and how they are represented"""
+
+     # 1.0
+     PARQUET_1_0 = "PARQUET_1_0"
+     # 2.4
+     PARQUET_2_4 = "PARQUET_2_4"
+     # 2.6
+     PARQUET_2_6 = "PARQUET_2_6"
+
+
+ class OutputAzureDataExplorerDataPageVersion(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+
+     # V1
+     DATA_PAGE_V1 = "DATA_PAGE_V1"
+     # V2
+     DATA_PAGE_V2 = "DATA_PAGE_V2"
+
+
+ class OutputAzureDataExplorerKeyValueMetadatumTypedDict(TypedDict):
+     value: str
+     key: NotRequired[str]
+
+
+ class OutputAzureDataExplorerKeyValueMetadatum(BaseModel):
+     value: str
+
+     key: Optional[str] = ""
+
+
+ class OutputAzureDataExplorerBackpressureBehavior(
+     str, Enum, metaclass=utils.OpenEnumMeta
+ ):
+     r"""How to handle events when all receivers are exerting backpressure"""
+
+     # Block
+     BLOCK = "block"
+     # Drop
+     DROP = "drop"
+     # Persistent Queue
+     QUEUE = "queue"
+
+
+ class OutputAzureDataExplorerDiskSpaceProtection(
+     str, Enum, metaclass=utils.OpenEnumMeta
+ ):
      r"""How to handle events when disk space is below the global 'Min free disk space' limit"""

+     # Block
      BLOCK = "block"
+     # Drop
      DROP = "drop"


- class PrefixOptional(str, Enum):
+ class PrefixOptional(str, Enum, metaclass=utils.OpenEnumMeta):
+     # drop-by
      DROP_BY = "dropBy"
+     # ingest-by
      INGEST_BY = "ingestBy"


@@ -81,7 +159,18 @@ class ExtentTagTypedDict(TypedDict):
  class ExtentTag(BaseModel):
      value: str

-     prefix: Optional[PrefixOptional] = None
+     prefix: Annotated[
+         Optional[PrefixOptional], PlainValidator(validate_open_enum(False))
+     ] = None
+
+     @field_serializer("prefix")
+     def serialize_prefix(self, value):
+         if isinstance(value, str):
+             try:
+                 return models.PrefixOptional(value)
+             except ValueError:
+                 return value
+         return value


  class IngestIfNotExistTypedDict(TypedDict):
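The serialize_prefix hook above is the write-side half of the open-enum pattern: on dump, a recognized string is normalized back to the PrefixOptional member, while an unrecognized one is emitted unchanged. A hedged usage sketch, assuming cribl-control-plane 0.4.0b23 is installed and ExtentTag is re-exported from cribl_control_plane.models:

from cribl_control_plane import models  # assumes the model is re-exported here

# A known prefix validates to the enum member.
tag = models.ExtentTag(value="build-1234", prefix="dropBy")

# An unknown prefix survives validation as a plain string, and serialize_prefix
# passes it through on serialization instead of raising.
future_tag = models.ExtentTag(value="build-1234", prefix="someFutureTag")
print(future_tag.model_dump())  # {'value': 'build-1234', 'prefix': 'someFutureTag'}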
@@ -92,19 +181,25 @@ class IngestIfNotExist(BaseModel):
      value: str


- class ReportLevel(str, Enum):
+ class ReportLevel(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Level of ingestion status reporting. Defaults to FailuresOnly."""

+     # FailuresOnly
      FAILURES_ONLY = "failuresOnly"
+     # DoNotReport
      DO_NOT_REPORT = "doNotReport"
+     # FailuresAndSuccesses
      FAILURES_AND_SUCCESSES = "failuresAndSuccesses"


- class ReportMethod(str, Enum):
+ class ReportMethod(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""Target of the ingestion status reporting. Defaults to Queue."""

+     # Queue
      QUEUE = "queue"
+     # Table
      TABLE = "table"
+     # QueueAndTable
      QUEUE_AND_TABLE = "queueAndTable"


@@ -173,35 +268,37 @@ class OutputAzureDataExplorerTimeoutRetrySettings(BaseModel):
      r"""The maximum backoff interval, in milliseconds, Cribl Stream should apply. Default (and minimum) is 10,000 ms (10 seconds); maximum is 180,000 ms (180 seconds)."""


- class OutputAzureDataExplorerCompressCompression(str, Enum):
-     r"""Data compression format to apply to HTTP content before it is delivered"""
+ class OutputAzureDataExplorerMode(str, Enum, metaclass=utils.OpenEnumMeta):
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""

-     NONE = "none"
-     GZIP = "gzip"
+     # Error
+     ERROR = "error"
+     # Backpressure
+     ALWAYS = "always"
+     # Always On
+     BACKPRESSURE = "backpressure"


- class OutputAzureDataExplorerPqCompressCompression(str, Enum):
+ class OutputAzureDataExplorerPqCompressCompression(
+     str, Enum, metaclass=utils.OpenEnumMeta
+ ):
      r"""Codec to use to compress the persisted data"""

+     # None
      NONE = "none"
+     # Gzip
      GZIP = "gzip"


- class OutputAzureDataExplorerQueueFullBehavior(str, Enum):
+ class OutputAzureDataExplorerQueueFullBehavior(str, Enum, metaclass=utils.OpenEnumMeta):
      r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""

+     # Block
      BLOCK = "block"
+     # Drop new data
      DROP = "drop"


- class OutputAzureDataExplorerMode(str, Enum):
-     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
-     ERROR = "error"
-     BACKPRESSURE = "backpressure"
-     ALWAYS = "always"
-
-
  class OutputAzureDataExplorerPqControlsTypedDict(TypedDict):
      pass

@@ -211,6 +308,7 @@ class OutputAzureDataExplorerPqControls(BaseModel):


  class OutputAzureDataExplorerTypedDict(TypedDict):
+     type: OutputAzureDataExplorerType
      cluster_url: str
      r"""The base URI for your cluster. Typically, `https://<cluster>.<region>.kusto.windows.net`."""
      database: str
@@ -225,7 +323,6 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
      r"""Scope to pass in the OAuth request parameter"""
      id: NotRequired[str]
      r"""Unique ID for this output"""
-     type: NotRequired[OutputAzureDataExplorerType]
      pipeline: NotRequired[str]
      r"""Pipeline to process data before sending out to this output"""
      system_fields: NotRequired[List[str]]
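Taken together, the two hunks above make type a required key of OutputAzureDataExplorerTypedDict (it was NotRequired in 0.0.21), which is a breaking change for dict literals that omitted it. A sketch under the assumption that these names are re-exported from cribl_control_plane.models; required keys other than those shown are omitted, and the URL and database name are placeholders:

from cribl_control_plane.models import (
    OutputAzureDataExplorerType,
    OutputAzureDataExplorerTypedDict,
)

# `type` must now be present for the dict literal to type-check.
out: OutputAzureDataExplorerTypedDict = {
    "type": OutputAzureDataExplorerType.AZURE_DATA_EXPLORER,
    "cluster_url": "https://mycluster.westus.kusto.windows.net",  # placeholder
    "database": "mydb",  # placeholder
}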
@@ -237,7 +334,9 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
      validate_database_settings: NotRequired[bool]
      r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
      ingest_mode: NotRequired[IngestionMode]
-     oauth_endpoint: NotRequired[MicrosoftEntraIDAuthenticationEndpoint]
+     oauth_endpoint: NotRequired[
+         OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint
+     ]
      r"""Endpoint used to acquire authentication tokens from Azure"""
      oauth_type: NotRequired[OutputAzureDataExplorerAuthenticationMethod]
      r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -247,14 +346,58 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
      text_secret: NotRequired[str]
      r"""Select or create a stored text secret"""
      certificate: NotRequired[OutputAzureDataExplorerCertificateTypedDict]
+     format_: NotRequired[OutputAzureDataExplorerDataFormat]
+     r"""Format of the output data"""
+     compress: NotRequired[OutputAzureDataExplorerCompressCompression]
+     r"""Data compression format to apply to HTTP content before it is delivered"""
+     compression_level: NotRequired[OutputAzureDataExplorerCompressionLevel]
+     r"""Compression level to apply before moving files to final destination"""
+     automatic_schema: NotRequired[bool]
+     r"""Automatically calculate the schema based on the events of each Parquet file generated"""
+     parquet_schema: NotRequired[str]
+     r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
+     parquet_version: NotRequired[OutputAzureDataExplorerParquetVersion]
+     r"""Determines which data types are supported and how they are represented"""
+     parquet_data_page_version: NotRequired[OutputAzureDataExplorerDataPageVersion]
+     r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
+     parquet_row_group_length: NotRequired[float]
+     r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
+     parquet_page_size: NotRequired[str]
+     r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
+     should_log_invalid_rows: NotRequired[bool]
+     r"""Log up to 3 rows that @{product} skips due to data mismatch"""
+     key_value_metadata: NotRequired[
+         List[OutputAzureDataExplorerKeyValueMetadatumTypedDict]
+     ]
+     r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
+     enable_statistics: NotRequired[bool]
+     r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
+     enable_write_page_index: NotRequired[bool]
+     r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
+     enable_page_checksum: NotRequired[bool]
+     r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
+     remove_empty_dirs: NotRequired[bool]
+     r"""Remove empty staging directories after moving files"""
+     empty_dir_cleanup_sec: NotRequired[float]
+     r"""How frequently, in seconds, to clean up empty directories"""
+     directory_batch_size: NotRequired[float]
+     r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
+     deadletter_enabled: NotRequired[bool]
+     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
+     deadletter_path: NotRequired[str]
+     r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
+     max_retry_num: NotRequired[float]
+     r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
+     is_mapping_obj: NotRequired[bool]
+     r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
+     mapping_obj: NotRequired[str]
+     r"""Enter a JSON object that defines your desired data mapping"""
+     mapping_ref: NotRequired[str]
+     r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
      ingest_url: NotRequired[str]
      r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
      on_backpressure: NotRequired[OutputAzureDataExplorerBackpressureBehavior]
      r"""How to handle events when all receivers are exerting backpressure"""
-     is_mapping_obj: NotRequired[bool]
-     r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
-     format_: NotRequired[OutputAzureDataExplorerDataFormat]
-     r"""Format of the output data"""
      stage_path: NotRequired[str]
      r"""Filesystem location in which to buffer files before compressing and moving to final destination. Use performant and stable storage."""
      file_name_suffix: NotRequired[str]
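This hunk relocates the format, compression, and mapping settings and introduces the full Parquet surface on the TypedDict. Using only keys from this hunk and the defaults declared later in the BaseModel, a sketch of the Parquet-related subset (illustrative, not a recommendation):

# Keys from the TypedDict above; values echo the defaults declared later in
# OutputAzureDataExplorer, except where marked.
parquet_subset = {
    "format_": "parquet",                         # switch from the default "json"
    "compression_level": "best_speed",            # default
    "parquet_version": "PARQUET_2_6",             # default
    "parquet_data_page_version": "DATA_PAGE_V2",  # default
    "parquet_row_group_length": 10000,            # default
    "parquet_page_size": "1MB",                   # default
    "key_value_metadata": [{"key": "OCSF Event Class", "value": "9001"}],  # docstring example
}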
@@ -273,10 +416,6 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
      r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
      add_id_to_stage_path: NotRequired[bool]
      r"""Add the Output ID value to staging location"""
-     remove_empty_dirs: NotRequired[bool]
-     r"""Remove empty staging directories after moving files"""
-     deadletter_enabled: NotRequired[bool]
-     r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
      timeout_sec: NotRequired[float]
      r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
      flush_immediately: NotRequired[bool]
@@ -302,10 +441,6 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
      ]
      response_honor_retry_after_header: NotRequired[bool]
      r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
-     compress: NotRequired[OutputAzureDataExplorerCompressCompression]
-     r"""Data compression format to apply to HTTP content before it is delivered"""
-     mapping_ref: NotRequired[str]
-     r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
      concurrency: NotRequired[float]
      r"""Maximum number of ongoing requests before blocking"""
      max_payload_size_kb: NotRequired[float]
@@ -323,6 +458,16 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
      r"""Enable round-robin DNS lookup. When a DNS server returns multiple addresses, @{product} will cycle through them in the order returned. For optimal performance, consider enabling this setting for non-load balanced destinations."""
      keep_alive: NotRequired[bool]
      r"""Disable to close the connection immediately after sending the outgoing request"""
+     pq_strict_ordering: NotRequired[bool]
+     r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+     pq_rate_per_sec: NotRequired[float]
+     r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+     pq_mode: NotRequired[OutputAzureDataExplorerMode]
+     r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+     pq_max_buffer_size: NotRequired[float]
+     r"""The maximum number of events to hold in memory before writing the events to disk"""
+     pq_max_backpressure_sec: NotRequired[float]
+     r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
      pq_max_file_size: NotRequired[str]
      r"""The maximum size to store in each queue file before closing and optionally compressing (KB, MB, etc.)"""
      pq_max_size: NotRequired[str]
@@ -333,12 +478,12 @@ class OutputAzureDataExplorerTypedDict(TypedDict):
333
478
  r"""Codec to use to compress the persisted data"""
334
479
  pq_on_backpressure: NotRequired[OutputAzureDataExplorerQueueFullBehavior]
335
480
  r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
336
- pq_mode: NotRequired[OutputAzureDataExplorerMode]
337
- r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
338
481
  pq_controls: NotRequired[OutputAzureDataExplorerPqControlsTypedDict]
339
482
 
340
483
 
341
484
  class OutputAzureDataExplorer(BaseModel):
485
+ type: OutputAzureDataExplorerType
486
+
342
487
  cluster_url: Annotated[str, pydantic.Field(alias="clusterUrl")]
343
488
  r"""The base URI for your cluster. Typically, `https://<cluster>.<region>.kusto.windows.net`."""
344
489
 
@@ -360,8 +505,6 @@ class OutputAzureDataExplorer(BaseModel):
360
505
  id: Optional[str] = None
361
506
  r"""Unique ID for this output"""
362
507
 
363
- type: Optional[OutputAzureDataExplorerType] = None
364
-
365
508
  pipeline: Optional[str] = None
366
509
  r"""Pipeline to process data before sending out to this output"""
367
510
 
@@ -382,17 +525,24 @@ class OutputAzureDataExplorer(BaseModel):
382
525
  r"""When saving or starting the Destination, validate the database name and credentials; also validate table name, except when creating a new table. Disable if your Azure app does not have both the Database Viewer and the Table Viewer role."""
383
526
 
384
527
  ingest_mode: Annotated[
385
- Optional[IngestionMode], pydantic.Field(alias="ingestMode")
528
+ Annotated[Optional[IngestionMode], PlainValidator(validate_open_enum(False))],
529
+ pydantic.Field(alias="ingestMode"),
386
530
  ] = IngestionMode.BATCHING
387
531
 
388
532
  oauth_endpoint: Annotated[
389
- Optional[MicrosoftEntraIDAuthenticationEndpoint],
533
+ Annotated[
534
+ Optional[OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint],
535
+ PlainValidator(validate_open_enum(False)),
536
+ ],
390
537
  pydantic.Field(alias="oauthEndpoint"),
391
- ] = MicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
538
+ ] = OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint.HTTPS_LOGIN_MICROSOFTONLINE_COM
392
539
  r"""Endpoint used to acquire authentication tokens from Azure"""
393
540
 
394
541
  oauth_type: Annotated[
395
- Optional[OutputAzureDataExplorerAuthenticationMethod],
542
+ Annotated[
543
+ Optional[OutputAzureDataExplorerAuthenticationMethod],
544
+ PlainValidator(validate_open_enum(False)),
545
+ ],
396
546
  pydantic.Field(alias="oauthType"),
397
547
  ] = OutputAzureDataExplorerAuthenticationMethod.CLIENT_SECRET
398
548
  r"""The type of OAuth 2.0 client credentials grant flow to use"""
@@ -407,24 +557,144 @@ class OutputAzureDataExplorer(BaseModel):
407
557
 
408
558
  certificate: Optional[OutputAzureDataExplorerCertificate] = None
409
559
 
410
- ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
411
- r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
560
+ format_: Annotated[
561
+ Annotated[
562
+ Optional[OutputAzureDataExplorerDataFormat],
563
+ PlainValidator(validate_open_enum(False)),
564
+ ],
565
+ pydantic.Field(alias="format"),
566
+ ] = OutputAzureDataExplorerDataFormat.JSON
567
+ r"""Format of the output data"""
412
568
 
413
- on_backpressure: Annotated[
414
- Optional[OutputAzureDataExplorerBackpressureBehavior],
415
- pydantic.Field(alias="onBackpressure"),
416
- ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
417
- r"""How to handle events when all receivers are exerting backpressure"""
569
+ compress: Annotated[
570
+ Optional[OutputAzureDataExplorerCompressCompression],
571
+ PlainValidator(validate_open_enum(False)),
572
+ ] = OutputAzureDataExplorerCompressCompression.GZIP
573
+ r"""Data compression format to apply to HTTP content before it is delivered"""
574
+
575
+ compression_level: Annotated[
576
+ Annotated[
577
+ Optional[OutputAzureDataExplorerCompressionLevel],
578
+ PlainValidator(validate_open_enum(False)),
579
+ ],
580
+ pydantic.Field(alias="compressionLevel"),
581
+ ] = OutputAzureDataExplorerCompressionLevel.BEST_SPEED
582
+ r"""Compression level to apply before moving files to final destination"""
583
+
584
+ automatic_schema: Annotated[
585
+ Optional[bool], pydantic.Field(alias="automaticSchema")
586
+ ] = False
587
+ r"""Automatically calculate the schema based on the events of each Parquet file generated"""
588
+
589
+ parquet_schema: Annotated[Optional[str], pydantic.Field(alias="parquetSchema")] = (
590
+ None
591
+ )
592
+ r"""To add a new schema, navigate to Processing > Knowledge > Parquet Schemas"""
593
+
594
+ parquet_version: Annotated[
595
+ Annotated[
596
+ Optional[OutputAzureDataExplorerParquetVersion],
597
+ PlainValidator(validate_open_enum(False)),
598
+ ],
599
+ pydantic.Field(alias="parquetVersion"),
600
+ ] = OutputAzureDataExplorerParquetVersion.PARQUET_2_6
601
+ r"""Determines which data types are supported and how they are represented"""
602
+
603
+ parquet_data_page_version: Annotated[
604
+ Annotated[
605
+ Optional[OutputAzureDataExplorerDataPageVersion],
606
+ PlainValidator(validate_open_enum(False)),
607
+ ],
608
+ pydantic.Field(alias="parquetDataPageVersion"),
609
+ ] = OutputAzureDataExplorerDataPageVersion.DATA_PAGE_V2
610
+ r"""Serialization format of data pages. Note that some reader implementations use Data page V2's attributes to work more efficiently, while others ignore it."""
611
+
612
+ parquet_row_group_length: Annotated[
613
+ Optional[float], pydantic.Field(alias="parquetRowGroupLength")
614
+ ] = 10000
615
+ r"""The number of rows that every group will contain. The final group can contain a smaller number of rows."""
616
+
617
+ parquet_page_size: Annotated[
618
+ Optional[str], pydantic.Field(alias="parquetPageSize")
619
+ ] = "1MB"
620
+ r"""Target memory size for page segments, such as 1MB or 128MB. Generally, lower values improve reading speed, while higher values improve compression."""
621
+
622
+ should_log_invalid_rows: Annotated[
623
+ Optional[bool], pydantic.Field(alias="shouldLogInvalidRows")
624
+ ] = None
625
+ r"""Log up to 3 rows that @{product} skips due to data mismatch"""
626
+
627
+ key_value_metadata: Annotated[
628
+ Optional[List[OutputAzureDataExplorerKeyValueMetadatum]],
629
+ pydantic.Field(alias="keyValueMetadata"),
630
+ ] = None
631
+ r"""The metadata of files the Destination writes will include the properties you add here as key-value pairs. Useful for tagging. Examples: \"key\":\"OCSF Event Class\", \"value\":\"9001\" """
632
+
633
+ enable_statistics: Annotated[
634
+ Optional[bool], pydantic.Field(alias="enableStatistics")
635
+ ] = True
636
+ r"""Statistics profile an entire file in terms of minimum/maximum values within data, numbers of nulls, etc. You can use Parquet tools to view statistics."""
637
+
638
+ enable_write_page_index: Annotated[
639
+ Optional[bool], pydantic.Field(alias="enableWritePageIndex")
640
+ ] = True
641
+ r"""One page index contains statistics for one data page. Parquet readers use statistics to enable page skipping."""
642
+
643
+ enable_page_checksum: Annotated[
644
+ Optional[bool], pydantic.Field(alias="enablePageChecksum")
645
+ ] = False
646
+ r"""Parquet tools can use the checksum of a Parquet page to verify data integrity"""
647
+
648
+ remove_empty_dirs: Annotated[
649
+ Optional[bool], pydantic.Field(alias="removeEmptyDirs")
650
+ ] = True
651
+ r"""Remove empty staging directories after moving files"""
652
+
653
+ empty_dir_cleanup_sec: Annotated[
654
+ Optional[float], pydantic.Field(alias="emptyDirCleanupSec")
655
+ ] = 300
656
+ r"""How frequently, in seconds, to clean up empty directories"""
657
+
658
+ directory_batch_size: Annotated[
659
+ Optional[float], pydantic.Field(alias="directoryBatchSize")
660
+ ] = 1000
661
+ r"""Number of directories to process in each batch during cleanup of empty directories. Minimum is 10, maximum is 10000. Higher values may require more memory."""
662
+
663
+ deadletter_enabled: Annotated[
664
+ Optional[bool], pydantic.Field(alias="deadletterEnabled")
665
+ ] = False
666
+ r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
667
+
668
+ deadletter_path: Annotated[
669
+ Optional[str], pydantic.Field(alias="deadletterPath")
670
+ ] = "$CRIBL_HOME/state/outputs/dead-letter"
671
+ r"""Storage location for files that fail to reach their final destination after maximum retries are exceeded"""
672
+
673
+ max_retry_num: Annotated[Optional[float], pydantic.Field(alias="maxRetryNum")] = 20
674
+ r"""The maximum number of times a file will attempt to move to its final destination before being dead-lettered"""
418
675
 
419
676
  is_mapping_obj: Annotated[Optional[bool], pydantic.Field(alias="isMappingObj")] = (
420
677
  False
421
678
  )
422
679
  r"""Send a JSON mapping object instead of specifying an existing named data mapping"""
423
680
 
424
- format_: Annotated[
425
- Optional[OutputAzureDataExplorerDataFormat], pydantic.Field(alias="format")
426
- ] = OutputAzureDataExplorerDataFormat.JSON
427
- r"""Format of the output data"""
681
+ mapping_obj: Annotated[Optional[str], pydantic.Field(alias="mappingObj")] = None
682
+ r"""Enter a JSON object that defines your desired data mapping"""
683
+
684
+ mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
685
+ r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
686
+
687
+ ingest_url: Annotated[Optional[str], pydantic.Field(alias="ingestUrl")] = None
688
+ r"""The ingestion service URI for your cluster. Typically, `https://ingest-<cluster>.<region>.kusto.windows.net`."""
689
+
690
+ on_backpressure: Annotated[
691
+ Annotated[
692
+ Optional[OutputAzureDataExplorerBackpressureBehavior],
693
+ PlainValidator(validate_open_enum(False)),
694
+ ],
695
+ pydantic.Field(alias="onBackpressure"),
696
+ ] = OutputAzureDataExplorerBackpressureBehavior.BLOCK
697
+ r"""How to handle events when all receivers are exerting backpressure"""
 
     stage_path: Annotated[Optional[str], pydantic.Field(alias="stagePath")] = (
         "$CRIBL_HOME/state/outputs/staging"
@@ -462,7 +732,10 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Maximum number of parts to upload in parallel per file"""
 
     on_disk_full_backpressure: Annotated[
-        Optional[OutputAzureDataExplorerDiskSpaceProtection],
+        Annotated[
+            Optional[OutputAzureDataExplorerDiskSpaceProtection],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="onDiskFullBackpressure"),
     ] = OutputAzureDataExplorerDiskSpaceProtection.BLOCK
     r"""How to handle events when disk space is below the global 'Min free disk space' limit"""
@@ -472,16 +745,6 @@ class OutputAzureDataExplorer(BaseModel):
     ] = True
     r"""Add the Output ID value to staging location"""
 
-    remove_empty_dirs: Annotated[
-        Optional[bool], pydantic.Field(alias="removeEmptyDirs")
-    ] = True
-    r"""Remove empty staging directories after moving files"""
-
-    deadletter_enabled: Annotated[
-        Optional[bool], pydantic.Field(alias="deadletterEnabled")
-    ] = False
-    r"""If a file fails to move to its final destination after the maximum number of retries, move it to a designated directory to prevent further errors"""
-
     timeout_sec: Annotated[Optional[float], pydantic.Field(alias="timeoutSec")] = 30
     r"""Amount of time, in seconds, to wait for a request to complete before canceling it"""
 
@@ -506,12 +769,14 @@ class OutputAzureDataExplorer(BaseModel):
     r"""Prevents duplicate ingestion by verifying whether an extent with the specified ingest-by tag already exists"""
 
     report_level: Annotated[
-        Optional[ReportLevel], pydantic.Field(alias="reportLevel")
+        Annotated[Optional[ReportLevel], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="reportLevel"),
     ] = ReportLevel.FAILURES_ONLY
     r"""Level of ingestion status reporting. Defaults to FailuresOnly."""
 
     report_method: Annotated[
-        Optional[ReportMethod], pydantic.Field(alias="reportMethod")
+        Annotated[Optional[ReportMethod], PlainValidator(validate_open_enum(False))],
+        pydantic.Field(alias="reportMethod"),
     ] = ReportMethod.QUEUE
     r"""Target of the ingestion status reporting. Defaults to Queue."""
 
@@ -533,17 +798,9 @@ class OutputAzureDataExplorer(BaseModel):
 
     response_honor_retry_after_header: Annotated[
         Optional[bool], pydantic.Field(alias="responseHonorRetryAfterHeader")
-    ] = False
+    ] = True
     r"""Honor any Retry-After header that specifies a delay (in seconds) no longer than 180 seconds after the retry request. @{product} limits the delay to 180 seconds, even if the Retry-After header specifies a longer delay. When enabled, takes precedence over user-configured retry options. When disabled, all Retry-After headers are ignored."""
 
-    compress: Optional[OutputAzureDataExplorerCompressCompression] = (
-        OutputAzureDataExplorerCompressCompression.GZIP
-    )
-    r"""Data compression format to apply to HTTP content before it is delivered"""
-
-    mapping_ref: Annotated[Optional[str], pydantic.Field(alias="mappingRef")] = None
-    r"""Enter the name of a data mapping associated with your target table. Or, if incoming event and target table fields match exactly, you can leave the field empty."""
-
     concurrency: Optional[float] = 5
     r"""Maximum number of ongoing requests before blocking"""
 
@@ -578,6 +835,35 @@ class OutputAzureDataExplorer(BaseModel):
     keep_alive: Annotated[Optional[bool], pydantic.Field(alias="keepAlive")] = True
     r"""Disable to close the connection immediately after sending the outgoing request"""
 
+    pq_strict_ordering: Annotated[
+        Optional[bool], pydantic.Field(alias="pqStrictOrdering")
+    ] = True
+    r"""Use FIFO (first in, first out) processing. Disable to forward new events to receivers before queue is flushed."""
+
+    pq_rate_per_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqRatePerSec")
+    ] = 0
+    r"""Throttling rate (in events per second) to impose while writing to Destinations from PQ. Defaults to 0, which disables throttling."""
+
+    pq_mode: Annotated[
+        Annotated[
+            Optional[OutputAzureDataExplorerMode],
+            PlainValidator(validate_open_enum(False)),
+        ],
+        pydantic.Field(alias="pqMode"),
+    ] = OutputAzureDataExplorerMode.ERROR
+    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
+
+    pq_max_buffer_size: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBufferSize")
+    ] = 42
+    r"""The maximum number of events to hold in memory before writing the events to disk"""
+
+    pq_max_backpressure_sec: Annotated[
+        Optional[float], pydantic.Field(alias="pqMaxBackpressureSec")
+    ] = 30
+    r"""How long (in seconds) to wait for backpressure to resolve before engaging the queue"""
+
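The five `pq*` fields above arrive together. A hypothetical payload fragment showing how they combine, keyed by the aliases declared on the model; values other than the documented defaults are illustrative.

```python
# Illustrative only: key names are the model aliases above.
pq_settings = {
    "pqMode": "backpressure",     # non-default: engage PQ when the Destination pushes back
    "pqStrictOrdering": True,     # drain FIFO before forwarding new events
    "pqMaxBufferSize": 42,        # events held in memory before spilling to disk
    "pqMaxBackpressureSec": 30,   # seconds of backpressure before engaging the queue
    "pqRatePerSec": 0,            # 0 disables drain throttling
}
```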
     pq_max_file_size: Annotated[
         Optional[str], pydantic.Field(alias="pqMaxFileSize")
     ] = "1 MB"
@@ -592,22 +878,160 @@ class OutputAzureDataExplorer(BaseModel):
     r"""The location for the persistent queue files. To this field's value, the system will append: /<worker-id>/<output-id>."""
 
     pq_compress: Annotated[
-        Optional[OutputAzureDataExplorerPqCompressCompression],
+        Annotated[
+            Optional[OutputAzureDataExplorerPqCompressCompression],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqCompress"),
     ] = OutputAzureDataExplorerPqCompressCompression.NONE
     r"""Codec to use to compress the persisted data"""
 
     pq_on_backpressure: Annotated[
-        Optional[OutputAzureDataExplorerQueueFullBehavior],
+        Annotated[
+            Optional[OutputAzureDataExplorerQueueFullBehavior],
+            PlainValidator(validate_open_enum(False)),
+        ],
         pydantic.Field(alias="pqOnBackpressure"),
     ] = OutputAzureDataExplorerQueueFullBehavior.BLOCK
     r"""How to handle events when the queue is exerting backpressure (full capacity or low disk). 'Block' is the same behavior as non-PQ blocking. 'Drop new data' throws away incoming data, while leaving the contents of the PQ unchanged."""
 
-    pq_mode: Annotated[
-        Optional[OutputAzureDataExplorerMode], pydantic.Field(alias="pqMode")
-    ] = OutputAzureDataExplorerMode.ERROR
-    r"""In Error mode, PQ writes events to the filesystem if the Destination is unavailable. In Backpressure mode, PQ writes events to the filesystem when it detects backpressure from the Destination. In Always On mode, PQ always writes events to the filesystem."""
-
     pq_controls: Annotated[
         Optional[OutputAzureDataExplorerPqControls], pydantic.Field(alias="pqControls")
     ] = None
+
+    @field_serializer("ingest_mode")
+    def serialize_ingest_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.IngestionMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("oauth_endpoint")
+    def serialize_oauth_endpoint(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerMicrosoftEntraIDAuthenticationEndpoint(
+                    value
+                )
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("oauth_type")
+    def serialize_oauth_type(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerAuthenticationMethod(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("format_")
+    def serialize_format_(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerDataFormat(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("compress")
+    def serialize_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("compression_level")
+    def serialize_compression_level(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerCompressionLevel(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("parquet_version")
+    def serialize_parquet_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerParquetVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("parquet_data_page_version")
+    def serialize_parquet_data_page_version(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerDataPageVersion(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_backpressure")
+    def serialize_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerBackpressureBehavior(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("on_disk_full_backpressure")
+    def serialize_on_disk_full_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerDiskSpaceProtection(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("report_level")
+    def serialize_report_level(self, value):
+        if isinstance(value, str):
+            try:
+                return models.ReportLevel(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("report_method")
+    def serialize_report_method(self, value):
+        if isinstance(value, str):
+            try:
+                return models.ReportMethod(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_mode")
+    def serialize_pq_mode(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerMode(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_compress")
+    def serialize_pq_compress(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerPqCompressCompression(value)
+            except ValueError:
+                return value
+        return value
+
+    @field_serializer("pq_on_backpressure")
+    def serialize_pq_on_backpressure(self, value):
+        if isinstance(value, str):
+            try:
+                return models.OutputAzureDataExplorerQueueFullBehavior(value)
+            except ValueError:
+                return value
+        return value
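The section closes on fifteen near-identical serializers. A compact, self-contained sketch of the pattern they share, assuming its purpose is to coerce known strings back into enum members at dump time while letting unrecognized (forward-compatible) values round-trip unchanged; the enum and model names below are stand-ins for the SDK's.

```python
from enum import Enum
from typing import Union

from pydantic import BaseModel, field_serializer


class PqMode(str, Enum):
    ERROR = "error"
    BACKPRESSURE = "backpressure"
    ALWAYS = "always"


class QueueSettings(BaseModel):
    pq_mode: Union[PqMode, str] = PqMode.ERROR

    @field_serializer("pq_mode")
    def serialize_pq_mode(self, value):
        if isinstance(value, str):
            try:
                return PqMode(value)  # known string -> enum member
            except ValueError:
                return value  # unknown value survives the round-trip
        return value


print(QueueSettings(pq_mode="always").model_dump())  # {'pq_mode': <PqMode.ALWAYS: 'always'>}
print(QueueSettings(pq_mode="future").model_dump())  # {'pq_mode': 'future'}
```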